author     Anthony G. Basile <blueness@gentoo.org>  2014-01-06 14:01:09 -0500
committer  Anthony G. Basile <blueness@gentoo.org>  2014-01-06 14:01:09 -0500
commit     c32f9e1a1e58aa66f4b9928c9526c31c66984958 (patch)
tree       06e7af682b18797dd9afc5b22cc3c338731d75e1 /3.2.54
parent     Grsec/PaX: 3.0-3.12.6-201401021726 (diff)
download   hardened-patchset-c32f9e1a1e58aa66f4b9928c9526c31c66984958.tar.gz
           hardened-patchset-c32f9e1a1e58aa66f4b9928c9526c31c66984958.tar.bz2
           hardened-patchset-c32f9e1a1e58aa66f4b9928c9526c31c66984958.zip
Grsec/PaX: 3.0-3.2.54-201401051649
Diffstat (limited to '3.2.54')
-rw-r--r--  3.2.54/0000_README                                       176
-rw-r--r--  3.2.54/1021_linux-3.2.22.patch                          1245
-rw-r--r--  3.2.54/1022_linux-3.2.23.patch                          1862
-rw-r--r--  3.2.54/1023_linux-3.2.24.patch                          4684
-rw-r--r--  3.2.54/1024_linux-3.2.25.patch                          4503
-rw-r--r--  3.2.54/1025_linux-3.2.26.patch                           238
-rw-r--r--  3.2.54/1026_linux-3.2.27.patch                          3188
-rw-r--r--  3.2.54/1027_linux-3.2.28.patch                          1114
-rw-r--r--  3.2.54/1028_linux-3.2.29.patch                          4279
-rw-r--r--  3.2.54/1029_linux-3.2.30.patch                          5552
-rw-r--r--  3.2.54/1030_linux-3.2.31.patch                          3327
-rw-r--r--  3.2.54/1031_linux-3.2.32.patch                          6206
-rw-r--r--  3.2.54/1032_linux-3.2.33.patch                          3450
-rw-r--r--  3.2.54/1033_linux-3.2.34.patch                          3678
-rw-r--r--  3.2.54/1034_linux-3.2.35.patch                          3014
-rw-r--r--  3.2.54/1035_linux-3.2.36.patch                          6434
-rw-r--r--  3.2.54/1036_linux-3.2.37.patch                          1689
-rw-r--r--  3.2.54/1037_linux-3.2.38.patch                          4587
-rw-r--r--  3.2.54/1038_linux-3.2.39.patch                          2660
-rw-r--r--  3.2.54/1039_linux-3.2.40.patch                          6295
-rw-r--r--  3.2.54/1040_linux-3.2.41.patch                          3865
-rw-r--r--  3.2.54/1041_linux-3.2.42.patch                          3602
-rw-r--r--  3.2.54/1042_linux-3.2.43.patch                          2442
-rw-r--r--  3.2.54/1043_linux-3.2.44.patch                          2808
-rw-r--r--  3.2.54/1044_linux-3.2.45.patch                          3809
-rw-r--r--  3.2.54/1045_linux-3.2.46.patch                          3142
-rw-r--r--  3.2.54/1046_linux-3.2.47.patch                          3314
-rw-r--r--  3.2.54/1047_linux-3.2.48.patch                           952
-rw-r--r--  3.2.54/1048_linux-3.2.49.patch                          2970
-rw-r--r--  3.2.54/1049_linux-3.2.50.patch                          2495
-rw-r--r--  3.2.54/1050_linux-3.2.51.patch                          3886
-rw-r--r--  3.2.54/1051_linux-3.2.52.patch                          5221
-rw-r--r--  3.2.54/1052_linux-3.2.53.patch                          3357
-rw-r--r--  3.2.54/1053_linux-3.2.54.patch                          6825
-rw-r--r--  3.2.54/4420_grsecurity-3.0-3.2.54-201401051649.patch  117735
-rw-r--r--  3.2.54/4425_grsec_remove_EI_PAX.patch                     19
-rw-r--r--  3.2.54/4427_force_XATTR_PAX_tmpfs.patch                   35
-rw-r--r--  3.2.54/4430_grsec-remove-localversion-grsec.patch          9
-rw-r--r--  3.2.54/4435_grsec-mute-warnings.patch                     43
-rw-r--r--  3.2.54/4440_grsec-remove-protected-paths.patch            19
-rw-r--r--  3.2.54/4450_grsec-kconfig-default-gids.patch             111
-rw-r--r--  3.2.54/4465_selinux-avc_audit-log-curr_ip.patch           73
-rw-r--r--  3.2.54/4470_disable-compat_vdso.patch                     46
-rw-r--r--  3.2.54/4475_emutramp_default_on.patch                     21
44 files changed, 234980 insertions, 0 deletions
diff --git a/3.2.54/0000_README b/3.2.54/0000_README
new file mode 100644
index 0000000..ffb2597
--- /dev/null
+++ b/3.2.54/0000_README
@@ -0,0 +1,176 @@
+README
+-----------------------------------------------------------------------------
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch: 1021_linux-3.2.22.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.22
+
+Patch: 1022_linux-3.2.23.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.23
+
+Patch: 1023_linux-3.2.24.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.24
+
+Patch: 1024_linux-3.2.25.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.25
+
+Patch: 1025_linux-3.2.26.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.26
+
+Patch: 1026_linux-3.2.27.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.27
+
+Patch: 1027_linux-3.2.28.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.28
+
+Patch: 1028_linux-3.2.29.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.29
+
+Patch: 1029_linux-3.2.30.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.30
+
+Patch: 1030_linux-3.2.31.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.31
+
+Patch: 1031_linux-3.2.32.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.32
+
+Patch: 1032_linux-3.2.33.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.33
+
+Patch: 1033_linux-3.2.34.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.34
+
+Patch: 1034_linux-3.2.35.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.35
+
+Patch: 1035_linux-3.2.36.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.36
+
+Patch: 1036_linux-3.2.37.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.37
+
+Patch: 1037_linux-3.2.38.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.38
+
+Patch: 1038_linux-3.2.39.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.39
+
+Patch: 1039_linux-3.2.40.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.40
+
+Patch: 1040_linux-3.2.41.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.41
+
+Patch: 1041_linux-3.2.42.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.42
+
+Patch: 1042_linux-3.2.43.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.43
+
+Patch: 1043_linux-3.2.44.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.44
+
+Patch: 1044_linux-3.2.45.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.45
+
+Patch: 1045_linux-3.2.46.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.46
+
+Patch: 1046_linux-3.2.47.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.47
+
+Patch: 1047_linux-3.2.48.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.48
+
+Patch: 1048_linux-3.2.49.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.49
+
+Patch: 1049_linux-3.2.50.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.50
+
+Patch: 1050_linux-3.2.51.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.51
+
+Patch: 1051_linux-3.2.52.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.52
+
+Patch: 1052_linux-3.2.53.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.53
+
+Patch: 1053_linux-3.2.54.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.54
+
+Patch: 4420_grsecurity-3.0-3.2.54-201401051649.patch
+From: http://www.grsecurity.net
+Desc: hardened-sources base patch from upstream grsecurity
+
+Patch: 4425_grsec_remove_EI_PAX.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Remove EI_PAX option and force off
+
+Patch: 4430_grsec-remove-localversion-grsec.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Removes grsecurity's localversion-grsec file
+
+Patch: 4435_grsec-mute-warnings.patch
+From: Alexander Gabert <gaberta@fh-trier.de>
+ Gordon Malm <gengor@gentoo.org>
+Desc: Removes verbose compile warning settings from grsecurity, restores
+ mainline Linux kernel behavior
+
+Patch: 4440_grsec-remove-protected-paths.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Removes chmod statements from grsecurity/Makefile
+
+Patch: 4450_grsec-kconfig-default-gids.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
+ features
+
+Patch: 4465_selinux-avc_audit-log-curr_ip.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Anthony G. Basile <blueness@gentoo.org>
+Desc: Configurable option to add src IP address to SELinux log messages
+
+Patch: 4470_disable-compat_vdso.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Kerin Millar <kerframil@gmail.com>
+Desc: Disables VDSO_COMPAT operation completely
+
+Patch: 4475_emutramp_default_on.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Set PAX_EMUTRAMP default on for libffi, bugs #329499 and #457194
diff --git a/3.2.54/1021_linux-3.2.22.patch b/3.2.54/1021_linux-3.2.22.patch
new file mode 100644
index 0000000..e6ad93a
--- /dev/null
+++ b/3.2.54/1021_linux-3.2.22.patch
@@ -0,0 +1,1245 @@
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index 21fd05c..e1f856b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
+ marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
+ security issue, or some "oh, that's not good" issue. In short, something
+ critical.
++ - Serious issues as reported by a user of a distribution kernel may also
++ be considered if they fix a notable performance or interactivity issue.
++ As these fixes are not as obvious and have a higher risk of a subtle
++ regression they should only be submitted by a distribution kernel
++ maintainer and include an addendum linking to a bugzilla entry if it
++ exists and additional information on the user-visible impact.
+ - New device IDs and quirks are also accepted.
+ - No "theoretical race condition" issues, unless an explanation of how the
+ race can be exploited is also provided.
+diff --git a/Makefile b/Makefile
+index 7eb465e..9a7d921 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
+index 7d04875..c0c70a8 100644
+--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
++++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
+@@ -22,7 +22,7 @@
+ #define S3C24XX_VA_WATCHDOG S3C_VA_WATCHDOG
+
+ #define S3C2412_VA_SSMC S3C_ADDR_CPU(0x00000000)
+-#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00010000)
++#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00100000)
+
+ #define S3C2410_PA_UART (0x50000000)
+ #define S3C24XX_PA_UART S3C2410_PA_UART
+diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+index 40dbb2b..11b19ea 100644
+--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
++++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+@@ -24,7 +24,7 @@ static inline void arch_wdt_reset(void)
+
+ __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
+
+- if (s3c2410_wdtclk)
++ if (!IS_ERR(s3c2410_wdtclk))
+ clk_enable(s3c2410_wdtclk);
+
+ /* put initial values into count and data */
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index f3444f7..0c3b775 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -175,7 +175,7 @@
+ #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
+ #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
+ #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
+-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
++#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index effff47..cb00ccc 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -31,6 +31,60 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ ptep->pte_low = pte.pte_low;
+ }
+
++#define pmd_read_atomic pmd_read_atomic
++/*
++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
++ * where pte_offset_map_lock is called, concurrent page faults are
++ * allowed, if the mmap_sem is hold for reading. An example is mincore
++ * vs page faults vs MADV_DONTNEED. On the page fault side
++ * pmd_populate rightfully does a set_64bit, but if we're reading the
++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
++ * because gcc will not read the 64bit of the pmd atomically. To fix
++ * this all places running pmd_offset_map_lock() while holding the
++ * mmap_sem in read mode, shall read the pmdp pointer using this
++ * function to know if the pmd is null nor not, and in turn to know if
++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
++ * operations.
++ *
++ * Without THP if the mmap_sem is hold for reading, the pmd can only
++ * transition from null to not null while pmd_read_atomic runs. So
++ * we can always return atomic pmd values with this function.
++ *
++ * With THP if the mmap_sem is hold for reading, the pmd can become
++ * trans_huge or none or point to a pte (and in turn become "stable")
++ * at any time under pmd_read_atomic. We could read it really
++ * atomically here with a atomic64_read for the THP enabled case (and
++ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
++ * only return an atomic pmdval if the low part of the pmdval is later
++ * found stable (i.e. pointing to a pte). And we're returning a none
++ * pmdval if the low part of the pmd is none. In some cases the high
++ * and low part of the pmdval returned may not be consistent if THP is
++ * enabled (the low part may point to previously mapped hugepage,
++ * while the high part may point to a more recently mapped hugepage),
++ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
++ * of the pmd to be read atomically to decide if the pmd is unstable
++ * or not, with the only exception of when the low part of the pmd is
++ * zero in which case we return a none pmd.
++ */
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++ pmdval_t ret;
++ u32 *tmp = (u32 *)pmdp;
++
++ ret = (pmdval_t) (*tmp);
++ if (ret) {
++ /*
++ * If the low part is null, we must not read the high part
++ * or we can end up with a partial pmd.
++ */
++ smp_rmb();
++ ret |= ((pmdval_t)*(tmp + 1)) << 32;
++ }
++
++ return (pmd_t) { ret };
++}
++
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index c7f64e6..ea6106c 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+ const struct cpuid_bit *cb;
+
+ static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+- { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
++ { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
+ { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
+diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
+index a43fa1a..1502c502 100644
+--- a/drivers/acpi/acpi_pad.c
++++ b/drivers/acpi/acpi_pad.c
+@@ -36,6 +36,7 @@
+ #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
+ #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+ static DEFINE_MUTEX(isolated_cpus_lock);
++static DEFINE_MUTEX(round_robin_lock);
+
+ static unsigned long power_saving_mwait_eax;
+
+@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return;
+
+- mutex_lock(&isolated_cpus_lock);
++ mutex_lock(&round_robin_lock);
+ cpumask_clear(tmp);
+ for_each_cpu(cpu, pad_busy_cpus)
+ cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ if (cpumask_empty(tmp))
+ cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+ if (cpumask_empty(tmp)) {
+- mutex_unlock(&isolated_cpus_lock);
++ mutex_unlock(&round_robin_lock);
+ return;
+ }
+ for_each_cpu(cpu, tmp) {
+@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ tsk_in_cpu[tsk_index] = preferred_cpu;
+ cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
+ cpu_weight[preferred_cpu]++;
+- mutex_unlock(&isolated_cpus_lock);
++ mutex_unlock(&round_robin_lock);
+
+ set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+ }
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c3d2dfc..b96544a 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -869,7 +869,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ dpm_wait_for_children(dev, async);
+
+ if (async_error)
+- return 0;
++ goto Complete;
+
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+@@ -878,7 +878,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ async_error = -EBUSY;
+- return 0;
++ goto Complete;
+ }
+
+ device_lock(dev);
+@@ -926,6 +926,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ }
+
+ device_unlock(dev);
++
++ Complete:
+ complete_all(&dev->power.completion);
+
+ if (error) {
+diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
+index 0477982..1b5675b 100644
+--- a/drivers/char/hw_random/atmel-rng.c
++++ b/drivers/char/hw_random/atmel-rng.c
+@@ -34,7 +34,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+ u32 *data = buf;
+
+ /* data ready? */
+- if (readl(trng->base + TRNG_ODATA) & 1) {
++ if (readl(trng->base + TRNG_ISR) & 1) {
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ ensure data ready is only set again AFTER the next data
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 70ad892..b3ccefa 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1932,12 +1932,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ if (mce->bank != 8)
+ return NOTIFY_DONE;
+
+-#ifdef CONFIG_SMP
+- /* Only handle if it is the right mc controller */
+- if (mce->socketid != pvt->i7core_dev->socket)
+- return NOTIFY_DONE;
+-#endif
+-
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+@@ -2234,8 +2228,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
+ if (pvt->enable_scrub)
+ disable_sdram_scrub_setting(mci);
+
+- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+-
+ /* Disable EDAC polling */
+ i7core_pci_ctl_release(pvt);
+
+@@ -2336,8 +2328,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
+ /* DCLK for scrub rate setting */
+ pvt->dclk_freq = get_dclk_freq();
+
+- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+-
+ return 0;
+
+ fail0:
+@@ -2481,8 +2471,10 @@ static int __init i7core_init(void)
+
+ pci_rc = pci_register_driver(&i7core_driver);
+
+- if (pci_rc >= 0)
++ if (pci_rc >= 0) {
++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+ return 0;
++ }
+
+ i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+@@ -2498,6 +2490,7 @@ static void __exit i7core_exit(void)
+ {
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i7core_driver);
++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+ }
+
+ module_init(i7core_init);
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 7a402bf..18a1293 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1661,9 +1661,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &sbridge_dev->pdev[0]->dev);
+
+- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
+- &sbridge_mce_dec);
+-
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->dev);
+
+@@ -1731,8 +1728,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
+ goto fail0;
+ }
+
+- atomic_notifier_chain_register(&x86_mce_decoder_chain,
+- &sbridge_mce_dec);
+ return 0;
+
+ fail0:
+@@ -1861,8 +1856,10 @@ static int __init sbridge_init(void)
+
+ pci_rc = pci_register_driver(&sbridge_driver);
+
+- if (pci_rc >= 0)
++ if (pci_rc >= 0) {
++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &sbridge_mce_dec);
+ return 0;
++ }
+
+ sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+@@ -1878,6 +1875,7 @@ static void __exit sbridge_exit(void)
+ {
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&sbridge_driver);
++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &sbridge_mce_dec);
+ }
+
+ module_init(sbridge_init);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 3e927ce..a1ee634 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -585,7 +585,7 @@ static bool
+ drm_monitor_supports_rb(struct edid *edid)
+ {
+ if (edid->revision >= 4) {
+- bool ret;
++ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 3e7c478..3e2edc6 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3312,6 +3312,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
++ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
++ seqno) ||
++ atomic_read(&dev_priv->mm.wedged), 3000)) {
++ ret = -EBUSY;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index d3820c2..578ddfc 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -424,6 +424,30 @@ static void gen6_pm_rps_work(struct work_struct *work)
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+ }
+
++static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
++ u32 pm_iir)
++{
++ unsigned long flags;
++
++ /*
++ * IIR bits should never already be set because IMR should
++ * prevent an interrupt from being shown in IIR. The warning
++ * displays a case where we've unsafely cleared
++ * dev_priv->pm_iir. Although missing an interrupt of the same
++ * type is not a problem, it displays a problem in the logic.
++ *
++ * The mask bit in IMR is cleared by rps_work.
++ */
++
++ spin_lock_irqsave(&dev_priv->rps_lock, flags);
++ dev_priv->pm_iir |= pm_iir;
++ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
++ POSTING_READ(GEN6_PMIMR);
++ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
++
++ queue_work(dev_priv->wq, &dev_priv->rps_work);
++}
++
+ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+@@ -529,16 +553,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ pch_irq_handler(dev, pch_iir);
+ }
+
+- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+- unsigned long flags;
+- spin_lock_irqsave(&dev_priv->rps_lock, flags);
+- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+- dev_priv->pm_iir |= pm_iir;
+- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+- POSTING_READ(GEN6_PMIMR);
+- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+- queue_work(dev_priv->wq, &dev_priv->rps_work);
+- }
++ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
++ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+@@ -634,25 +650,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ i915_handle_rps_change(dev);
+ }
+
+- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+- /*
+- * IIR bits should never already be set because IMR should
+- * prevent an interrupt from being shown in IIR. The warning
+- * displays a case where we've unsafely cleared
+- * dev_priv->pm_iir. Although missing an interrupt of the same
+- * type is not a problem, it displays a problem in the logic.
+- *
+- * The mask bit in IMR is cleared by rps_work.
+- */
+- unsigned long flags;
+- spin_lock_irqsave(&dev_priv->rps_lock, flags);
+- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+- dev_priv->pm_iir |= pm_iir;
+- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+- POSTING_READ(GEN6_PMIMR);
+- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+- queue_work(dev_priv->wq, &dev_priv->rps_work);
+- }
++ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
++ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index a1eb83d..f38d196 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -739,8 +739,11 @@ static void i915_restore_display(struct drm_device *dev)
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
++ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
++ * otherwise we get blank eDP screen after S3 on some machines
++ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
++ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 5c1cdb8..6aa7716 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2187,6 +2187,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ }
+
+ static int
++intel_finish_fb(struct drm_framebuffer *old_fb)
++{
++ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
++ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++ bool was_interruptible = dev_priv->mm.interruptible;
++ int ret;
++
++ wait_event(dev_priv->pending_flip_queue,
++ atomic_read(&dev_priv->mm.wedged) ||
++ atomic_read(&obj->pending_flip) == 0);
++
++ /* Big Hammer, we also need to ensure that any pending
++ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
++ * current scanout is retired before unpinning the old
++ * framebuffer.
++ *
++ * This should only fail upon a hung GPU, in which case we
++ * can safely continue.
++ */
++ dev_priv->mm.interruptible = false;
++ ret = i915_gem_object_finish_gpu(obj);
++ dev_priv->mm.interruptible = was_interruptible;
++
++ return ret;
++}
++
++static int
+ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+ {
+@@ -2224,25 +2251,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ return ret;
+ }
+
+- if (old_fb) {
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+-
+- wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&dev_priv->mm.wedged) ||
+- atomic_read(&obj->pending_flip) == 0);
+-
+- /* Big Hammer, we also need to ensure that any pending
+- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+- * current scanout is retired before unpinning the old
+- * framebuffer.
+- *
+- * This should only fail upon a hung GPU, in which case we
+- * can safely continue.
+- */
+- ret = i915_gem_object_finish_gpu(obj);
+- (void) ret;
+- }
++ if (old_fb)
++ intel_finish_fb(old_fb);
+
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ LEAVE_ATOMIC_MODE_SET);
+@@ -3312,6 +3322,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
++ /* Flush any pending WAITs before we disable the pipe. Note that
++ * we need to drop the struct_mutex in order to acquire it again
++ * during the lowlevel dpms routines around a couple of the
++ * operations. It does not look trivial nor desirable to move
++ * that locking higher. So instead we leave a window for the
++ * submission of further commands on the fb before we can actually
++ * disable it. This race with userspace exists anyway, and we can
++ * only rely on the pipe being disabled by userspace after it
++ * receives the hotplug notification and has flushed any pending
++ * batches.
++ */
++ if (crtc->fb) {
++ mutex_lock(&dev->struct_mutex);
++ intel_finish_fb(crtc->fb);
++ mutex_unlock(&dev->struct_mutex);
++ }
++
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 933e66b..f6613dc 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -306,7 +306,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+- | RING_REPORT_64K | RING_VALID);
++ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+@@ -1157,18 +1157,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long end;
+- u32 head;
+-
+- /* If the reported head position has wrapped or hasn't advanced,
+- * fallback to the slow and accurate path.
+- */
+- head = intel_read_status_page(ring, 4);
+- if (head > ring->head) {
+- ring->head = head;
+- ring->space = ring_space(ring);
+- if (ring->space >= n)
+- return 0;
+- }
+
+ trace_i915_ring_wait_begin(ring);
+ end = jiffies + 3 * HZ;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 3a4cc32..cc0801d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -499,7 +499,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &nfbdev->helper,
+- nv_two_heads(dev) ? 2 : 1, 4);
++ dev->mode_config.num_crtc, 4);
+ if (ret) {
+ kfree(nfbdev);
+ return ret;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 4c07436..d99aa84 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+- pr_warn("%s: read arg fail\n", key);
++ pr_warn("%.4s: read arg fail\n", key);
+ return -EIO;
+ }
+
+@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+
+ for (i = 0; i < len; i++) {
+ if (__wait_status(0x05)) {
+- pr_warn("%s: read data fail\n", key);
++ pr_warn("%.4s: read data fail\n", key);
+ return -EIO;
+ }
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 427468f..0790c98 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -660,7 +660,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+- if (!cpu_has(c, X86_FEATURE_DTS))
++ if (!cpu_has(c, X86_FEATURE_DTHERM))
+ return;
+
+ if (!pdev) {
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index da2f021..532a902 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -288,8 +288,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
+
+ hlist_del(&cell->list);
+
+- bio_list_add(inmates, cell->holder);
+- bio_list_merge(inmates, &cell->bios);
++ if (inmates) {
++ bio_list_add(inmates, cell->holder);
++ bio_list_merge(inmates, &cell->bios);
++ }
+
+ mempool_free(cell, prison->cell_pool);
+ }
+@@ -312,9 +314,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
+ */
+ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+ {
+- hlist_del(&cell->list);
+ BUG_ON(cell->holder != bio);
+ BUG_ON(!bio_list_empty(&cell->bios));
++
++ __cell_release(cell, NULL);
+ }
+
+ static void cell_release_singleton(struct cell *cell, struct bio *bio)
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index b7d1e3e..fb68805 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc0a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
++ { USB_DEVICE(0x2040, 0xf5a0),
++ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
+index 2ca10df..981501f 100644
+--- a/drivers/media/video/gspca/gspca.c
++++ b/drivers/media/video/gspca/gspca.c
+@@ -1697,7 +1697,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+ {
+ struct gspca_dev *gspca_dev = priv;
+- int ret;
++ int i, ret;
+
+ if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+@@ -1728,6 +1728,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ wake_up_interruptible(&gspca_dev->wq);
+
+ /* empty the transfer queues */
++ for (i = 0; i < gspca_dev->nframes; i++)
++ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
+ atomic_set(&gspca_dev->fr_q, 0);
+ atomic_set(&gspca_dev->fr_i, 0);
+ gspca_dev->fr_o = 0;
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 8dc84d6..86cd532 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
+
+- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
+- CAN_CTRLMODE_LOOPBACK)) {
++ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
++ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+ /* loopback + silent mode : useful for hot self-test */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e023379..e59d006 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -933,12 +933,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
+ u32 clock_freq = 0;
+
+ if (pdev->dev.of_node) {
+- const u32 *clock_freq_p;
++ const __be32 *clock_freq_p;
+
+ clock_freq_p = of_get_property(pdev->dev.of_node,
+ "clock-frequency", NULL);
+ if (clock_freq_p)
+- clock_freq = *clock_freq_p;
++ clock_freq = be32_to_cpup(clock_freq_p);
+ }
+
+ if (!clock_freq) {
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index a3e65fd..e556fc3 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -2080,8 +2080,9 @@ const struct e1000_info e1000_82574_info = {
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+- .flags2 = FLAG2_CHECK_PHY_HANG
++ .flags2 = FLAG2_CHECK_PHY_HANG
+ | FLAG2_DISABLE_ASPM_L0S
++ | FLAG2_DISABLE_ASPM_L1
+ | FLAG2_NO_DISABLE_RX,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 4e933d1..64d3f98 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5132,14 +5132,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ return -EINVAL;
+ }
+
+- /* 82573 Errata 17 */
+- if (((adapter->hw.mac.type == e1000_82573) ||
+- (adapter->hw.mac.type == e1000_82574)) &&
+- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+- }
+-
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 8b0c2ca..6973620 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -718,13 +718,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
+
+ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+ {
++ struct ath_common *common = ath9k_hw_common(ah);
++ int i = 0;
++
+ REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+ udelay(100);
+ REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+
+- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
++ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
++
+ udelay(100);
+
++ if (WARN_ON_ONCE(i >= 100)) {
++ ath_err(common, "PLL4 meaurement not done\n");
++ break;
++ }
++
++ i++;
++ }
++
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+ }
+ EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index f76a814..95437fc 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1042,6 +1042,15 @@ void ath_hw_pll_work(struct work_struct *work)
+ hw_pll_work.work);
+ u32 pll_sqsum;
+
++ /*
++ * ensure that the PLL WAR is executed only
++ * after the STA is associated (or) if the
++ * beaconing had started in interfaces that
++ * uses beacons.
++ */
++ if (!(sc->sc_flags & SC_OP_BEACONS))
++ return;
++
+ if (AR_SREV_9485(sc->sc_ah)) {
+
+ ath9k_ps_wakeup(sc);
+@@ -1486,15 +1495,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
+ }
+ }
+
+- if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
+- ((vif->type == NL80211_IFTYPE_ADHOC) &&
+- sc->nvifs > 0)) {
+- ath_err(common, "Cannot create ADHOC interface when other"
+- " interfaces already exist.\n");
+- ret = -EINVAL;
+- goto out;
+- }
+-
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 76fd277..c59c592 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -936,13 +936,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
+ }
+
+ /* legacy rates */
++ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ !(rate->flags & IEEE80211_RATE_ERP_G))
+ phy = WLAN_RC_PHY_CCK;
+ else
+ phy = WLAN_RC_PHY_OFDM;
+
+- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ info->rates[i].Rate = rate->hw_value;
+ if (rate->hw_value_short) {
+ if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 5815cf5..4661a64 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -1777,6 +1777,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ }
+
++#ifdef CONFIG_IWLWIFI_DEBUG
+ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+@@ -1814,6 +1815,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+
+ return count;
+ }
++#endif
+
+ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+@@ -1941,7 +1943,9 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ return ret;
+ }
+
++#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_READ_WRITE_FILE_OPS(log_event);
++#endif
+ DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+ DEBUGFS_READ_FILE_OPS(fh_reg);
+ DEBUGFS_READ_FILE_OPS(rx_queue);
+@@ -1957,7 +1961,9 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ {
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
++#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
++#endif
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 226faab..fc35308 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1922,14 +1922,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+- unregister_netdev(info->netdev);
+-
+ xennet_disconnect_backend(info);
+
+- del_timer_sync(&info->rx_refill_timer);
+-
+ xennet_sysfs_delif(info->netdev);
+
++ unregister_netdev(info->netdev);
++
++ del_timer_sync(&info->rx_refill_timer);
++
+ free_percpu(info->stats);
+
+ free_netdev(info->netdev);
+diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
+index da14432..efc4b7f 100644
+--- a/drivers/oprofile/oprofile_perf.c
++++ b/drivers/oprofile/oprofile_perf.c
+@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
+ static DEFINE_MUTEX(oprofile_perf_mutex);
+
+ static struct op_counter_config *counter_config;
+-static struct perf_event **perf_events[nr_cpumask_bits];
++static struct perf_event **perf_events[NR_CPUS];
+ static int num_counters;
+
+ /*
+diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
+index 54423ab..2ee187f 100644
+--- a/drivers/staging/iio/adc/ad7606_core.c
++++ b/drivers/staging/iio/adc/ad7606_core.c
+@@ -241,6 +241,7 @@ static const struct attribute_group ad7606_attribute_group = {
+ .indexed = 1, \
+ .channel = num, \
+ .address = num, \
++ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .scan_index = num, \
+ .scan_type = IIO_ST('s', 16, 16, 0), \
+ }
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index ec41d38..f4b738f 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ /* - */
+ {USB_DEVICE(0x20F4, 0x646B)},
+ {USB_DEVICE(0x083A, 0xC512)},
++ {USB_DEVICE(0x25D4, 0x4CA1)},
++ {USB_DEVICE(0x25D4, 0x4CAB)},
+
+ /* RTL8191SU */
+ /* Realtek */
+diff --git a/drivers/staging/rts_pstor/rtsx_transport.c b/drivers/staging/rts_pstor/rtsx_transport.c
+index 4e3d2c1..9b2e5c9 100644
+--- a/drivers/staging/rts_pstor/rtsx_transport.c
++++ b/drivers/staging/rts_pstor/rtsx_transport.c
+@@ -335,6 +335,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ int sg_cnt, i, resid;
+ int err = 0;
+ long timeleft;
++ struct scatterlist *sg_ptr;
+ u32 val = TRIG_DMA;
+
+ if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+@@ -371,7 +372,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ resid = size;
+-
++ sg_ptr = sg;
+ chip->sgi = 0;
+ /* Usually the next entry will be @sg@ + 1, but if this sg element
+ * is part of a chained scatterlist, it could jump to the start of
+@@ -379,14 +380,14 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ * the proper sg
+ */
+ for (i = 0; i < *index; i++)
+- sg = sg_next(sg);
++ sg_ptr = sg_next(sg_ptr);
+ for (i = *index; i < sg_cnt; i++) {
+ dma_addr_t addr;
+ unsigned int len;
+ u8 option;
+
+- addr = sg_dma_address(sg);
+- len = sg_dma_len(sg);
++ addr = sg_dma_address(sg_ptr);
++ len = sg_dma_len(sg_ptr);
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+@@ -415,7 +416,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ if (!resid)
+ break;
+
+- sg = sg_next(sg);
++ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index aa0c43f..35e6b5f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
+ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
++ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
+ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+@@ -134,7 +135,13 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
++ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
++ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
+ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
++ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
++ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
++ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
++ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
+ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+@@ -146,7 +153,11 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
++ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
++ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
++ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ { } /* Terminating Entry */
+ };
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 61d6c31..21a4734 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -235,6 +235,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_G1 0xA001
+ #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
++#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+
+ /* AMOI PRODUCTS */
+ #define AMOI_VENDOR_ID 0x1614
+@@ -496,6 +497,10 @@ static void option_instat_callback(struct urb *urb);
+ /* MediaTek products */
+ #define MEDIATEK_VENDOR_ID 0x0e8d
+
++/* Cellient products */
++#define CELLIENT_VENDOR_ID 0x2692
++#define CELLIENT_PRODUCT_MEN200 0x9005
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -730,6 +735,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
++ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -1227,6 +1234,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
++ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index 08a07a2..57ceaf3 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
+ while (!list_empty(head)) {
+ ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
+ list_del_init(&ii->i_dirty);
++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index bb24ab6..6f24e67 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
+ if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
+ continue;
+ list_del_init(&ii->i_dirty);
++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index a03c098..bc00876 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
++#ifndef pmd_read_atomic
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++ /*
++ * Depend on compiler for an atomic pmd read. NOTE: this is
++ * only going to work, if the pmdval_t isn't larger than
++ * an unsigned long.
++ */
++ return *pmdp;
++}
++#endif
++
+ /*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+@@ -458,14 +470,30 @@ static inline int pmd_write(pmd_t pmd)
+ * undefined so behaving like if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
++ *
++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
++ * care of reading the pmd atomically to avoid SMP race conditions
++ * against pmd_populate() when the mmap_sem is hold for reading by the
++ * caller (a special atomic read not done by "gcc" as in the generic
++ * version above, is also needed when THP is disabled because the page
++ * fault can populate the pmd from under us).
+ */
+ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ {
+- /* depend on compiler for an atomic pmd read */
+- pmd_t pmdval = *pmd;
++ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
++ *
++ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
++ * pmd_read_atomic is allowed to return a not atomic pmdval
++ * (for example pointing to an hugepage that has never been
++ * mapped in the pmd). The below checks will only care about
++ * the low part of the pmd with 32bit PAE x86 anyway, with the
++ * exception of pmd_none(). So the important thing is that if
++ * the low part of the pmd is found null, the high part will
++ * be also null or the pmd_none() check below would be
++ * confused.
+ */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index f961cc5..da587ad 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -619,6 +619,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+ /* packet needs to be linearized to access the TT changes */
+ if (skb_linearize(skb) < 0)
+ goto out;
++ /* skb_linearize() possibly changed skb->data */
++ tt_query = (struct tt_query_packet *)skb->data;
+
+ if (is_my_mac(tt_query->dst))
+ handle_tt_response(bat_priv, tt_query);
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 5f09a57..088af45 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -1816,10 +1816,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+ {
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
+- bool ret = true;
++ bool ret = false;
+
+ if (!atomic_read(&bat_priv->ap_isolation))
+- return false;
++ goto out;
+
+ tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ if (!tt_local_entry)
+@@ -1829,10 +1829,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+ if (!tt_global_entry)
+ goto out;
+
+- if (_is_ap_isolated(tt_local_entry, tt_global_entry))
++ if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
+- ret = false;
++ ret = true;
+
+ out:
+ if (tt_global_entry)
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index c1c99dd..d57d05b 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -1369,7 +1369,7 @@ static void reg_set_request_processed(void)
+ spin_unlock(&reg_requests_lock);
+
+ if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
+- cancel_delayed_work_sync(&reg_timeout);
++ cancel_delayed_work(&reg_timeout);
+
+ if (need_more_processing)
+ schedule_work(&reg_work);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0005bde..5f096a5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5988,6 +5988,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
++ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 11224ed..323d4d9 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -384,14 +384,18 @@ int main(void)
+ pfd.fd = fd;
+
+ while (1) {
++ struct sockaddr *addr_p = (struct sockaddr *) &addr;
++ socklen_t addr_l = sizeof(addr);
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+ poll(&pfd, 1, -1);
+
+- len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
++ len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
++ addr_p, &addr_l);
+
+- if (len < 0) {
+- syslog(LOG_ERR, "recv failed; error:%d", len);
++ if (len < 0 || addr.nl_pid) {
++ syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
++ addr.nl_pid, errno, strerror(errno));
+ close(fd);
+ return -1;
+ }
diff --git a/3.2.54/1022_linux-3.2.23.patch b/3.2.54/1022_linux-3.2.23.patch
new file mode 100644
index 0000000..3d796d0
--- /dev/null
+++ b/3.2.54/1022_linux-3.2.23.patch
@@ -0,0 +1,1862 @@
+diff --git a/Makefile b/Makefile
+index 9a7d921..40d1e3b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index e10e59a..1d1710e 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -471,9 +471,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+ static void ipi_timer(void)
+ {
+ struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
+- irq_enter();
+ evt->event_handler(evt);
+- irq_exit();
+ }
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+@@ -572,7 +570,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+
+ switch (ipinr) {
+ case IPI_TIMER:
++ irq_enter();
+ ipi_timer();
++ irq_exit();
+ break;
+
+ case IPI_RESCHEDULE:
+@@ -580,15 +580,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ break;
+
+ case IPI_CALL_FUNC:
++ irq_enter();
+ generic_smp_call_function_interrupt();
++ irq_exit();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
++ irq_enter();
+ generic_smp_call_function_single_interrupt();
++ irq_exit();
+ break;
+
+ case IPI_CPU_STOP:
++ irq_enter();
+ ipi_cpu_stop(cpu);
++ irq_exit();
+ break;
+
+ default:
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 44d8829..5e8dc08 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -763,7 +763,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ lwz r3,VCORE_NAPPING_THREADS(r5)
+ lwz r4,VCPU_PTID(r9)
+ li r0,1
+- sldi r0,r0,r4
++ sld r0,r0,r4
+ andc. r3,r3,r0 /* no sense IPI'ing ourselves */
+ beq 43f
+ mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 03a217a..b7e63d8 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -975,7 +975,7 @@ static int cpu_cmd(void)
+ /* print cpus waiting or in xmon */
+ printf("cpus stopped:");
+ count = 0;
+- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
+ if (count == 0)
+ printf(" %x", cpu);
+diff --git a/drivers/block/umem.c b/drivers/block/umem.c
+index aa27120..9a72277 100644
+--- a/drivers/block/umem.c
++++ b/drivers/block/umem.c
+@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
+ }
+ }
+
++struct mm_plug_cb {
++ struct blk_plug_cb cb;
++ struct cardinfo *card;
++};
++
++static void mm_unplug(struct blk_plug_cb *cb)
++{
++ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
++
++ spin_lock_irq(&mmcb->card->lock);
++ activate(mmcb->card);
++ spin_unlock_irq(&mmcb->card->lock);
++ kfree(mmcb);
++}
++
++static int mm_check_plugged(struct cardinfo *card)
++{
++ struct blk_plug *plug = current->plug;
++ struct mm_plug_cb *mmcb;
++
++ if (!plug)
++ return 0;
++
++ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
++ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
++ return 1;
++ }
++ /* Not currently on the callback list */
++ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
++ if (!mmcb)
++ return 0;
++
++ mmcb->card = card;
++ mmcb->cb.callback = mm_unplug;
++ list_add(&mmcb->cb.list, &plug->cb_list);
++ return 1;
++}
++
+ static void mm_make_request(struct request_queue *q, struct bio *bio)
+ {
+ struct cardinfo *card = q->queuedata;
+@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
+ *card->biotail = bio;
+ bio->bi_next = NULL;
+ card->biotail = &bio->bi_next;
++ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
++ activate(card);
+ spin_unlock_irq(&card->lock);
+
+ return;
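/*
 * Aside: the mm_check_plugged() machinery above is an instance of the
 * 3.2-era plug-callback pattern -- while the submitter holds a
 * blk_plug, the driver queues one callback instead of kicking the
 * hardware per bio, and the callback fires once when the plug is
 * flushed. A condensed sketch against that old API (struct blk_plug_cb
 * with open-coded .list/.callback, exactly as used in the hunk);
 * my_dev and my_dev_kick() are hypothetical:
 */
struct my_plug_cb {
	struct blk_plug_cb cb;
	struct my_dev *dev;
};

static void my_unplug(struct blk_plug_cb *cb)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	my_dev_kick(mcb->dev);		/* start I/O once for the whole batch */
	kfree(mcb);
}

static bool my_check_plugged(struct my_dev *dev)
{
	struct blk_plug *plug = current->plug;
	struct my_plug_cb *mcb;

	if (!plug)
		return false;		/* not plugged: caller kicks immediately */
	list_for_each_entry(mcb, &plug->cb_list, cb.list)
		if (mcb->cb.callback == my_unplug && mcb->dev == dev)
			return true;	/* already queued for this device */
	mcb = kmalloc(sizeof(*mcb), GFP_ATOMIC);
	if (!mcb)
		return false;		/* fall back to an immediate kick */
	mcb->dev = dev;
	mcb->cb.callback = my_unplug;
	list_add(&mcb->cb.list, &plug->cb_list);
	return true;
}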
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index c4da951..ca67338 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1890,6 +1890,27 @@ ips_ping_for_i915_load(void)
+ }
+ }
+
++static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
++{
++ struct apertures_struct *ap;
++ struct pci_dev *pdev = dev_priv->dev->pdev;
++ bool primary;
++
++ ap = alloc_apertures(1);
++ if (!ap)
++ return;
++
++ ap->ranges[0].base = dev_priv->dev->agp->base;
++ ap->ranges[0].size =
++ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++ primary =
++ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
++
++ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
++
++ kfree(ap);
++}
++
+ /**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+@@ -1927,6 +1948,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ goto free_priv;
+ }
+
++ dev_priv->mm.gtt = intel_gtt_get();
++ if (!dev_priv->mm.gtt) {
++ DRM_ERROR("Failed to initialize GTT\n");
++ ret = -ENODEV;
++ goto put_bridge;
++ }
++
++ i915_kick_out_firmware_fb(dev_priv);
++
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+@@ -1950,13 +1980,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ goto put_bridge;
+ }
+
+- dev_priv->mm.gtt = intel_gtt_get();
+- if (!dev_priv->mm.gtt) {
+- DRM_ERROR("Failed to initialize GTT\n");
+- ret = -ENODEV;
+- goto out_rmmap;
+- }
+-
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ dev_priv->mm.gtt_mapping =
+diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
+index 50ed53b..fc90c11 100644
+--- a/drivers/md/persistent-data/dm-space-map-checker.c
++++ b/drivers/md/persistent-data/dm-space-map-checker.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/device-mapper.h>
+ #include <linux/export.h>
++#include <linux/vmalloc.h>
+
+ #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
+
+@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
+
+ ca->nr = nr_blocks;
+ ca->nr_free = nr_blocks;
+- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
+- if (!ca->counts)
+- return -ENOMEM;
++
++ if (!nr_blocks)
++ ca->counts = NULL;
++ else {
++ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
++ if (!ca->counts)
++ return -ENOMEM;
++ }
+
+ return 0;
+ }
+
++static void ca_destroy(struct count_array *ca)
++{
++ vfree(ca->counts);
++}
++
+ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
+ {
+ int r;
+@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
+ static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
+ {
+ dm_block_t nr_blocks = ca->nr + extra_blocks;
+- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
++ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
+ if (!counts)
+ return -ENOMEM;
+
+- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+- kfree(ca->counts);
++ if (ca->counts) {
++ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
++ ca_destroy(ca);
++ }
+ ca->nr = nr_blocks;
+ ca->nr_free += extra_blocks;
+ ca->counts = counts;
+@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
+ return 0;
+ }
+
+-static void ca_destroy(struct count_array *ca)
+-{
+- kfree(ca->counts);
+-}
+-
+ /*----------------------------------------------------------------*/
+
+ struct sm_checker {
+@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ int r;
+ struct sm_checker *smc;
+
+- if (!sm)
+- return NULL;
++ if (IS_ERR_OR_NULL(sm))
++ return ERR_PTR(-EINVAL);
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ smc->real_sm = sm;
+@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_commit(&smc->old_counts, &smc->counts);
+@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ return &smc->sm;
+@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
+ int r;
+ struct sm_checker *smc;
+
+- if (!sm)
+- return NULL;
++ if (IS_ERR_OR_NULL(sm))
++ return ERR_PTR(-EINVAL);
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ smc->real_sm = sm;
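/*
 * Aside: the change above replaces NULL returns with ERR_PTR() so the
 * caller can tell -EINVAL from -ENOMEM instead of collapsing every
 * failure into one value. The idiom in isolation (struct widget,
 * make_widget() and use_widget() are hypothetical):
 */
#include <linux/err.h>
#include <linux/slab.h>

struct widget { int val; };

static struct widget *make_widget(int arg)
{
	struct widget *w;

	if (arg < 0)
		return ERR_PTR(-EINVAL);	/* caller error */
	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);	/* resource error */
	w->val = arg;
	return w;
}

static int use_widget(int arg)
{
	struct widget *w = make_widget(arg);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* forward the precise errno */
	kfree(w);
	return 0;
}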
+diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
+index fc469ba..3d0ed53 100644
+--- a/drivers/md/persistent-data/dm-space-map-disk.c
++++ b/drivers/md/persistent-data/dm-space-map-disk.c
+@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+ {
+ struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
+- return dm_sm_checker_create_fresh(sm);
++ struct dm_space_map *smc;
++
++ if (IS_ERR_OR_NULL(sm))
++ return sm;
++
++ smc = dm_sm_checker_create_fresh(sm);
++ if (IS_ERR(smc))
++ dm_sm_destroy(sm);
++
++ return smc;
+ }
+ EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 6f8d387..ba54aac 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+ void dm_tm_destroy(struct dm_transaction_manager *tm)
+ {
++ if (!tm->is_clone)
++ wipe_shadow_table(tm);
++
+ kfree(tm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_destroy);
+@@ -342,8 +345,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
+ }
+
+ *sm = dm_sm_checker_create(inner);
+- if (!*sm)
++ if (IS_ERR(*sm)) {
++ r = PTR_ERR(*sm);
+ goto bad2;
++ }
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+@@ -362,8 +367,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
+ }
+
+ *sm = dm_sm_checker_create(inner);
+- if (!*sm)
++ if (IS_ERR(*sm)) {
++ r = PTR_ERR(*sm);
+ goto bad2;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index b219449..7a9eef6 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1919,7 +1919,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ if (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+- s<<9, conf->tmppage, WRITE)
++ s, conf->tmppage, WRITE)
+ == 0) {
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+@@ -1956,7 +1956,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ switch (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+- s<<9, conf->tmppage,
++ s, conf->tmppage,
+ READ)) {
+ case 0:
+ /* Well, this device is dead */
+@@ -2119,7 +2119,7 @@ read_more:
+ rdev = conf->mirrors[mirror].rdev;
+ printk_ratelimited(
+ KERN_ERR
+- "md/raid10:%s: %s: redirecting"
++ "md/raid10:%s: %s: redirecting "
+ "sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+@@ -2436,6 +2436,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ /* want to reconstruct this device */
+ rb2 = r10_bio;
+ sect = raid10_find_virt(conf, sector_nr, i);
++ if (sect >= mddev->resync_max_sectors) {
++ /* last stripe is not complete - don't
++ * try to recover this sector.
++ */
++ continue;
++ }
+ /* Unless we are doing a full sync, we only need
+ * to recover the block if it is set in the bitmap
+ */
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 858fdbb..6ba4954 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -542,6 +542,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ * a chance*/
+ md_check_recovery(conf->mddev);
+ }
++ /*
++ * Because md_wait_for_blocked_rdev
++ * will dec nr_pending, we must
++ * increment it first.
++ */
++ atomic_inc(&rdev->nr_pending);
+ md_wait_for_blocked_rdev(rdev, conf->mddev);
+ } else {
+ /* Acknowledged bad block - skip the write */
+@@ -3621,7 +3627,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+ raid_bio->bi_next = (void*)rdev;
+ align_bi->bi_bdev = rdev->bdev;
+ align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
+- align_bi->bi_sector += rdev->data_offset;
+
+ if (!bio_fits_rdev(align_bi) ||
+ is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+@@ -3632,6 +3637,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+ return 0;
+ }
+
++ /* No reshape active, so we can trust rdev->data_offset */
++ align_bi->bi_sector += rdev->data_offset;
++
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ conf->quiesce == 0,
+diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
+index 72d3f23..68ecf48 100644
+--- a/drivers/mtd/nand/cafe_nand.c
++++ b/drivers/mtd/nand/cafe_nand.c
+@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+ static int cafe_device_ready(struct mtd_info *mtd)
+ {
+ struct cafe_priv *cafe = mtd->priv;
+- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
++ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
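/*
 * Aside: the one-character fix above. OR-ing a status word with a mask
 * yields nonzero whenever either operand is nonzero, so
 * !!(status | BIT) reports "bit set" almost always; testing a flag
 * needs AND. Standalone demonstration, compiles and runs as-is:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t status = 0;			/* device NOT ready */
	const uint32_t ready = 0x40000000;

	assert(!!(status | ready) == 1);	/* buggy test: claims ready */
	assert(!!(status & ready) == 0);	/* fixed test: not ready */
	return 0;
}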
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f65e0b9..1a88e38 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -77,6 +77,7 @@
+ #include <net/route.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <net/pkt_sched.h>
+ #include "bonding.h"
+ #include "bond_3ad.h"
+ #include "bond_alb.h"
+@@ -382,8 +383,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
+ return next;
+ }
+
+-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+-
+ /**
+ * bond_dev_queue_xmit - Prepare skb for xmit.
+ *
+@@ -396,7 +395,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+ {
+ skb->dev = slave_dev;
+
+- skb->queue_mapping = bond_queue_mapping(skb);
++ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
++ sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
++ skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+
+ if (unlikely(netpoll_tx_running(slave_dev)))
+ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+@@ -4151,7 +4152,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+- bond_queue_mapping(skb) = skb->queue_mapping;
++ qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do {
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index eeac9ca..68fe73c 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -37,6 +37,7 @@
+ #include <linux/rtnetlink.h>
+ #include <net/rtnetlink.h>
+ #include <linux/u64_stats_sync.h>
++#include <linux/sched.h>
+
+ static int numdummies = 1;
+
+@@ -186,8 +187,10 @@ static int __init dummy_init_module(void)
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
+
+- for (i = 0; i < numdummies && !err; i++)
++ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
++ cond_resched();
++ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index bf266a0..36c7c4e 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -696,6 +696,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
+
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ if (copied) {
++ int gso_segs = skb_shinfo(skb)->gso_segs;
++
+ /* record the sent skb in the sent_skb table */
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
+@@ -713,8 +715,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
+
+ be_txq_notify(adapter, txq->id, wrb_cnt);
+
+- be_tx_stats_update(txo, wrb_cnt, copied,
+- skb_shinfo(skb)->gso_segs, stopped);
++ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
+ } else {
+ txq->head = start;
+ dev_kfree_skb_any(skb);
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 65c51ff..11ddd838 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4361,10 +4361,12 @@ static int sky2_set_features(struct net_device *dev, u32 features)
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+- if (changed & NETIF_F_RXCSUM) {
+- u32 on = features & NETIF_F_RXCSUM;
+- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
++ if ((changed & NETIF_F_RXCSUM) &&
++ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
++ sky2_write32(sky2->hw,
++ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
++ (features & NETIF_F_RXCSUM)
++ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
+
+ if (changed & NETIF_F_RXHASH)
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index 0f9ee46..4cc4a8b 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -143,6 +143,7 @@ struct ath_common {
+ u32 keymax;
+ DECLARE_BITMAP(keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
++ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
+ enum ath_crypt_caps crypt_caps;
+
+ unsigned int clockrate;
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 6973620..7f97164 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -557,7 +557,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+- ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
++ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+ !ah->is_pciexpress)) {
+ ah->config.serialize_regmode =
+ SER_REG_MODE_ON;
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index 2f3aeac..e6d791c 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -829,7 +829,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
++ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
++ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ if (!rx_stats->rs_datalen)
+diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
+index 4cf7c5e..1ec3fa5 100644
+--- a/drivers/net/wireless/ath/key.c
++++ b/drivers/net/wireless/ath/key.c
+@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
+ return -EIO;
+
+ set_bit(idx, common->keymap);
++ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
++ set_bit(idx, common->ccmp_keymap);
++
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ set_bit(idx, common->tkip_keymap);
+@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
++ clear_bit(key->hw_key_idx, common->ccmp_keymap);
+ if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
+index 7aa9aa0..39fd4d5 100644
+--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
+@@ -267,7 +267,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
+ else
+ last_seq = priv->rx_seq[tid];
+
+- if (last_seq >= new_node->start_win)
++ if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
++ last_seq >= new_node->start_win)
+ new_node->start_win = last_seq + 1;
+
+ new_node->win_size = win_size;
+@@ -611,5 +612,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+- memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
++ mwifiex_reset_11n_rx_seq_num(priv);
+ }
+diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
+index 033c8ad..7128baa 100644
+--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
++++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
+@@ -37,6 +37,13 @@
+
+ #define ADDBA_RSP_STATUS_ACCEPT 0
+
++#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
++
++static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
++{
++ memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
++}
++
+ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
+ u16 seqNum,
+ u16 tid, u8 *ta,
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 462c710..01dcb1a 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1177,11 +1177,11 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ void *mdev_priv;
+
+ if (!priv)
+- return NULL;
++ return ERR_PTR(-EFAULT);
+
+ adapter = priv->adapter;
+ if (!adapter)
+- return NULL;
++ return ERR_PTR(-EFAULT);
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+@@ -1190,7 +1190,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "cannot create multiple"
+ " station/adhoc interfaces\n");
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED)
+@@ -1208,14 +1208,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ break;
+ default:
+ wiphy_err(wiphy, "type not supported\n");
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
+ ether_setup, 1);
+ if (!dev) {
+ wiphy_err(wiphy, "no memory available for netdevice\n");
+- goto error;
++ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
++ return ERR_PTR(-ENOMEM);
+ }
+
+ dev_net_set(dev, wiphy_net(wiphy));
+@@ -1240,7 +1241,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ /* Register network device */
+ if (register_netdevice(dev)) {
+ wiphy_err(wiphy, "cannot register virtual network device\n");
+- goto error;
++ free_netdev(dev);
++ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
++ return ERR_PTR(-EFAULT);
+ }
+
+ sema_init(&priv->async_sem, 1);
+@@ -1252,12 +1255,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ mwifiex_dev_debugfs_init(priv);
+ #endif
+ return dev;
+-error:
+- if (dev && (dev->reg_state == NETREG_UNREGISTERED))
+- free_netdev(dev);
+- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+-
+- return NULL;
+ }
+ EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
+
+diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
+index 6c239c3..06fcf1e 100644
+--- a/drivers/net/wireless/mwifiex/wmm.c
++++ b/drivers/net/wireless/mwifiex/wmm.c
+@@ -406,6 +406,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
+ priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
+ priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+
++ mwifiex_reset_11n_rx_seq_num(priv);
++
+ atomic_set(&priv->wmm.tx_pkts_queued, 0);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+@@ -1209,10 +1211,12 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
+ return 0;
+ }
+
+- if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
+- || ((priv->sec_info.wpa_enabled
+- || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
+- ) {
++ if (!ptr->is_11n_enabled ||
++ mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
++ priv->wps.session_enable ||
++ ((priv->sec_info.wpa_enabled ||
++ priv->sec_info.wpa2_enabled) &&
++ !priv->wpa_is_gtk_set)) {
+ mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ mwifiex_send_single_packet() */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 94a3e17..0302148 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -311,9 +311,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
++ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
+@@ -355,6 +357,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
++ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
+ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 3269213..64ddb63 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -61,7 +61,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+ struct ft_tport *tport;
+ int i;
+
+- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
++ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
++ lockdep_is_held(&ft_lport_lock));
+ if (tport && tport->tpg)
+ return tport;
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 3568374..19b127c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -692,6 +692,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ kfree(name);
+
+ iput(inode);
++
++ btrfs_run_delayed_items(trans, root);
+ return ret;
+ }
+
+@@ -897,6 +899,7 @@ again:
+ ret = btrfs_unlink_inode(trans, root, dir,
+ inode, victim_name,
+ victim_name_len);
++ btrfs_run_delayed_items(trans, root);
+ }
+ kfree(victim_name);
+ ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
+@@ -1477,6 +1480,9 @@ again:
+ ret = btrfs_unlink_inode(trans, root, dir, inode,
+ name, name_len);
+ BUG_ON(ret);
++
++ btrfs_run_delayed_items(trans, root);
++
+ kfree(name);
+ iput(inode);
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 9e0675a..b21670c 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2975,18 +2975,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ * MS-CIFS indicates that servers are only limited by the client's
+ * bufsize for reads, testing against win98se shows that it throws
+ * INVALID_PARAMETER errors if you try to request too large a read.
++ * OS/2 just sends back short reads.
+ *
+- * If the server advertises a MaxBufferSize of less than one page,
+- * assume that it also can't satisfy reads larger than that either.
+- *
+- * FIXME: Is there a better heuristic for this?
++ * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
++ * it can't handle a read request larger than its MaxBufferSize either.
+ */
+ if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
+ defsize = CIFS_DEFAULT_IOSIZE;
+ else if (server->capabilities & CAP_LARGE_READ_X)
+ defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
+- else if (server->maxBuf >= PAGE_CACHE_SIZE)
+- defsize = CIFSMaxBufSize;
+ else
+ defsize = server->maxBuf - sizeof(READ_RSP);
+
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 6e39668..07ee5b4 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2422,8 +2422,10 @@ out_dio:
+ unaligned_dio = 0;
+ }
+
+- if (unaligned_dio)
++ if (unaligned_dio) {
++ ocfs2_iocb_clear_unaligned_aio(iocb);
+ atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
++ }
+
+ out:
+ if (rw_level != -1)
+diff --git a/fs/open.c b/fs/open.c
+index 22c41b5..e2b5d51 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -396,10 +396,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ {
+ struct file *file;
+ struct inode *inode;
+- int error;
++ int error, fput_needed;
+
+ error = -EBADF;
+- file = fget(fd);
++ file = fget_raw_light(fd, &fput_needed);
+ if (!file)
+ goto out;
+
+@@ -413,7 +413,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+- fput(file);
++ fput_light(file, fput_needed);
+ out:
+ return error;
+ }
+diff --git a/fs/splice.c b/fs/splice.c
+index 6d0dfb8..014fcb4 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -274,13 +274,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
+ * Check if we need to grow the arrays holding pages and partial page
+ * descriptions.
+ */
+-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
++int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+ {
+- if (pipe->buffers <= PIPE_DEF_BUFFERS)
++ unsigned int buffers = ACCESS_ONCE(pipe->buffers);
++
++ spd->nr_pages_max = buffers;
++ if (buffers <= PIPE_DEF_BUFFERS)
+ return 0;
+
+- spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
+- spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
++ spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
++ spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
+
+ if (spd->pages && spd->partial)
+ return 0;
+@@ -290,10 +293,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+ return -ENOMEM;
+ }
+
+-void splice_shrink_spd(struct pipe_inode_info *pipe,
+- struct splice_pipe_desc *spd)
++void splice_shrink_spd(struct splice_pipe_desc *spd)
+ {
+- if (pipe->buffers <= PIPE_DEF_BUFFERS)
++ if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
+ return;
+
+ kfree(spd->pages);
+@@ -316,6 +318,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &page_cache_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -327,7 +330,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
+ index = *ppos >> PAGE_CACHE_SHIFT;
+ loff = *ppos & ~PAGE_CACHE_MASK;
+ req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+- nr_pages = min(req_pages, pipe->buffers);
++ nr_pages = min(req_pages, spd.nr_pages_max);
+
+ /*
+ * Lookup the (hopefully) full range of pages we need.
+@@ -498,7 +501,7 @@ fill_it:
+ if (spd.nr_pages)
+ error = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return error;
+ }
+
+@@ -599,6 +602,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &default_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -609,8 +613,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+
+ res = -ENOMEM;
+ vec = __vec;
+- if (pipe->buffers > PIPE_DEF_BUFFERS) {
+- vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
++ if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
++ vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
+ if (!vec)
+ goto shrink_ret;
+ }
+@@ -618,7 +622,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ offset = *ppos & ~PAGE_CACHE_MASK;
+ nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+- for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
++ for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
+ struct page *page;
+
+ page = alloc_page(GFP_USER);
+@@ -666,7 +670,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ shrink_ret:
+ if (vec != __vec)
+ kfree(vec);
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return res;
+
+ err:
+@@ -1616,6 +1620,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &user_page_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -1631,13 +1636,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+
+ spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
+ spd.partial, flags & SPLICE_F_GIFT,
+- pipe->buffers);
++ spd.nr_pages_max);
+ if (spd.nr_pages <= 0)
+ ret = spd.nr_pages;
+ else
+ ret = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+ }
+
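/*
 * Aside: the nr_pages_max field introduced above exists because
 * pipe->buffers can be resized concurrently (fcntl F_SETPIPE_SZ), so
 * reading it more than once lets the allocation size and the later
 * bound checks / kfree() disagree. The rule is: snapshot once, use the
 * snapshot everywhere. Sketch of that shape (spd fields as in the
 * hunk, not a complete function):
 */
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);	/* single read */

	spd.nr_pages_max = buffers;	/* every later limit and free path
					 * consults this snapshot, never the
					 * live pipe->buffers field */
	if (buffers > PIPE_DEF_BUFFERS)
		spd.pages = kmalloc(buffers * sizeof(*spd.pages), GFP_KERNEL);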
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 87cb24a..270e135 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -56,6 +56,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/bitmap.h>
+ #include <linux/crc-itu-t.h>
++#include <linux/log2.h>
+ #include <asm/byteorder.h>
+
+ #include "udf_sb.h"
+@@ -1217,16 +1218,65 @@ out_bh:
+ return ret;
+ }
+
++static int udf_load_sparable_map(struct super_block *sb,
++ struct udf_part_map *map,
++ struct sparablePartitionMap *spm)
++{
++ uint32_t loc;
++ uint16_t ident;
++ struct sparingTable *st;
++ struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
++ int i;
++ struct buffer_head *bh;
++
++ map->s_partition_type = UDF_SPARABLE_MAP15;
++ sdata->s_packet_len = le16_to_cpu(spm->packetLength);
++ if (!is_power_of_2(sdata->s_packet_len)) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Invalid packet length %u\n",
++ (unsigned)sdata->s_packet_len);
++ return -EIO;
++ }
++ if (spm->numSparingTables > 4) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Too many sparing tables (%d)\n",
++ (int)spm->numSparingTables);
++ return -EIO;
++ }
++
++ for (i = 0; i < spm->numSparingTables; i++) {
++ loc = le32_to_cpu(spm->locSparingTable[i]);
++ bh = udf_read_tagged(sb, loc, loc, &ident);
++ if (!bh)
++ continue;
++
++ st = (struct sparingTable *)bh->b_data;
++ if (ident != 0 ||
++ strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
++ strlen(UDF_ID_SPARING)) ||
++ sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
++ sb->s_blocksize) {
++ brelse(bh);
++ continue;
++ }
++
++ sdata->s_spar_map[i] = bh;
++ }
++ map->s_partition_func = udf_get_pblock_spar15;
++ return 0;
++}
++
+ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ struct kernel_lb_addr *fileset)
+ {
+ struct logicalVolDesc *lvd;
+- int i, j, offset;
++ int i, offset;
+ uint8_t type;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct genericPartitionMap *gpm;
+ uint16_t ident;
+ struct buffer_head *bh;
++ unsigned int table_len;
+ int ret = 0;
+
+ bh = udf_read_tagged(sb, block, block, &ident);
+@@ -1234,15 +1284,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ return 1;
+ BUG_ON(ident != TAG_IDENT_LVD);
+ lvd = (struct logicalVolDesc *)bh->b_data;
+-
+- i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+- if (i != 0) {
+- ret = i;
++ table_len = le32_to_cpu(lvd->mapTableLength);
++ if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Partition table too long (%u > %lu)\n", table_len,
++ sb->s_blocksize - sizeof(*lvd));
+ goto out_bh;
+ }
+
++ ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
++ if (ret)
++ goto out_bh;
++
+ for (i = 0, offset = 0;
+- i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
++ i < sbi->s_partitions && offset < table_len;
+ i++, offset += gpm->partitionMapLength) {
+ struct udf_part_map *map = &sbi->s_partmaps[i];
+ gpm = (struct genericPartitionMap *)
+@@ -1277,38 +1332,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_SPARABLE,
+ strlen(UDF_ID_SPARABLE))) {
+- uint32_t loc;
+- struct sparingTable *st;
+- struct sparablePartitionMap *spm =
+- (struct sparablePartitionMap *)gpm;
+-
+- map->s_partition_type = UDF_SPARABLE_MAP15;
+- map->s_type_specific.s_sparing.s_packet_len =
+- le16_to_cpu(spm->packetLength);
+- for (j = 0; j < spm->numSparingTables; j++) {
+- struct buffer_head *bh2;
+-
+- loc = le32_to_cpu(
+- spm->locSparingTable[j]);
+- bh2 = udf_read_tagged(sb, loc, loc,
+- &ident);
+- map->s_type_specific.s_sparing.
+- s_spar_map[j] = bh2;
+-
+- if (bh2 == NULL)
+- continue;
+-
+- st = (struct sparingTable *)bh2->b_data;
+- if (ident != 0 || strncmp(
+- st->sparingIdent.ident,
+- UDF_ID_SPARING,
+- strlen(UDF_ID_SPARING))) {
+- brelse(bh2);
+- map->s_type_specific.s_sparing.
+- s_spar_map[j] = NULL;
+- }
+- }
+- map->s_partition_func = udf_get_pblock_spar15;
++ if (udf_load_sparable_map(sb, map,
++ (struct sparablePartitionMap *)gpm) < 0)
++ goto out_bh;
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+diff --git a/include/linux/aio.h b/include/linux/aio.h
+index 2314ad8..b1a520e 100644
+--- a/include/linux/aio.h
++++ b/include/linux/aio.h
+@@ -140,6 +140,7 @@ struct kiocb {
+ (x)->ki_dtor = NULL; \
+ (x)->ki_obj.tsk = tsk; \
+ (x)->ki_user_data = 0; \
++ (x)->private = NULL; \
+ } while (0)
+
+ #define AIO_RING_MAGIC 0xa10a10a1
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+index 26e5b61..09a545a 100644
+--- a/include/linux/splice.h
++++ b/include/linux/splice.h
+@@ -51,7 +51,8 @@ struct partial_page {
+ struct splice_pipe_desc {
+ struct page **pages; /* page map */
+ struct partial_page *partial; /* pages[] may not be contig */
+- int nr_pages; /* number of pages in map */
++ int nr_pages; /* number of populated pages in map */
++ unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
+ unsigned int flags; /* splice flags */
+ const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+ void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ /*
+ * for dynamic pipe sizing
+ */
+-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
+-extern void splice_shrink_spd(struct pipe_inode_info *,
+- struct splice_pipe_desc *);
++extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
++extern void splice_shrink_spd(struct splice_pipe_desc *);
+ extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
+
+ extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index 9808877..a7a683e 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -42,6 +42,7 @@
+ #include <net/netlabel.h>
+ #include <net/request_sock.h>
+ #include <linux/atomic.h>
++#include <asm/unaligned.h>
+
+ /* known doi values */
+ #define CIPSO_V4_DOI_UNKNOWN 0x00000000
+@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char **option)
+ {
+- return -ENOSYS;
++ unsigned char *opt = *option;
++ unsigned char err_offset = 0;
++ u8 opt_len = opt[1];
++ u8 opt_iter;
++
++ if (opt_len < 8) {
++ err_offset = 1;
++ goto out;
++ }
++
++ if (get_unaligned_be32(&opt[2]) == 0) {
++ err_offset = 2;
++ goto out;
++ }
++
++ for (opt_iter = 6; opt_iter < opt_len;) {
++ if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ err_offset = opt_iter + 1;
++ goto out;
++ }
++ opt_iter += opt[opt_iter + 1];
++ }
++
++out:
++ *option = opt + err_offset;
++ return err_offset;
++
+ }
+ #endif /* CONFIG_NETLABEL */
+
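/*
 * Aside: the loop added above is the standard TLV-walk guard -- before
 * advancing by a sub-option's length byte, verify the length fits in
 * what remains, or a crafted packet steers the parser past the option.
 * A generalized (and slightly stricter) standalone restatement, not
 * the kernel's exact routine:
 */
#include <stdint.h>
#include <stddef.h>

/* returns the offset of the first bad byte, or 0 if well-formed */
static size_t tlv_check(const uint8_t *opt, size_t opt_len, size_t start)
{
	size_t i = start;

	while (i < opt_len) {
		if (i + 2 > opt_len)
			return i;		/* truncated type/length header */
		if (opt[i + 1] < 2 || opt[i + 1] > opt_len - i)
			return i + 1;		/* length escapes the option */
		i += opt[i + 1];
	}
	return 0;
}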
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 55ce96b..9d7d54a 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -220,13 +220,16 @@ struct tcf_proto {
+
+ struct qdisc_skb_cb {
+ unsigned int pkt_len;
+- unsigned char data[24];
++ u16 bond_queue_mapping;
++ u16 _pad;
++ unsigned char data[20];
+ };
+
+ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+ {
+ struct qdisc_skb_cb *qcb;
+- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
++
++ BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+ }
+
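/*
 * Aside: the pair of hunks above (bonding + sch_generic.h) show the
 * skb->cb contract: the 48-byte scratch area is shared by overlaying a
 * layer's struct on it, so a field that must survive across layers has
 * to live in the struct both writer and reader use, with BUILD_BUG_ON()
 * proving at compile time that the overlay still fits. Minimal sketch
 * (my_skb_cb is illustrative):
 */
#include <linux/skbuff.h>
#include <linux/bug.h>

struct my_skb_cb {
	unsigned int pkt_len;
	u16 saved_queue;	/* readable by any layer that uses my_skb_cb */
};

static inline struct my_skb_cb *my_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct my_skb_cb) > sizeof(skb->cb));
	return (struct my_skb_cb *)skb->cb;
}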
+diff --git a/kernel/relay.c b/kernel/relay.c
+index b6f803a..a535fc9 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .nr_pages = 0,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .partial = partial,
+ .flags = flags,
+ .ops = &relay_pipe_buf_ops,
+@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
+ ret += padding;
+
+ out:
+- splice_shrink_spd(pipe, &spd);
+- return ret;
++ splice_shrink_spd(&spd);
++ return ret;
+ }
+
+ static ssize_t relay_file_splice_read(struct file *in,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 697e49d..5638104 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2541,10 +2541,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_inc(&global_trace.data[cpu]->disabled);
++ ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
+ }
+ if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
++ ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
+ }
+ }
+ arch_spin_unlock(&ftrace_max_lock);
+@@ -3456,6 +3458,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ .pages = pages_def,
+ .partial = partial_def,
+ .nr_pages = 0, /* This gets updated below. */
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &tracing_pipe_buf_ops,
+ .spd_release = tracing_spd_release_pipe,
+@@ -3527,7 +3530,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+
+ ret = splice_to_pipe(pipe, &spd);
+ out:
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+
+ out_err:
+@@ -4017,6 +4020,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages_def,
+ .partial = partial_def,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &buffer_pipe_buf_ops,
+ .spd_release = buffer_spd_release,
+@@ -4104,7 +4108,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ }
+
+ ret = splice_to_pipe(pipe, &spd);
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ out:
+ return ret;
+ }
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 74bf193..23d3a6b 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -13,6 +13,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/sched.h>
+ #include <linux/ksm.h>
++#include <linux/file.h>
+
+ /*
+ * Any behaviour which results in changes to the vma->vm_flags needs to
+@@ -197,14 +198,16 @@ static long madvise_remove(struct vm_area_struct *vma,
+ struct address_space *mapping;
+ loff_t offset, endoff;
+ int error;
++ struct file *f;
+
+ *prev = NULL; /* tell sys_madvise we drop mmap_sem */
+
+ if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+ return -EINVAL;
+
+- if (!vma->vm_file || !vma->vm_file->f_mapping
+- || !vma->vm_file->f_mapping->host) {
++ f = vma->vm_file;
++
++ if (!f || !f->f_mapping || !f->f_mapping->host) {
+ return -EINVAL;
+ }
+
+@@ -218,9 +221,16 @@ static long madvise_remove(struct vm_area_struct *vma,
+ endoff = (loff_t)(end - vma->vm_start - 1)
+ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+
+- /* vmtruncate_range needs to take i_mutex */
++ /*
++ * vmtruncate_range may need to take i_mutex. We need to
++ * explicitly grab a reference because the vma (and hence the
++ * vma's reference to the file) can go away as soon as we drop
++ * mmap_sem.
++ */
++ get_file(f);
+ up_read(&current->mm->mmap_sem);
+ error = vmtruncate_range(mapping->host, offset, endoff);
++ fput(f);
+ down_read(&current->mm->mmap_sem);
+ return error;
+ }
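/*
 * Aside: the madvise fix above is the general "pin before unlock" rule:
 * once mmap_sem is dropped, the vma -- and the reference it held on its
 * file -- can disappear, so anything reached through the vma must get
 * its own reference first. Shape of the pattern only (do_slow_op() is
 * hypothetical):
 */
	struct file *f = vma->vm_file;

	get_file(f);			/* own a reference before ...       */
	up_read(&mm->mmap_sem);		/* ... the vma can go away          */
	error = do_slow_op(f->f_mapping->host, offset, endoff);
	fput(f);			/* drop our reference               */
	down_read(&mm->mmap_sem);	/* NB: vma may be stale; revalidate */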
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 6c253f7..7a82174 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1359,6 +1359,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &page_cache_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -1447,7 +1448,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
+ if (spd.nr_pages)
+ error = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+
+ if (error > 0) {
+ *ppos += error;
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f603e5b..f3f75ad 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
+ return -ENOMEM;
+
+ dev_net_set(dev, net);
++ dev->rtnl_link_ops = &br_link_ops;
+
+ res = register_netdev(dev);
+ if (res)
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index a1daf82..cbf9ccd 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -211,7 +211,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static struct rtnl_link_ops br_link_ops __read_mostly = {
++struct rtnl_link_ops br_link_ops __read_mostly = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 93264df..b9bba8f 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -536,6 +536,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
+ #endif
+
+ /* br_netlink.c */
++extern struct rtnl_link_ops br_link_ops;
+ extern int br_netlink_init(void);
+ extern void br_netlink_fini(void);
+ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 2b587ec..2367246 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1672,6 +1672,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GSG:
++ case ETHTOOL_GSSET_INFO:
+ case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GPERMADDR:
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index ab0633f..db4bb7a 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -351,22 +351,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+ {
+- int total_len, eth_len, ip_len, udp_len;
++ int total_len, ip_len, udp_len;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+
+ udp_len = len + sizeof(*udph);
+- ip_len = eth_len = udp_len + sizeof(*iph);
+- total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
++ ip_len = udp_len + sizeof(*iph);
++ total_len = ip_len + LL_RESERVED_SPACE(np->dev);
+
+- skb = find_skb(np, total_len, total_len - len);
++ skb = find_skb(np, total_len + np->dev->needed_tailroom,
++ total_len - len);
+ if (!skb)
+ return;
+
+ skb_copy_to_linear_data(skb, msg, len);
+- skb->len += len;
++ skb_put(skb, len);
+
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2ec200de..af9c3c6 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1663,6 +1663,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = MAX_SKB_FRAGS,
+ .flags = flags,
+ .ops = &sock_pipe_buf_ops,
+ .spd_release = sock_spd_release,
+@@ -1709,7 +1710,7 @@ done:
+ lock_sock(sk);
+ }
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index b23f174..8d095b9 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1497,6 +1497,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
++ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++
++ err = -EMSGSIZE;
++ if (npages > MAX_SKB_FRAGS)
++ goto failure;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+@@ -1515,14 +1520,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+- int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+- npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 059b9d9..2e21751 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2881,10 +2881,6 @@ static int __net_init ip6_route_net_init(struct net *net)
+ net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
+ net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+
+-#ifdef CONFIG_PROC_FS
+- proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+- proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+-#endif
+ net->ipv6.ip6_rt_gc_expire = 30*HZ;
+
+ ret = 0;
+@@ -2905,10 +2901,6 @@ out_ip6_dst_ops:
+
+ static void __net_exit ip6_route_net_exit(struct net *net)
+ {
+-#ifdef CONFIG_PROC_FS
+- proc_net_remove(net, "ipv6_route");
+- proc_net_remove(net, "rt6_stats");
+-#endif
+ kfree(net->ipv6.ip6_null_entry);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
+@@ -2917,11 +2909,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+ }
+
++static int __net_init ip6_route_net_init_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
++ proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
++#endif
++ return 0;
++}
++
++static void __net_exit ip6_route_net_exit_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_remove(net, "ipv6_route");
++ proc_net_remove(net, "rt6_stats");
++#endif
++}
++
+ static struct pernet_operations ip6_route_net_ops = {
+ .init = ip6_route_net_init,
+ .exit = ip6_route_net_exit,
+ };
+
++static struct pernet_operations ip6_route_net_late_ops = {
++ .init = ip6_route_net_init_late,
++ .exit = ip6_route_net_exit_late,
++};
++
+ static struct notifier_block ip6_route_dev_notifier = {
+ .notifier_call = ip6_route_dev_notify,
+ .priority = 0,
+@@ -2971,19 +2985,25 @@ int __init ip6_route_init(void)
+ if (ret)
+ goto xfrm6_init;
+
++ ret = register_pernet_subsys(&ip6_route_net_late_ops);
++ if (ret)
++ goto fib6_rules_init;
++
+ ret = -ENOBUFS;
+ if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ ret = register_netdevice_notifier(&ip6_route_dev_notifier);
+ if (ret)
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ out:
+ return ret;
+
++out_register_late_subsys:
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_init:
+ fib6_rules_cleanup();
+ xfrm6_init:
+@@ -3002,6 +3022,7 @@ out_kmem_cache:
+ void ip6_route_cleanup(void)
+ {
+ unregister_netdevice_notifier(&ip6_route_dev_notifier);
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_cleanup();
+ xfrm6_fini();
+ fib6_gc_cleanup();
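/*
 * Aside: the init-ordering fix above moves the /proc files into a
 * second pernet_operations that is registered last and unregistered
 * first, so a new namespace never exposes files before the subsystems
 * they report on exist. Skeleton of that split (names illustrative):
 */
#include <net/net_namespace.h>

static int __net_init my_net_init_late(struct net *net)
{
	/* safe here: all earlier pernet subsystems already ran for net */
	return 0;
}

static void __net_exit my_net_exit_late(struct net *net)
{
}

static struct pernet_operations my_net_late_ops = {
	.init = my_net_init_late,
	.exit = my_net_exit_late,
};
/* module init: register_pernet_subsys(&my_net_late_ops) after the core
 * ops; module exit: unregister it before them, mirroring the hunk's
 * error unwinding. */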
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index d2726a7..3c55f63 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -167,6 +167,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
+ if (dev) {
+ unregister_netdev(dev);
+ spriv->dev = NULL;
++ module_put(THIS_MODULE);
+ }
+ }
+ }
+@@ -254,6 +255,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ if (rc < 0)
+ goto out_del_dev;
+
++ __module_get(THIS_MODULE);
+ /* Must be done after register_netdev() */
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+
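/*
 * Aside: the __module_get()/module_put() pair added above pins the
 * module while an object it created (the session's net_device) is
 * still alive, so rmmod cannot free code the object can still call
 * back into. The pattern reduced to its two halves (fragments, not a
 * complete driver):
 */
	/* creation path, once the long-lived object is visible: */
	__module_get(THIS_MODULE);	/* object now holds a module reference */

	/* teardown path, after the object is fully gone: */
	module_put(THIS_MODULE);	/* safe to unload again */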
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 2fbbe1f..6c7e609 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -515,10 +515,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ goto no_route;
+- if (connected)
++ if (connected) {
+ sk_setup_caps(sk, &rt->dst);
+- else
+- dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
++ } else {
++ skb_dst_set(skb, &rt->dst);
++ goto xmit;
++ }
+ }
+
+ /* We dont need to clone dst here, it is guaranteed to not disappear.
+@@ -526,6 +528,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ */
+ skb_dst_set_noref(skb, &rt->dst);
+
++xmit:
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb, &inet->cork.fl);
+ rcu_read_unlock();
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 064d20f..cda4875 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2389,7 +2389,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
+ * frames that we didn't handle, including returning unknown
+ * ones. For all other modes we will return them to the sender,
+ * setting the 0x80 bit in the action category, as required by
+- * 802.11-2007 7.3.1.11.
++ * 802.11-2012 9.24.4.
+ * Newer versions of hostapd shall also use the management frame
+ * registration mechanisms, but older ones still use cooked
+ * monitor interfaces so push all frames there.
+@@ -2399,6 +2399,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
+ return RX_DROP_MONITOR;
+
++ if (is_multicast_ether_addr(mgmt->da))
++ return RX_DROP_MONITOR;
++
+ /* do not return rejected action frames */
+ if (mgmt->u.action.category & 0x80)
+ return RX_DROP_UNUSABLE;
+diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
+index 96633f5..12b6a80 100644
+--- a/net/nfc/nci/ntf.c
++++ b/net/nfc/nci/ntf.c
+@@ -86,7 +86,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+ nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
+ data += 2;
+
+- nfca_poll->nfcid1_len = *data++;
++ nfca_poll->nfcid1_len = min_t(__u8, *data++, sizeof(nfca_poll->nfcid1));
+
+ nfc_dbg("sens_res 0x%x, nfcid1_len %d",
+ nfca_poll->sens_res,
+@@ -111,7 +111,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+
+ switch (ntf->rf_interface_type) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+- nfca_poll_iso_dep->rats_res_len = *data++;
++ nfca_poll_iso_dep->rats_res_len = min_t(__u8, *data++, 20);
+ if (nfca_poll_iso_dep->rats_res_len > 0) {
+ memcpy(nfca_poll_iso_dep->rats_res,
+ data,
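/*
 * Aside: both nci hunks above clamp a length byte read off the radio
 * before using it to memcpy into a fixed buffer -- wire-supplied
 * lengths are attacker-influenced. Standalone equivalent of the
 * min_t() clamp (struct tag_info and parse_id() are illustrative):
 */
#include <string.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct tag_info {
	uint8_t id[10];
	uint8_t id_len;
};

static const uint8_t *parse_id(struct tag_info *t, const uint8_t *data)
{
	t->id_len = MIN(*data++, sizeof(t->id));	/* clamp first */
	memcpy(t->id, data, t->id_len);			/* provably in bounds */
	return data + t->id_len;
}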
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index ee7b2b3..7a167fc 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -52,7 +52,10 @@ static int rawsock_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+
+- nfc_dbg("sock=%p", sock);
++ nfc_dbg("sock=%p sk=%p", sock, sk);
++
++ if (!sk)
++ return 0;
+
+ sock_orphan(sk);
+ sock_put(sk);
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 7b7a516..2b973f5 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4457,7 +4457,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ AC_PINCTL_IN_EN);
+ for (i = 0; i < spec->num_pwrs; i++) {
+ hda_nid_t nid = spec->pwr_nids[i];
+- int pinctl, def_conf;
++ unsigned int pinctl, def_conf;
+
+ /* power on when no jack detection is available */
+ /* or when the VREF is used for controlling LED */
+@@ -4484,7 +4484,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ def_conf = get_defcfg_connect(def_conf);
+ /* skip any ports that don't have jacks since presence
+ * detection is useless */
+- if (def_conf != AC_JACK_PORT_NONE &&
++ if (def_conf != AC_JACK_PORT_COMPLEX ||
+ !is_jack_detectable(codec, nid)) {
+ stac_toggle_power_map(codec, nid, 1);
+ continue;
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 87d5ef1..8b48801 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -963,9 +963,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
+ }
+
+ found:
+- data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
+- snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
+- data | (pll_p << PLLP_SHIFT));
++ snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
+ snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
+ pll_r << PLLR_SHIFT);
+ snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
+diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
+index 06a1978..16d9999 100644
+--- a/sound/soc/codecs/tlv320aic3x.h
++++ b/sound/soc/codecs/tlv320aic3x.h
+@@ -166,6 +166,7 @@
+
+ /* PLL registers bitfields */
+ #define PLLP_SHIFT 0
++#define PLLP_MASK 7
+ #define PLLQ_SHIFT 3
+ #define PLLR_SHIFT 0
+ #define PLLJ_SHIFT 2
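/*
 * Aside: the tlv320aic3x change works because OR can only set bits --
 * after the PLLP field has once held 7, writing a smaller value with
 * read/OR/write still reads back 7. snd_soc_update_bits() clears the
 * masked field before setting it. The semantics, runnable standalone:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
	return (uint8_t)((old & ~mask) | (val & mask));	/* clear, then set */
}

int main(void)
{
	uint8_t reg = 0x07;		/* PLLP field currently 7 */

	assert((reg | 0x02) == 0x07);			/* OR-in: still 7 (bug) */
	assert(update_bits(reg, 0x07, 0x02) == 0x02);	/* masked: now 2 */
	return 0;
}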
diff --git a/3.2.54/1023_linux-3.2.24.patch b/3.2.54/1023_linux-3.2.24.patch
new file mode 100644
index 0000000..4692eb4
--- /dev/null
+++ b/3.2.54/1023_linux-3.2.24.patch
@@ -0,0 +1,4684 @@
+diff --git a/Makefile b/Makefile
+index 40d1e3b..80bb4fd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
+index 33ecd0c..b1e05cc 100644
+--- a/arch/arm/plat-samsung/adc.c
++++ b/arch/arm/plat-samsung/adc.c
+@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
+ return -EINVAL;
+ }
+
+- if (client->is_ts && adc->ts_pend)
+- return -EAGAIN;
+-
+ spin_lock_irqsave(&adc->lock, flags);
+
++ if (client->is_ts && adc->ts_pend) {
++ spin_unlock_irqrestore(&adc->lock, flags);
++ return -EAGAIN;
++ }
++
+ client->channel = channel;
+ client->nr_samples = nr_samples;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 97f8bf6..adda036 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -60,6 +60,8 @@ struct thread_info {
+ register struct thread_info *__current_thread_info __asm__("$28");
+ #define current_thread_info() __current_thread_info
+
++#endif /* !__ASSEMBLY__ */
++
+ /* thread information allocation */
+ #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+ #define THREAD_SIZE_ORDER (1)
+@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
+
+ #define free_thread_info(info) kfree(info)
+
+-#endif /* !__ASSEMBLY__ */
+-
+ #define PREEMPT_ACTIVE 0x10000000
+
+ /*
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index a81176f..be281c6 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -1,5 +1,6 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/page.h>
++#include <asm/thread_info.h>
+ #include <asm-generic/vmlinux.lds.h>
+
+ #undef mips
+@@ -73,7 +74,7 @@ SECTIONS
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+
+- INIT_TASK_DATA(PAGE_SIZE)
++ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
+index 98b7c4b..fa3f921 100644
+--- a/arch/powerpc/include/asm/cputime.h
++++ b/arch/powerpc/include/asm/cputime.h
+@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+ /*
+ * Convert cputime <-> microseconds
+ */
+-extern u64 __cputime_msec_factor;
++extern u64 __cputime_usec_factor;
+
+ static inline unsigned long cputime_to_usecs(const cputime_t ct)
+ {
+- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
++ return mulhdu(ct, __cputime_usec_factor);
+ }
+
+ static inline cputime_t usecs_to_cputime(const unsigned long us)
+@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
+ sec = us / 1000000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+- do_div(ct, 1000);
++ do_div(ct, 1000000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 5db163c..ec8affe 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+ * Factors for converting from cputime_t (timebase ticks) to
+- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
++ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+ u64 __cputime_jiffies_factor;
+ EXPORT_SYMBOL(__cputime_jiffies_factor);
+-u64 __cputime_msec_factor;
+-EXPORT_SYMBOL(__cputime_msec_factor);
++u64 __cputime_usec_factor;
++EXPORT_SYMBOL(__cputime_usec_factor);
+ u64 __cputime_sec_factor;
+ EXPORT_SYMBOL(__cputime_sec_factor);
+ u64 __cputime_clockt_factor;
+@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
+
+ div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+ __cputime_jiffies_factor = res.result_low;
+- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+- __cputime_msec_factor = res.result_low;
++ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
++ __cputime_usec_factor = res.result_low;
+ div128_by_32(1, 0, tb_ticks_per_sec, &res);
+ __cputime_sec_factor = res.result_low;
+ div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 4558f0d..479d03c 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+ return 0;
+ }
+
+- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
++ if (intsrc->source_irq == 0) {
+ if (acpi_skip_timer_override) {
+- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ return 0;
+ }
+- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
++
++ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
++ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
+@@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
+ }
+
+ /*
+- * Force ignoring BIOS IRQ0 pin2 override
++ * Force ignoring BIOS IRQ0 override
+ */
+ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+ {
+- /*
+- * The ati_ixp4x0_rev() early PCI quirk should have set
+- * the acpi_skip_timer_override flag already:
+- */
+ if (!acpi_skip_timer_override) {
+- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
+- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
++ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
+ d->ident);
+ acpi_skip_timer_override = 1;
+ }
+@@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ * is enabled. This input is incorrectly designated the
+ * ISA IRQ 0 via an interrupt source override even though
+ * it is wired to the output of the master 8259A and INTIN0
+- * is not connected at all. Force ignoring BIOS IRQ0 pin2
++ * is not connected at all. Force ignoring BIOS IRQ0
+ * override in that case.
+ */
+ {
+@@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
++ {
++ .callback = dmi_ignore_irq0_timer_override,
++ .ident = "FUJITSU SIEMENS",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
++ },
++ },
+ {}
+ };
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 37a458b..e61f79c 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -460,6 +460,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
++ { /* Handle problems with rebooting on the Precision M6600. */
++ .callback = set_pci_reboot,
++ .ident = "Dell Precision M6600",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
++ },
++ },
+ { }
+ };
+
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 688be8a..9e76a32 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
+ break;
+ }
+
++ if (capable(CAP_SYS_RAWIO))
++ return 0;
++
+ /* In particular, rule out all resets and host-specific ioctls. */
+ printk_ratelimited(KERN_WARNING
+ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
+
+- return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++ return -ENOTTY;
+ }
+ EXPORT_SYMBOL(scsi_verify_blk_ioctl);
+
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index c850de4..eff7222 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+- * Ignores apic_id and always return 0 for CPU0's handle.
++ * Ignores apic_id and always returns 0 for the processor
++ * handle with acpi id 0 if nr_cpu_ids is 1.
++ * This should be the case if SMP tables are not found.
+ * Return -1 for other CPU's handle.
+ */
+- if (acpi_id == 0)
++ if (nr_cpu_ids <= 1 && acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index ca191ff..ed6bc52 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -702,8 +702,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+- (device_may_wakeup(dev) &&
+- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
++ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
++ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 9f66181..240a244 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ {
+ int result = 0;
+
+- if (!strncmp(val, "enable", strlen("enable") - 1)) {
++ if (!strncmp(val, "enable", strlen("enable"))) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ goto exit;
+ }
+
+- if (!strncmp(val, "disable", strlen("disable") - 1)) {
++ if (!strncmp(val, "disable", strlen("disable"))) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
+index 96198f3..a2da8f2 100644
+--- a/drivers/gpio/gpio-wm8994.c
++++ b/drivers/gpio/gpio-wm8994.c
+@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
++ if (value)
++ value = WM8994_GPN_LVL;
++
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+- WM8994_GPN_DIR, 0);
++ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
+ }
+
+ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6aa7716..cc75c4b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8043,8 +8043,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ if (intel_enable_rc6(dev_priv->dev))
+- rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
+- GEN6_RC_CTL_RC6_ENABLE;
++ rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
++ ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 299d238..899c712 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c27b402..95430a0 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1374,6 +1374,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -1884,6 +1887,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+@@ -1968,6 +1972,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index fba3fc4..7db934d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -125,6 +125,9 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+@@ -491,6 +494,9 @@
+ #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+ #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+
++#define USB_VENDOR_ID_MADCATZ 0x0738
++#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
++
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index d912649..1ba7af2 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -2086,7 +2086,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
+
+ /* Start monitoring */
+ it87_write_value(data, IT87_REG_CONFIG,
+- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
++ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
+ | (update_vbat ? 0x41 : 0x01));
+ }
+
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index 61c9cf1..1201a15 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+ spin_lock_init(&hwlock->lock);
+ hwlock->bank = bank;
+
+- ret = hwspin_lock_register_single(hwlock, i);
++ ret = hwspin_lock_register_single(hwlock, base_id + i);
+ if (ret)
+ goto reg_failed;
+ }
+@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+
+ reg_failed:
+ while (--i >= 0)
+- hwspin_lock_unregister_single(i);
++ hwspin_lock_unregister_single(base_id + i);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hwspin_lock_register);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index d728875..2189cbf 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -142,6 +142,7 @@ static const struct xpad_device {
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
++ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
+@@ -164,6 +165,7 @@ static const struct xpad_device {
+ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
+ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
+ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
++ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
+ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
+- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ { }
+ };
+
+diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
+index 5ec617e..ec58f48 100644
+--- a/drivers/input/mouse/bcm5974.c
++++ b/drivers/input/mouse/bcm5974.c
+@@ -79,6 +79,10 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
++/* MacbookPro10,1 (unibody, June 2012) */
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+
+ #define BCM5974_DEVICE(prod) { \
+ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
+@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
++ /* MacbookPro10,1 */
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ /* Terminating entry */
+ {}
+ };
+@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
++ {
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
++ HAS_INTEGRATED_BUTTON,
++ 0x84, sizeof(struct bt_data),
++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
++ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
++ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
++ },
+ {}
+ };
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index f1d5408..a1b8caa 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
+
+ static struct iommu_ops amd_iommu_ops;
+
++static struct dma_map_ops amd_iommu_dma_ops;
++
+ /*
+ * general struct to manage commands sent to an IOMMU
+ */
+@@ -1878,6 +1880,11 @@ static int device_change_notifier(struct notifier_block *nb,
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
++ if (!iommu_pass_through)
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
++ else
++ dev->archdata.dma_ops = &nommu_dma_ops;
++
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 6269eb0..ef2d493 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1468,6 +1468,8 @@ static int __init amd_iommu_init(void)
+
+ register_syscore_ops(&amd_iommu_syscore_ops);
+
++ x86_platform.iommu_shutdown = disable_iommus;
++
+ if (iommu_pass_through)
+ goto out;
+
+@@ -1476,7 +1478,6 @@ static int __init amd_iommu_init(void)
+ else
+ printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
+- x86_platform.iommu_shutdown = disable_iommus;
+ out:
+ return ret;
+
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 9bfd057..dae2b7a 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1080,6 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ ti->split_io = dm_rh_get_region_size(ms->rh);
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
++ ti->discard_zeroes_data_unsupported = 1;
+
+ ms->kmirrord_wq = alloc_workqueue("kmirrord",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+@@ -1210,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+ * We need to dec pending if this was a write.
+ */
+ if (rw == WRITE) {
+- if (!(bio->bi_rw & REQ_FLUSH))
++ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ dm_rh_dec(ms->rh, map_context->ll);
+ return error;
+ }
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 7771ed2..69732e0 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
+ return;
+ }
+
++ if (bio->bi_rw & REQ_DISCARD)
++ return;
++
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+ struct bio *bio;
+
+ for (bio = bios->head; bio; bio = bio->bi_next) {
+- if (bio->bi_rw & REQ_FLUSH)
++ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ continue;
+ rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 700ecae..d8646d7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
+ return sprintf(page, "%s\n", array_states[st]);
+ }
+
+-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
+-static int md_set_readonly(struct mddev * mddev, int is_open);
++static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
++static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
+ static int do_md_run(struct mddev * mddev);
+ static int restart_array(struct mddev *mddev);
+
+@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ /* stopping an active array */
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 0, 0);
++ err = do_md_stop(mddev, 0, NULL);
+ break;
+ case inactive:
+ /* stopping an active array */
+ if (mddev->pers) {
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 2, 0);
++ err = do_md_stop(mddev, 2, NULL);
+ } else
+ err = 0; /* already inactive */
+ break;
+@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ break; /* not supported yet */
+ case readonly:
+ if (mddev->pers)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else {
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ case read_auto:
+ if (mddev->pers) {
+ if (mddev->ro == 0)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else if (mddev->ro == 1)
+ err = restart_array(mddev);
+ if (err == 0) {
+@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(md_stop);
+
+-static int md_set_readonly(struct mddev *mddev, int is_open)
++static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ {
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open) {
++ if (atomic_read(&mddev->openers) > !!bdev) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
++ if (bdev)
++ sync_blockdev(bdev);
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+@@ -5108,18 +5110,26 @@ out:
+ * 0 - completely stop and dis-assemble array
+ * 2 - stop but do not disassemble array
+ */
+-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
++static int do_md_stop(struct mddev * mddev, int mode,
++ struct block_device *bdev)
+ {
+ struct gendisk *disk = mddev->gendisk;
+ struct md_rdev *rdev;
+
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open ||
++ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sysfs_active) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
++ if (bdev)
++ /* It is possible IO was issued on some other
++ * open file which was closed before we took ->open_mutex.
++ * As that was not the last close __blkdev_put will not
++ * have called sync_blockdev, so we must.
++ */
++ sync_blockdev(bdev);
+
+ if (mddev->pers) {
+ if (mddev->ro)
+@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
+ err = do_md_run(mddev);
+ if (err) {
+ printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+- do_md_stop(mddev, 0, 0);
++ do_md_stop(mddev, 0, NULL);
+ }
+ }
+
+@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ goto done_unlock;
+
+ case STOP_ARRAY:
+- err = do_md_stop(mddev, 0, 1);
++ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
+
+ case STOP_ARRAY_RO:
+- err = md_set_readonly(mddev, 1);
++ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
+
+ case BLKROSET:
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7af60ec..2d97bf0 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1713,8 +1713,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+
+ if (atomic_dec_and_test(&r1_bio->remaining)) {
+ /* if we're here, all write(s) have completed, so clean up */
+- md_done_sync(mddev, r1_bio->sectors, 1);
+- put_buf(r1_bio);
++ int s = r1_bio->sectors;
++ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
++ test_bit(R1BIO_WriteError, &r1_bio->state))
++ reschedule_retry(r1_bio);
++ else {
++ put_buf(r1_bio);
++ md_done_sync(mddev, s, 1);
++ }
+ }
+ }
+
+@@ -2378,9 +2384,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ atomic_set(&r1_bio->remaining, read_targets);
+- for (i=0; i<conf->raid_disks; i++) {
++ for (i = 0; i < conf->raid_disks && read_targets; i++) {
+ bio = r1_bio->bios[i];
+ if (bio->bi_end_io == end_sync_read) {
++ read_targets--;
+ md_sync_acct(bio->bi_bdev, nr_sectors);
+ generic_make_request(bio);
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 6ba4954..26ef63a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
+ if (test_bit(STRIPE_HANDLE, &sh->state)) {
+- if (test_bit(STRIPE_DELAYED, &sh->state))
++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ list_add_tail(&sh->lru, &conf->delayed_list);
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
+ list_add_tail(&sh->lru, &conf->bitmap_list);
+ else {
++ clear_bit(STRIPE_DELAYED, &sh->state);
+ clear_bit(STRIPE_BIT_DELAY, &sh->state);
+ list_add_tail(&sh->lru, &conf->handle_list);
+ }
+diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
+index f732877..d5cda35 100644
+--- a/drivers/media/dvb/dvb-core/dvbdev.c
++++ b/drivers/media/dvb/dvb-core/dvbdev.c
+@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ if (minor == MAX_DVB_MINORS) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
++ up_write(&minor_rwsem);
+ mutex_unlock(&dvbdev_register_lock);
+ return -EINVAL;
+ }
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 34c03be..83e8e1b 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -28,7 +28,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/vmalloc.h>
+-#include <asm/div64.h>
++#include <linux/math64.h>
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
+ return kstrdup(buf, GFP_KERNEL);
+ }
+
+-static uint64_t divide(uint64_t n, uint32_t d)
+-{
+- do_div(n, d);
+- return n;
+-}
+-
+ /*
+ * Initialize the nandsim structure.
+ *
+@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
++ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
+
+ if (!rptwear)
+ return 0;
+- wear_eb_count = divide(mtd->size, mtd->erasesize);
++ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
+index 3680aa2..2cf084e 100644
+--- a/drivers/net/bonding/bond_debugfs.c
++++ b/drivers/net/bonding/bond_debugfs.c
+@@ -6,7 +6,7 @@
+ #include "bonding.h"
+ #include "bond_alb.h"
+
+-#ifdef CONFIG_DEBUG_FS
++#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1a88e38..6c284d1 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3184,6 +3184,12 @@ static int bond_master_netdev_event(unsigned long event,
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ return bond_event_changename(event_bond);
++ case NETDEV_UNREGISTER:
++ bond_remove_proc_entry(event_bond);
++ break;
++ case NETDEV_REGISTER:
++ bond_create_proc_entry(event_bond);
++ break;
+ default:
+ break;
+ }
+@@ -4391,8 +4397,6 @@ static void bond_uninit(struct net_device *bond_dev)
+
+ bond_work_cancel_all(bond);
+
+- bond_remove_proc_entry(bond);
+-
+ bond_debug_unregister(bond);
+
+ __hw_addr_flush(&bond->mc_list);
+@@ -4794,7 +4798,6 @@ static int bond_init(struct net_device *bond_dev)
+
+ bond_set_lockdep_class(bond_dev);
+
+- bond_create_proc_entry(bond);
+ list_add_tail(&bond->bond_list, &bn->dev_list);
+
+ bond_prepare_sysfs_group(bond);
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index eccdcff..5ae7df7 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
+ dev_warn(&pdev->dev, "stop mac failed\n");
+ atl1c_set_aspm(hw, false);
+ netif_carrier_off(netdev);
+- netif_stop_queue(netdev);
+ atl1c_phy_reset(hw);
+ atl1c_phy_init(&adapter->hw);
+ } else {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index aec7212..8dda46a 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -723,21 +723,6 @@ struct bnx2x_fastpath {
+
+ #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+-#define BNX2X_IP_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+-
+-#define BNX2X_L4_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+-
+-#define BNX2X_RX_CSUM_OK(cqe) \
+- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+-
+ #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 580b44e..2c1a5c0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
++ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+ }
+
++static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
++ struct bnx2x_fastpath *fp)
++{
++ /* Do nothing if no IP/L4 csum validation was done */
++
++ if (cqe->fast_path_cqe.status_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ return;
++
++ /* If both IP/L4 validation were done, check if an error was found. */
++
++ if (cqe->fast_path_cqe.type_error_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
++ fp->eth_q_stats.hw_csum_err++;
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
+ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+ {
+ struct bnx2x *bp = fp->bp;
+@@ -746,13 +766,9 @@ reuse_rx:
+
+ skb_checksum_none_assert(skb);
+
+- if (bp->dev->features & NETIF_F_RXCSUM) {
++ if (bp->dev->features & NETIF_F_RXCSUM)
++ bnx2x_csum_validate(skb, cqe, fp);
+
+- if (likely(BNX2X_RX_CSUM_OK(cqe)))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- else
+- fp->eth_q_stats.hw_csum_err++;
+- }
+ }
+
+ skb_record_rx_queue(skb, fp->index);
+@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
+ /* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+- * So far this has only been observed to happen
+- * in Other Operating Systems(TM)
+ */
+ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ txdata->tx_bd_prod += nbd;
+
+- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
++ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
++ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 2dcac28..6b258d9 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14046,7 +14046,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ }
+ }
+
+- if (tg3_flag(tp, 5755_PLUS))
++ if (tg3_flag(tp, 5755_PLUS) ||
++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index e556fc3..3072d35 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -1571,6 +1571,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
++ /* SYNCH bit and IV bit are sticky */
++ udelay(10);
++ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index cc2565c..9e61d6b 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4185,6 +4185,7 @@ out:
+ return rc;
+
+ err_out_msi_4:
++ netif_napi_del(&tp->napi);
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+ err_out_free_res_3:
+@@ -4210,6 +4211,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
+
+ cancel_delayed_work_sync(&tp->task);
+
++ netif_napi_del(&tp->napi);
++
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 72cd190..d4d2bc1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1174,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ wmb();
+ priv->hw->desc->set_tx_owner(desc);
++ wmb();
+ }
+
+ /* Interrupt on completion only for the latest segment */
+@@ -1189,6 +1190,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* To avoid race condition */
+ priv->hw->desc->set_tx_owner(first);
++ wmb();
+
+ priv->cur_tx++;
+
+@@ -1252,6 +1254,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+ }
+ wmb();
+ priv->hw->desc->set_rx_owner(p + entry);
++ wmb();
+ }
+ }
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 1b7082d..26106c0 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -504,10 +504,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ if (copy > size) {
+ ++from;
+ --count;
+- }
++ offset = 0;
++ } else
++ offset += size;
+ copy -= size;
+ offset1 += size;
+- offset = 0;
+ }
+
+ if (len == offset1)
+@@ -517,24 +518,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
++ unsigned long truesize;
+
+- len = from->iov_len - offset1;
++ len = from->iov_len - offset;
+ if (!len) {
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ continue;
+ }
+- base = (unsigned long)from->iov_base + offset1;
++ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (i + size > MAX_SKB_FRAGS)
++ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+- if ((num_pages != size) ||
+- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
+- /* put_page is in skb free */
++ if (num_pages != size) {
++ for (i = 0; i < num_pages; i++)
++ put_page(page[i]);
+ return -EFAULT;
++ }
++ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+- skb->truesize += len;
+- atomic_add(len, &skb->sk->sk_wmem_alloc);
++ skb->truesize += truesize;
++ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+@@ -545,7 +551,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ len -= size;
+ i++;
+ }
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ }
+ return 0;
+@@ -645,7 +651,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+- int copylen;
++ int copylen = 0;
+ bool zerocopy = false;
+
+ if (q->flags & IFF_VNET_HDR) {
+@@ -674,15 +680,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
++ err = -EMSGSIZE;
++ if (unlikely(count > UIO_MAXIOV))
++ goto err;
++
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+ zerocopy = true;
+
+ if (zerocopy) {
++ /* Userspace may produce vectors with count greater than
++ * MAX_SKB_FRAGS, so we need to linearize parts of the skb
++ * to let the rest of the data fit in the frags.
++ */
++ if (count > MAX_SKB_FRAGS) {
++ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
++ if (copylen < vnet_hdr_len)
++ copylen = 0;
++ else
++ copylen -= vnet_hdr_len;
++ }
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest of the buffer is mapped from userspace.
+ */
+- copylen = vnet_hdr.hdr_len;
++ if (copylen < vnet_hdr.hdr_len)
++ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+@@ -693,10 +715,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (!skb)
+ goto err;
+
+- if (zerocopy) {
++ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+- } else
++ else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
+ if (err)
+@@ -715,8 +736,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ rcu_read_lock_bh();
+ vlan = rcu_dereference_bh(q->vlan);
+ /* copy skb_ubuf_info for callback when skb has no error */
+- if (zerocopy)
++ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
++ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
++ }
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index ad96164..00ed9c1 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -59,6 +59,7 @@
+ #define USB_PRODUCT_IPHONE_3G 0x1292
+ #define USB_PRODUCT_IPHONE_3GS 0x1294
+ #define USB_PRODUCT_IPHONE_4 0x1297
++#define USB_PRODUCT_IPAD 0x129a
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
+ #define USB_PRODUCT_IPHONE_4S 0x12a0
+
+@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index 833cbef..8a40ff9 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -900,8 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
+ */
+ if (!(txs->status & TX_STATUS_AMPDU)
+ && (txs->status & TX_STATUS_INTERMEDIATE)) {
+- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
+- __func__);
++ BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
+ return false;
+ }
+
+diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
+new file mode 100644
+index 0000000..4007bf5
+--- /dev/null
++++ b/drivers/net/wireless/ipw2x00/ipw.h
+@@ -0,0 +1,23 @@
++/*
++ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
++ *
++ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __IPW_H__
++#define __IPW_H__
++
++#include <linux/ieee80211.h>
++
++static const u32 ipw_cipher_suites[] = {
++ WLAN_CIPHER_SUITE_WEP40,
++ WLAN_CIPHER_SUITE_WEP104,
++ WLAN_CIPHER_SUITE_TKIP,
++ WLAN_CIPHER_SUITE_CCMP,
++};
++
++#endif
+diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
+index 127e9c6..10862d4 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
+@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
+ #include <net/lib80211.h>
+
+ #include "ipw2100.h"
++#include "ipw.h"
+
+ #define IPW2100_VERSION "git-1.2.2"
+
+@@ -1955,6 +1956,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+ if (wiphy_register(wdev->wiphy)) {
+ ipw2100_down(priv);
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 827889b..56bd370 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <net/cfg80211-wext.h>
+ #include "ipw2200.h"
++#include "ipw.h"
+
+
+ #ifndef KBUILD_EXTMOD
+@@ -11535,6 +11536,9 @@ static int ipw_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+
+ /* With that information in place, we can now register the wiphy... */
+diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+index a262c23..0116ca8 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
++++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+@@ -466,7 +466,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ return 0;
+ }
+
+- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
++ if (priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
+ IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+@@ -483,7 +483,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ sizeof(struct iwl4965_keyinfo));
+ priv->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
++ priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
+index 2bd5659..1bb64c9 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-core.c
++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
+@@ -1884,14 +1884,12 @@ void iwl_legacy_bg_watchdog(unsigned long data)
+ return;
+
+ /* monitor and check for other stuck queues */
+- if (iwl_legacy_is_any_associated(priv)) {
+- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+- /* skip as we already checked the command queue */
+- if (cnt == priv->cmd_queue)
+- continue;
+- if (iwl_legacy_check_stuck_queue(priv, cnt))
+- return;
+- }
++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
++ /* skip as we already checked the command queue */
++ if (cnt == priv->cmd_queue)
++ continue;
++ if (iwl_legacy_check_stuck_queue(priv, cnt))
++ return;
+ }
+
+ mod_timer(&priv->watchdog, jiffies +
+diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
+index 1e31050..ba28807 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
+@@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
+ case QID_RX:
+ if (!rt2x00queue_full(queue))
+ rt2x00queue_for_each_entry(queue,
+- Q_INDEX_DONE,
+ Q_INDEX,
++ Q_INDEX_DONE,
+ NULL,
+ rt2x00usb_kick_rx_entry);
+ break;
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+index 2e0de2f..c2d5b49 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
+ radio_on = true;
+ } else if (radio_on) {
+ radio_on = false;
+- cancel_delayed_work_sync(&priv->led_on);
++ cancel_delayed_work(&priv->led_on);
+ ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
+ }
+ } else if (radio_on) {
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 12d1e81..d024f83 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -742,6 +742,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
+
+ pci_pm_set_unknown_state(pci_dev);
+
++ /*
++ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
++ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
++ * hasn't been quiesced and tries to turn it off. If the controller
++ * is already in D3, this can hang or cause memory corruption.
++ *
++ * Since the value of the COMMAND register doesn't matter once the
++ * device has been suspended, we can safely set it to 0 here.
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e5b75eb..6d4a531 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1689,11 +1689,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+- /* Some devices mustn't be in D3 during system sleep */
+- if (target_state == PCI_D3hot &&
+- (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+- return 0;
+-
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+
+ error = pci_set_power_state(dev, target_state);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 3c56fec..78fda9c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2940,32 +2940,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+
+-/*
+- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+- * ASUS motherboards will cause memory corruption or a system crash
+- * if they are in D3 while the system is put into S3 sleep.
+- */
+-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+-{
+- const char *sys_info;
+- static const char good_Asus_board[] = "P8Z68-V";
+-
+- if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+- return;
+- if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+- return;
+- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+- if (sys_info && memcmp(sys_info, good_Asus_board,
+- sizeof(good_Asus_board) - 1) == 0)
+- return;
+-
+- dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+- dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+- device_set_wakeup_capable(&dev->dev, false);
+-}
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+-
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index 809a3ae..b46ec11 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -72,6 +72,7 @@
+ #include <linux/string.h>
+ #include <linux/tick.h>
+ #include <linux/timer.h>
++#include <linux/dmi.h>
+ #include <drm/i915_drm.h>
+ #include <asm/msr.h>
+ #include <asm/processor.h>
+@@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
+
+ MODULE_DEVICE_TABLE(pci, ips_id_table);
+
++static int ips_blacklist_callback(const struct dmi_system_id *id)
++{
++ pr_info("Blacklisted intel_ips for %s\n", id->ident);
++ return 1;
++}
++
++static const struct dmi_system_id ips_blacklist[] = {
++ {
++ .callback = ips_blacklist_callback,
++ .ident = "HP ProBook",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
++ },
++ },
++ { } /* terminating entry */
++};
++
+ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ u64 platform_info;
+@@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ u16 htshi, trc, trc_required_mask;
+ u8 tse;
+
++ if (dmi_check_system(ips_blacklist))
++ return -ENODEV;
++
+ ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+ if (!ips)
+ return -ENOMEM;
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 09e26bf..af1e296 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -540,245 +540,34 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
+ get_performance_level, set_performance_level);
+
+
+-static int __init dmi_check_cb(const struct dmi_system_id *id)
+-{
+- pr_info("found laptop model '%s'\n",
+- id->ident);
+- return 1;
+-}
+-
+ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ {
+- .ident = "N128",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
+- DMI_MATCH(DMI_BOARD_NAME, "N128"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N130",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
+- DMI_MATCH(DMI_BOARD_NAME, "N130"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+- DMI_MATCH(DMI_BOARD_NAME, "N510"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X125",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
+- DMI_MATCH(DMI_BOARD_NAME, "X125"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X120/X170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
+- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC10",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NP-Q45",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
+- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X360",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
+- DMI_MATCH(DMI_BOARD_NAME, "X360"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R410 Plus",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+- DMI_MATCH(DMI_BOARD_NAME, "R460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R518",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
+- DMI_MATCH(DMI_BOARD_NAME, "R518"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R519/R719",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
+- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220/N230",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150P/N210P/N220P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R700",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
+- DMI_MATCH(DMI_BOARD_NAME, "SR700"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R530/R730",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
+- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NF110/NF210/NF310",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
+- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N145P/N250P/N260P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R70/R71",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
+- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "P460",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
+- DMI_MATCH(DMI_BOARD_NAME, "P460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R528/R728",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
+- DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC210/NC110",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X520",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
+- DMI_MATCH(DMI_BOARD_NAME, "X520"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ { },
+ };
+@@ -819,7 +608,8 @@ static int __init samsung_init(void)
+
+ f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ if (!f0000_segment) {
+- pr_err("Can't map the segment at 0xf0000\n");
++ if (debug || force)
++ pr_err("Can't map the segment at 0xf0000\n");
+ return -EINVAL;
+ }
+
+@@ -832,7 +622,8 @@ static int __init samsung_init(void)
+ }
+
+ if (loca == 0xffff) {
+- pr_err("This computer does not support SABI\n");
++ if (debug || force)
++ pr_err("This computer does not support SABI\n");
+ goto error_no_signature;
+ }
+
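
The samsung-laptop rework above replaces the long list of per-model DMI entries with four generic chassis-type matches, so new models no longer need an explicit entry. As a minimal sketch of how such a table is consumed (the module boilerplate and names below are illustrative, not taken from the driver), dmi_check_system() walks the table and returns the number of entries that matched:

#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

/* hypothetical table: match any Samsung machine reporting itself
 * as a portable (DMI chassis type 8) */
static struct dmi_system_id __initdata example_dmi_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR,
				  "SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
		},
	},
	{ },	/* terminating entry */
};

static int __init example_init(void)
{
	/* dmi_check_system() returns the number of matching entries */
	if (!dmi_check_system(example_dmi_table))
		return -ENODEV;	/* not one of ours; refuse to load */
	return 0;
}
module_init(example_init);
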
+diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
+index 39e41fb..5160354 100644
+--- a/drivers/rtc/rtc-mxc.c
++++ b/drivers/rtc/rtc-mxc.c
+@@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
++ unsigned long flags;
+ u32 status;
+ u32 events = 0;
+
+- spin_lock_irq(&pdata->rtc->irq_lock);
++ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
+ /* clear interrupt sources */
+ writew(status, ioaddr + RTC_RTCISR);
+@@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
+
+ rtc_update_irq(pdata->rtc, 1, events);
+- spin_unlock_irq(&pdata->rtc->irq_lock);
++ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+
+ return IRQ_HANDLED;
+ }
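
The rtc-mxc hunk is a standard irqsave conversion: spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the handler can be entered with interrupts already disabled; the irqsave variant restores whatever state the caller had. A minimal sketch of the idiom, with a hypothetical device structure:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_dev {			/* hypothetical device state */
	spinlock_t lock;
	u32 pending;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* remember IRQ state */
	dev->pending = 0;			/* touch shared state */
	spin_unlock_irqrestore(&dev->lock, flags); /* restore, don't enable */

	return IRQ_HANDLED;
}
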
+diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
+index 532d212..393e7ce 100644
+--- a/drivers/scsi/aic94xx/aic94xx_task.c
++++ b/drivers/scsi/aic94xx/aic94xx_task.c
+@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
+
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
+- memcpy(&resp->ending_fis[0], r+16, 24);
++ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
+ ts->buf_valid_size = sizeof(*resp);
+ }
+ }
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index db9238f..4868fc9 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -112,12 +112,12 @@ static void sas_ata_task_done(struct sas_task *task)
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
+- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
++ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
+
+ if (!link->sactive) {
+- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ } else {
+- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+@@ -138,8 +138,8 @@ static void sas_ata_task_done(struct sas_task *task)
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
+- dev->sata_dev.tf.feature = 0x04; /* status err */
+- dev->sata_dev.tf.command = ATA_ERR;
++ dev->sata_dev.fis[3] = 0x04; /* status err */
++ dev->sata_dev.fis[2] = ATA_ERR;
+ }
+ }
+
+@@ -252,7 +252,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
+ {
+ struct domain_device *dev = qc->ap->private_data;
+
+- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
++ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
+ return true;
+ }
+
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 65ea65a..93b9406 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+- range = (dev->transport->get_blocks(dev) - lba);
++ range = (dev->transport->get_blocks(dev) - lba) + 1;
+
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
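
The one-character WRITE_SAME fix deserves a worked example: dev->transport->get_blocks() reports the last addressable LBA rather than the block count, so a request that runs to the end of the device covers an inclusive range. Stand-alone arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long last_lba = 7;  /* get_blocks() on an 8-block dev */
	unsigned long long lba = 5;	  /* WRITE_SAME starts here */

	/* blocks 5, 6 and 7 must be written: (7 - 5) + 1 = 3 */
	unsigned long long range = (last_lba - lba) + 1;

	printf("range = %llu\n", range);  /* prints 3 */
	return 0;
}
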
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index b75bc92..9145141 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ pr_err("filp_open(%s) for APTPL metadata"
+ " failed\n", path);
+- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
++ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+ }
+
+ iov[0].iov_base = &buf[0];
+@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+- ret = EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ */
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ if (cmd->data_length < 24) {
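
Two of the hunks above tighten error handling around the ERR_PTR convention: filp_open() returns either a valid pointer or a negative errno encoded as a pointer, so it must be tested with IS_ERR(), and the errno propagated to callers must stay negative (note the ret = EINVAL to ret = -EINVAL hunk). A minimal sketch of the idiom; the wrapper function is illustrative:

#include <linux/err.h>
#include <linux/fs.h>

static int example_open_metadata(const char *path)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT, 0600);

	if (IS_ERR(file))
		return PTR_ERR(file);	/* already negative, e.g. -ENOENT */

	/* ... use the file ... */
	filp_close(file, NULL);
	return 0;
}
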
+diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
+index d95cfe2..278819c 100644
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
+ {
+ struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
++ if (cmd->aborted)
++ return ~0;
+ return fc_seq_exch(cmd->seq)->rxid;
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 19fb5fa..9aaed0d 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -473,6 +473,8 @@ retry:
+ goto retry;
+ }
+ if (!desc->reslength) { /* zero length read */
++ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
++ clear_bit(WDM_READ, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 52d27ed..175b6bb 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2039,12 +2039,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+
+-/* Is a USB 3.0 port in the Inactive state? */
+-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
++/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
++ * A warm port reset is required to recover.
++ */
++static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
+ {
+ return hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE) ==
+- USB_SS_PORT_LS_SS_INACTIVE;
++ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_INACTIVE) ||
++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_COMP_MOD));
+ }
+
+ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+@@ -2080,7 +2084,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+ */
+- if (hub_port_inactive(hub, portstatus)) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ int ret;
+
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+@@ -3646,9 +3650,7 @@ static void hub_events(void)
+ /* Warm reset a USB3 protocol port if it's in
+ * SS.Inactive state.
+ */
+- if (hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE)
+- == USB_SS_PORT_LS_SS_INACTIVE) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+ hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
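
hub_port_warm_reset_required() operates on the multi-bit link-state field of the port status word, so each candidate state is compared only after masking. A stand-alone illustration using the ch11.h constant values (the hub_is_superspeed() half of the real check is omitted):

#include <stdio.h>

#define USB_PORT_STAT_LINK_STATE	0x01e0	/* bits 5..8 */
#define USB_SS_PORT_LS_SS_INACTIVE	0x00c0	/* link state 6 */
#define USB_SS_PORT_LS_COMP_MOD		0x0140	/* link state 10 */

static int warm_reset_required(unsigned int portstatus)
{
	unsigned int ls = portstatus & USB_PORT_STAT_LINK_STATE;

	return ls == USB_SS_PORT_LS_SS_INACTIVE ||
	       ls == USB_SS_PORT_LS_COMP_MOD;
}

int main(void)
{
	printf("%d\n", warm_reset_required(0x0140));	/* 1: compliance */
	printf("%d\n", warm_reset_required(0x0000));	/* 0: U0, healthy */
	return 0;
}
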
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index a8b2980..fd8a2c2 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -438,6 +438,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ }
+ }
+
++/* Update the link status for a SuperSpeed port */
++static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
++{
++ u32 pls = status_reg & PORT_PLS_MASK;
++
++ /* resume state is an xHCI internal state.
++ * Do not report it to usb core.
++ */
++ if (pls == XDEV_RESUME)
++ return;
++
++ /* When the CAS bit is set, a warm reset
++ * should be performed on the port.
++ */
++ if (status_reg & PORT_CAS) {
++ /* The CAS bit can be set while the port is
++ * in any link state.
++ * Only roothubs have CAS bit, so we
++ * pretend to be in compliance mode
++ * unless we're already in compliance
++ * or the inactive state.
++ */
++ if (pls != USB_SS_PORT_LS_COMP_MOD &&
++ pls != USB_SS_PORT_LS_SS_INACTIVE) {
++ pls = USB_SS_PORT_LS_COMP_MOD;
++ }
++ /* Return also connection bit -
++ * hub state machine resets port
++ * when this bit is set.
++ */
++ pls |= USB_PORT_STAT_CONNECTION;
++ }
++ /* update status field */
++ *status |= pls;
++}
++
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+ {
+@@ -579,13 +615,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ else
+ status |= USB_PORT_STAT_POWER;
+ }
+- /* Port Link State */
++ /* Update the port link state for SuperSpeed ports */
+ if (hcd->speed == HCD_USB3) {
+- /* resume state is a xHCI internal state.
+- * Do not report it to usb core.
+- */
+- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
+- status |= (temp & PORT_PLS_MASK);
++ xhci_hub_report_link_state(&status, temp);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 363b141..7a56805 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -341,7 +341,11 @@ struct xhci_op_regs {
+ #define PORT_PLC (1 << 22)
+ /* port configure error change - port failed to configure its link partner */
+ #define PORT_CEC (1 << 23)
+-/* bit 24 reserved */
++/* Cold Attach Status - xHC can set this bit to report device attached during
++ * Sx state. Warm port reset should be performed to clear this bit and move port
++ * to connected state.
++ */
++#define PORT_CAS (1 << 24)
+ /* wake on connect (enable) */
+ #define PORT_WKCONN_E (1 << 25)
+ /* wake on disconnect (enable) */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 21a4734..5971c95 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -496,6 +496,15 @@ static void option_instat_callback(struct urb *urb);
+
+ /* MediaTek products */
+ #define MEDIATEK_VENDOR_ID 0x0e8d
++#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
++#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
++#define MEDIATEK_PRODUCT_7208_1COM 0x7101
++#define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_FP_1COM 0x0003
++#define MEDIATEK_PRODUCT_FP_2COM 0x0023
++#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
++#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
+
+ /* Cellient products */
+ #define CELLIENT_VENDOR_ID 0x2692
+@@ -553,6 +562,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+ };
+
++static const struct option_blacklist_info net_intf2_blacklist = {
++ .reserved = BIT(2),
++};
++
+ static const struct option_blacklist_info net_intf3_blacklist = {
+ .reserved = BIT(3),
+ };
+@@ -1093,6 +1106,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1234,6 +1249,17 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c14c42b..ae66278 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
+ if (work) {
+ __set_current_state(TASK_RUNNING);
+ work->fn(work);
++ if (need_resched())
++ schedule();
+ } else
+ schedule();
+
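
The vhost change adds a voluntary preemption point so that a worker thread fed a steady stream of work items cannot monopolize its CPU. A sketch of the loop in isolation; the work type and the queue stub are hypothetical:

#include <linux/kthread.h>
#include <linux/sched.h>

struct example_work {
	void (*fn)(struct example_work *work);
};

static struct example_work *example_dequeue(void)
{
	return NULL;	/* stub: a real worker pops from a locked list */
}

static int example_worker(void *data)
{
	struct example_work *work;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		work = example_dequeue();
		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())	/* yield between items */
				schedule();
		} else {
			schedule();		/* queue empty: sleep */
		}
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
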
+diff --git a/fs/buffer.c b/fs/buffer.c
+index c807931..4115eca 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
++ int ret;
++ struct buffer_head *bh;
++
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+- for (;;) {
+- struct buffer_head * bh;
+- int ret;
++retry:
++ bh = __find_get_block(bdev, block, size);
++ if (bh)
++ return bh;
+
++ ret = grow_buffers(bdev, block, size);
++ if (ret == 0) {
++ free_more_memory();
++ goto retry;
++ } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
+-
+- ret = grow_buffers(bdev, block, size);
+- if (ret < 0)
+- return NULL;
+- if (ret == 0)
+- free_more_memory();
+ }
++ return NULL;
+ }
+
+ /*
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index b21670c..56c152d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2925,6 +2925,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+ #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+
++/*
++ * On hosts with high memory, we can't currently support wsize/rsize that are
++ * larger than we can kmap at once. Cap the rsize/wsize at
++ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
++ * larger than that anyway.
++ */
++#ifdef CONFIG_HIGHMEM
++#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
++#else /* CONFIG_HIGHMEM */
++#define CIFS_KMAP_SIZE_LIMIT (1<<24)
++#endif /* CONFIG_HIGHMEM */
++
+ static unsigned int
+ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ {
+@@ -2955,6 +2967,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ wsize = min_t(unsigned int, wsize,
+ server->maxBuf - sizeof(WRITE_REQ) + 4);
+
++ /* limit to the amount that we can kmap at once */
++ wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_WSIZE */
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+
+@@ -2996,6 +3011,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ if (!(server->capabilities & CAP_LARGE_READ_X))
+ rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+
++ /* limit to the amount that we can kmap at once */
++ rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_RSIZE */
+ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+
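
The new cap is easy to evaluate by hand: on 32-bit x86 without PAE, LAST_PKMAP is 1024 and PAGE_CACHE_SIZE is 4096, so CIFS_KMAP_SIZE_LIMIT comes to 4 MiB, while the non-HIGHMEM fallback of 1<<24 is 16 MiB. A stand-alone check, assuming those x86 values:

#include <stdio.h>

int main(void)
{
	unsigned long last_pkmap = 1024;	/* assumed: x86, non-PAE */
	unsigned long page_cache_size = 4096;

	unsigned long highmem_cap = last_pkmap * page_cache_size;
	unsigned long default_cap = 1UL << 24;

	printf("HIGHMEM cap: %lu MiB\n", highmem_cap >> 20);	/* 4 */
	printf("default cap: %lu MiB\n", default_cap >> 20);	/* 16 */
	return 0;
}
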
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index db4a138..4c37ed4 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
+- /* FIXME: check for inode number changes? */
+- if (dentry->d_inode != NULL)
++ inode = dentry->d_inode;
++ /* update inode in place if i_ino didn't change */
++ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
++ cifs_fattr_to_inode(inode, fattr);
+ return dentry;
++ }
+ d_drop(dentry);
+ dput(dentry);
+ }
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 69f994a..0dbe58a 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
+ if (!IS_ERR(*lower_file))
+ goto out;
+- if (flags & O_RDONLY) {
++ if ((flags & O_ACCMODE) == O_RDONLY) {
+ rc = PTR_ERR((*lower_file));
+ goto out;
+ }
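
The one-line eCryptfs fix is a classic: O_RDONLY is defined as 0, so flags & O_RDONLY can never be true and the read-only fallback path was unreachable. The access mode lives in the low two bits and must be extracted with O_ACCMODE before comparing, as this small demonstration shows:

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int flags = O_RDONLY;	/* O_RDONLY == 0 */

	/* buggy test: x & 0 is always 0 */
	printf("flags & O_RDONLY: %d\n", flags & O_RDONLY);

	/* fixed test: mask out the access-mode bits first */
	printf("(flags & O_ACCMODE) == O_RDONLY: %d\n",
	       (flags & O_ACCMODE) == O_RDONLY);	/* prints 1 */
	return 0;
}
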
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index 0dc5a3d..de42310 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
+ goto out_unlock_daemon;
+ }
+ daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
++ file->private_data = daemon;
+ atomic_inc(&ecryptfs_num_miscdev_opens);
+ out_unlock_daemon:
+ mutex_unlock(&daemon->mux);
+@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
+
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon)
++ daemon = file->private_data;
+ mutex_lock(&daemon->mux);
+- BUG_ON(daemon->pid != task_pid(current));
+ BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
+ daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
+ atomic_dec(&ecryptfs_num_miscdev_opens);
+@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
+ struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+ u16 msg_flags, struct ecryptfs_daemon *daemon)
+ {
+- int rc = 0;
++ struct ecryptfs_message *msg;
+
+- mutex_lock(&msg_ctx->mux);
+- msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
+- GFP_KERNEL);
+- if (!msg_ctx->msg) {
+- rc = -ENOMEM;
++ msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
++ if (!msg) {
+ printk(KERN_ERR "%s: Out of memory whilst attempting "
+ "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
+- (sizeof(*msg_ctx->msg) + data_size));
+- goto out_unlock;
++ (sizeof(*msg) + data_size));
++ return -ENOMEM;
+ }
++
++ mutex_lock(&msg_ctx->mux);
++ msg_ctx->msg = msg;
+ msg_ctx->msg->index = msg_ctx->index;
+ msg_ctx->msg->data_len = data_size;
+ msg_ctx->type = msg_type;
+ memcpy(msg_ctx->msg->data, data, data_size);
+ msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
+- mutex_lock(&daemon->mux);
+ list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
++ mutex_unlock(&msg_ctx->mux);
++
++ mutex_lock(&daemon->mux);
+ daemon->num_queued_msg_ctx++;
+ wake_up_interruptible(&daemon->wait);
+ mutex_unlock(&daemon->mux);
+-out_unlock:
+- mutex_unlock(&msg_ctx->mux);
+- return rc;
++
++ return 0;
+ }
+
+ /**
+@@ -246,8 +251,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
++ if (task_pid(current) != daemon->pid) {
++ mutex_unlock(&daemon->mux);
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EPERM;
++ }
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+ rc = 0;
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+@@ -284,9 +297,6 @@ check_list:
+ * message from the queue; try again */
+ goto check_list;
+ }
+- BUG_ON(euid != daemon->euid);
+- BUG_ON(current_user_ns() != daemon->user_ns);
+- BUG_ON(task_pid(current) != daemon->pid);
+ msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
+ struct ecryptfs_msg_ctx, daemon_out_list);
+ BUG_ON(!msg_ctx);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 4d9d3a4..a6f3763 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ if (op == EPOLL_CTL_ADD) {
+ if (is_file_epoll(tfile)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tfile) != 0)
++ if (ep_loop_check(ep, tfile) != 0) {
++ clear_tfile_check_list();
+ goto error_tgt_fput;
++ }
+ } else
+ list_add(&tfile->f_tfile_llink, &tfile_check_list);
+ }
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 49cf230..24a49d4 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
+ out:
+ ios->numdevs = devs_in_group;
+ ios->pages_consumed = cur_pg;
+- if (unlikely(ret)) {
+- if (length == ios->length)
+- return ret;
+- else
+- ios->length -= length;
+- }
+- return 0;
++ return ret;
+ }
+
+ int ore_create(struct ore_io_state *ios)
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index d222c77..fff2070 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ * ios->sp2d[p][*], xor is calculated the same way. These pages are
+ * allocated/freed and don't go through cache
+ */
+-static int _read_4_write(struct ore_io_state *ios)
++static int _read_4_write_first_stripe(struct ore_io_state *ios)
+ {
+- struct ore_io_state *ios_read;
+ struct ore_striping_info read_si;
+ struct __stripe_pages_2d *sp2d = ios->sp2d;
+ u64 offset = ios->si.first_stripe_start;
+- u64 last_stripe_end;
+- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+- unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+- int ret;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
+ if (offset == ios->offset) /* Go to start collect $200 */
+ goto read_last_stripe;
+@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+
++ ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
++ offset, ios->offset, min_p, max_p);
++
+ for (c = 0; ; c++) {
+ ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
+ read_si.obj_offset += min_p * PAGE_SIZE;
+@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
+ }
+
+ read_last_stripe:
++ return 0;
++}
++
++static int _read_4_write_last_stripe(struct ore_io_state *ios)
++{
++ struct ore_striping_info read_si;
++ struct __stripe_pages_2d *sp2d = ios->sp2d;
++ u64 offset;
++ u64 last_stripe_end;
++ unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
++
+ offset = ios->offset + ios->length;
+ if (offset % PAGE_SIZE)
+ _add_to_r4w_last_page(ios, &offset);
+@@ -527,15 +538,15 @@ read_last_stripe:
+ c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
+ ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
+
+- BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
+- /* unaligned IO must be within a single stripe */
+-
+ if (min_p == sp2d->pages_in_unit) {
+ /* Didn't do it yet */
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+ }
+
++ ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
++ offset, last_stripe_end, min_p, max_p);
++
+ while (offset < last_stripe_end) {
+ struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
+@@ -568,6 +579,15 @@ read_last_stripe:
+ }
+
+ read_it:
++ return 0;
++}
++
++static int _read_4_write_execute(struct ore_io_state *ios)
++{
++ struct ore_io_state *ios_read;
++ unsigned i;
++ int ret;
++
+ ios_read = ios->ios_read_4_write;
+ if (!ios_read)
+ return 0;
+@@ -591,6 +611,8 @@ read_it:
+ }
+
+ _mark_read4write_pages_uptodate(ios_read, ret);
++ ore_put_io_state(ios_read);
++ ios->ios_read_4_write = NULL; /* Might be reused at the last stripe */
+ return 0;
+ }
+
+@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+ /* If first stripe, Read in all read4write pages
+ * (if needed) before we calculate the first parity.
+ */
+- _read_4_write(ios);
++ _read_4_write_first_stripe(ios);
+ }
++ if (!cur_len) /* If last stripe, read in the r4w pages of the last stripe */
++ _read_4_write_last_stripe(ios);
++ _read_4_write_execute(ios);
+
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = _raid_page_alloc();
+@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+
+ int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
+ {
+- struct ore_layout *layout = ios->layout;
+-
+ if (ios->parity_pages) {
++ struct ore_layout *layout = ios->layout;
+ unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
+- unsigned stripe_size = ios->si.bytes_in_stripe;
+- u64 last_stripe, first_stripe;
+
+ if (_sp2d_alloc(pages_in_unit, layout->group_width,
+ layout->parity, &ios->sp2d)) {
+ return -ENOMEM;
+ }
+-
+- /* Round io down to last full strip */
+- first_stripe = div_u64(ios->offset, stripe_size);
+- last_stripe = div_u64(ios->offset + ios->length, stripe_size);
+-
+- /* If an IO spans more then a single stripe it must end at
+- * a stripe boundary. The reminder at the end is pushed into the
+- * next IO.
+- */
+- if (last_stripe != first_stripe) {
+- ios->length = last_stripe * stripe_size - ios->offset;
+-
+- BUG_ON(!ios->length);
+- ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
+- PAGE_SIZE;
+- ios->si.length = ios->length; /*make it consistent */
+- }
+ }
+ return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ab7aa3f..a93486e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1097,7 +1097,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ }
+ if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
+ seq_printf(seq, ",max_batch_time=%u",
+- (unsigned) sbi->s_min_batch_time);
++ (unsigned) sbi->s_max_batch_time);
+ }
+
+ /*
+diff --git a/fs/fifo.c b/fs/fifo.c
+index b1a524d..cf6f434 100644
+--- a/fs/fifo.c
++++ b/fs/fifo.c
+@@ -14,7 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/pipe_fs_i.h>
+
+-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
++static int wait_for_partner(struct inode* inode, unsigned int *cnt)
+ {
+ int cur = *cnt;
+
+@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+ if (signal_pending(current))
+ break;
+ }
++ return cur == *cnt ? -ERESTARTSYS : 0;
+ }
+
+ static void wake_up_partner(struct inode* inode)
+@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ * seen a writer */
+ filp->f_version = pipe->w_counter;
+ } else {
+- wait_for_partner(inode, &pipe->w_counter);
+- if(signal_pending(current))
++ if (wait_for_partner(inode, &pipe->w_counter))
+ goto err_rd;
+ }
+ }
+@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ wake_up_partner(inode);
+
+ if (!pipe->readers) {
+- wait_for_partner(inode, &pipe->r_counter);
+- if (signal_pending(current))
++ if (wait_for_partner(inode, &pipe->r_counter))
+ goto err_wr;
+ }
+ break;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 2d0ca24..ebc2f4d 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -592,9 +592,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ spin_lock(&sbinfo->stat_lock);
+ /* If no limits set, just report 0 for max/free/used
+ * blocks, like simple_statfs() */
+- if (sbinfo->max_blocks >= 0) {
+- buf->f_blocks = sbinfo->max_blocks;
+- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
++ if (sbinfo->spool) {
++ long free_pages;
++
++ spin_lock(&sbinfo->spool->lock);
++ buf->f_blocks = sbinfo->spool->max_hpages;
++ free_pages = sbinfo->spool->max_hpages
++ - sbinfo->spool->used_hpages;
++ buf->f_bavail = buf->f_bfree = free_pages;
++ spin_unlock(&sbinfo->spool->lock);
+ buf->f_files = sbinfo->max_inodes;
+ buf->f_ffree = sbinfo->free_inodes;
+ }
+@@ -610,6 +616,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
+
+ if (sbi) {
+ sb->s_fs_info = NULL;
++
++ if (sbi->spool)
++ hugepage_put_subpool(sbi->spool);
++
+ kfree(sbi);
+ }
+ }
+@@ -841,10 +851,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_fs_info = sbinfo;
+ sbinfo->hstate = config.hstate;
+ spin_lock_init(&sbinfo->stat_lock);
+- sbinfo->max_blocks = config.nr_blocks;
+- sbinfo->free_blocks = config.nr_blocks;
+ sbinfo->max_inodes = config.nr_inodes;
+ sbinfo->free_inodes = config.nr_inodes;
++ sbinfo->spool = NULL;
++ if (config.nr_blocks != -1) {
++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
++ if (!sbinfo->spool)
++ goto out_free;
++ }
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = huge_page_size(config.hstate);
+ sb->s_blocksize_bits = huge_page_shift(config.hstate);
+@@ -864,38 +878,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_root = root;
+ return 0;
+ out_free:
++ if (sbinfo->spool)
++ kfree(sbinfo->spool);
+ kfree(sbinfo);
+ return -ENOMEM;
+ }
+
+-int hugetlb_get_quota(struct address_space *mapping, long delta)
+-{
+- int ret = 0;
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- if (sbinfo->free_blocks - delta >= 0)
+- sbinfo->free_blocks -= delta;
+- else
+- ret = -ENOMEM;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-
+- return ret;
+-}
+-
+-void hugetlb_put_quota(struct address_space *mapping, long delta)
+-{
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- sbinfo->free_blocks += delta;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-}
+-
+ static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d68f1f..6a64f15 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+ case F_WRLCK:
+ return generic_add_lease(filp, arg, flp);
+ default:
+- BUG();
++ return -EINVAL;
+ }
+ }
+ EXPORT_SYMBOL(generic_setlease);
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 47d1c6f..b122af8 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -318,12 +318,12 @@ struct idmap_hashent {
+ unsigned long ih_expires;
+ __u32 ih_id;
+ size_t ih_namelen;
+- char ih_name[IDMAP_NAMESZ];
++ const char *ih_name;
+ };
+
+ struct idmap_hashtable {
+ __u8 h_type;
+- struct idmap_hashent h_entries[IDMAP_HASH_SZ];
++ struct idmap_hashent *h_entries;
+ };
+
+ struct idmap {
+@@ -378,6 +378,28 @@ nfs_idmap_new(struct nfs_client *clp)
+ return 0;
+ }
+
++static void
++idmap_alloc_hashtable(struct idmap_hashtable *h)
++{
++ if (h->h_entries != NULL)
++ return;
++ h->h_entries = kcalloc(IDMAP_HASH_SZ,
++ sizeof(*h->h_entries),
++ GFP_KERNEL);
++}
++
++static void
++idmap_free_hashtable(struct idmap_hashtable *h)
++{
++ int i;
++
++ if (h->h_entries == NULL)
++ return;
++ for (i = 0; i < IDMAP_HASH_SZ; i++)
++ kfree(h->h_entries[i].ih_name);
++ kfree(h->h_entries);
++}
++
+ void
+ nfs_idmap_delete(struct nfs_client *clp)
+ {
+@@ -387,6 +409,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ return;
+ rpc_unlink(idmap->idmap_dentry);
+ clp->cl_idmap = NULL;
++ idmap_free_hashtable(&idmap->idmap_user_hash);
++ idmap_free_hashtable(&idmap->idmap_group_hash);
+ kfree(idmap);
+ }
+
+@@ -396,6 +420,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ static inline struct idmap_hashent *
+ idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
+ }
+
+@@ -404,6 +430,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ {
+ struct idmap_hashent *he = idmap_name_hash(h, name, len);
+
++ if (he == NULL)
++ return NULL;
+ if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -414,6 +442,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ static inline struct idmap_hashent *
+ idmap_id_hash(struct idmap_hashtable* h, __u32 id)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
+ }
+
+@@ -421,6 +451,9 @@ static struct idmap_hashent *
+ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ {
+ struct idmap_hashent *he = idmap_id_hash(h, id);
++
++ if (he == NULL)
++ return NULL;
+ if (he->ih_id != id || he->ih_namelen == 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -436,12 +469,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ static inline struct idmap_hashent *
+ idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_name_hash(h, name, len);
+ }
+
+ static inline struct idmap_hashent *
+ idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_id_hash(h, id);
+ }
+
+@@ -449,9 +484,14 @@ static void
+ idmap_update_entry(struct idmap_hashent *he, const char *name,
+ size_t namelen, __u32 id)
+ {
++ char *str = kmalloc(namelen + 1, GFP_KERNEL);
++ if (str == NULL)
++ return;
++ kfree(he->ih_name);
+ he->ih_id = id;
+- memcpy(he->ih_name, name, namelen);
+- he->ih_name[namelen] = '\0';
++ memcpy(str, name, namelen);
++ str[namelen] = '\0';
++ he->ih_name = str;
+ he->ih_namelen = namelen;
+ he->ih_expires = jiffies + nfs_idmap_cache_timeout;
+ }
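
The idmap rework turns a large embedded array into a lazily allocated one, at the price that every lookup helper must now tolerate a NULL table (the allocation may not have happened yet, or may have failed). A condensed sketch of the pattern, with made-up names and sizes:

#include <linux/slab.h>

#define EXAMPLE_HASH_SZ 64

struct example_ent {
	int key;
};

struct example_table {
	struct example_ent *entries;	/* NULL until first insert */
};

static struct example_ent *example_lookup(struct example_table *t, int key)
{
	if (t->entries == NULL)		/* never allocated: miss */
		return NULL;
	return &t->entries[key % EXAMPLE_HASH_SZ];
}

static void example_prealloc(struct example_table *t)
{
	if (t->entries == NULL)		/* kcalloc failure is tolerated */
		t->entries = kcalloc(EXAMPLE_HASH_SZ,
				     sizeof(*t->entries), GFP_KERNEL);
}
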
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 66020ac..07354b7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1186,8 +1186,9 @@ restart:
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+- printk("%s: Lock reclaim failed!\n",
+- __func__);
++ pr_warn_ratelimited("NFS: "
++ "%s: Lock reclaim "
++ "failed!\n", __func__);
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_open_state(state);
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index 55d0128..a03ee52 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -433,7 +433,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
+ objios->ios->done = _read_done;
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ rdata->args.offset, rdata->args.count);
+- return ore_read(objios->ios);
++ ret = ore_read(objios->ios);
++ if (unlikely(ret))
++ objio_free_result(&objios->oir);
++ return ret;
+ }
+
+ /*
+@@ -464,8 +467,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+ struct objio_state *objios = priv;
+ struct nfs_write_data *wdata = objios->oir.rpcdata;
+ pgoff_t index = offset / PAGE_SIZE;
+- struct page *page = find_get_page(wdata->inode->i_mapping, index);
++ struct page *page;
++ loff_t i_size = i_size_read(wdata->inode);
++
++ if (offset >= i_size) {
++ *uptodate = true;
++ dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
++ return ZERO_PAGE(0);
++ }
+
++ page = find_get_page(wdata->inode->i_mapping, index);
+ if (!page) {
+ page = find_or_create_page(wdata->inode->i_mapping,
+ index, GFP_NOFS);
+@@ -486,8 +497,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+
+ static void __r4w_put_page(void *priv, struct page *page)
+ {
+- dprintk("%s: index=0x%lx\n", __func__, page->index);
+- page_cache_release(page);
++ dprintk("%s: index=0x%lx\n", __func__,
++ (page == ZERO_PAGE(0)) ? -1UL : page->index);
++ if (ZERO_PAGE(0) != page)
++ page_cache_release(page);
+ return;
+ }
+
+@@ -517,8 +530,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ wdata->args.offset, wdata->args.count);
+ ret = ore_write(objios->ios);
+- if (unlikely(ret))
++ if (unlikely(ret)) {
++ objio_free_result(&objios->oir);
+ return ret;
++ }
+
+ if (objios->sync)
+ _write_done(objios->ios, objios);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 07ee5b4..1c7d45e 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ if (ret < 0)
+ mlog_errno(ret);
+
+- if (file->f_flags & O_SYNC)
++ if (file && (file->f_flags & O_SYNC))
+ handle->h_sync = 1;
+
+ ocfs2_commit_trans(osb, handle);
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index fbb0b47..d5378d0 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+
+ /* prevent the page from being discarded on memory pressure */
+ SetPageDirty(page);
++ SetPageUptodate(page);
+
+ unlock_page(page);
+ put_page(page);
+diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
+index 6094c5a..b73ecd8 100644
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -715,8 +715,12 @@ static int fixup_free_space(struct ubifs_info *c)
+ lnum = ubifs_next_log_lnum(c, lnum);
+ }
+
+- /* Fixup the current log head */
+- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
++ /*
++ * Fixup the log head, which contains only a CS node at the
++ * beginning.
++ */
++ err = fixup_leb(c, c->lhead_lnum,
++ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
+ if (err)
+ goto out;
+
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index bd21ecd..a3ce901 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -268,6 +268,7 @@ header-y += netfilter_ipv4.h
+ header-y += netfilter_ipv6.h
+ header-y += netlink.h
+ header-y += netrom.h
++header-y += nfc.h
+ header-y += nfs.h
+ header-y += nfs2.h
+ header-y += nfs3.h
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index fd0dc30..cc07d27 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -165,6 +165,7 @@ enum hrtimer_base_type {
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @active_bases: Bitfield to mark bases with active timers
++ * @clock_was_set: Indicates that the clock was set from irq context.
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @hres_active: State of high resolution mode
+@@ -177,7 +178,8 @@ enum hrtimer_base_type {
+ */
+ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+- unsigned long active_bases;
++ unsigned int active_bases;
++ unsigned int clock_was_set;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int hres_active;
+@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
+ # define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+ # define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
++extern void clock_was_set_delayed(void);
++
+ #else
+
+ # define MONOTONIC_RES_NSEC LOW_RES_NSEC
+@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+ {
+ return 0;
+ }
++
++static inline void clock_was_set_delayed(void) { }
++
+ #endif
+
+ extern void clock_was_set(void);
+@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
+ extern ktime_t ktime_get_real(void);
+ extern ktime_t ktime_get_boottime(void);
+ extern ktime_t ktime_get_monotonic_offset(void);
++extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
+
+ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index d9d6c86..c5ed2f1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -14,6 +14,15 @@ struct user_struct;
+ #include <linux/shm.h>
+ #include <asm/tlbflush.h>
+
++struct hugepage_subpool {
++ spinlock_t lock;
++ long count;
++ long max_hpages, used_hpages;
++};
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
++void hugepage_put_subpool(struct hugepage_subpool *spool);
++
+ int PageHuge(struct page *page);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+@@ -138,12 +147,11 @@ struct hugetlbfs_config {
+ };
+
+ struct hugetlbfs_sb_info {
+- long max_blocks; /* blocks allowed */
+- long free_blocks; /* blocks free */
+ long max_inodes; /* inodes allowed */
+ long free_inodes; /* inodes free */
+ spinlock_t stat_lock;
+ struct hstate *hstate;
++ struct hugepage_subpool *spool;
+ };
+
+
+@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
+ extern const struct vm_operations_struct hugetlb_vm_ops;
+ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+ struct user_struct **user, int creat_flags);
+-int hugetlb_get_quota(struct address_space *mapping, long delta);
+-void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+ static inline int is_file_hugepages(struct file *file)
+ {
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 188cb2f..905b1e1 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -652,7 +652,7 @@ typedef struct pglist_data {
+ range, including holes */
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+- struct task_struct *kswapd;
++ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
+ int kswapd_max_order;
+ enum zone_type classzone_idx;
+ } pg_data_t;
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c0cfa0d..7cda65b 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -176,8 +176,6 @@ enum pci_dev_flags {
+ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
+- /* Device causes system crash if in D3 during S3 sleep */
+- PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
+ };
+
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1c4f3e9..5afa2a3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1892,6 +1892,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ }
+ #endif
+
++#ifdef CONFIG_NO_HZ
++void calc_load_enter_idle(void);
++void calc_load_exit_idle(void);
++#else
++static inline void calc_load_enter_idle(void) { }
++static inline void calc_load_exit_idle(void) { }
++#endif /* CONFIG_NO_HZ */
++
+ #ifndef CONFIG_CPUMASK_OFFSTACK
+ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+ {
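
The sched.h hunk uses the usual config-stub idiom: real prototypes when CONFIG_NO_HZ is set and empty static inlines otherwise, so the call sites added elsewhere in this patch need no #ifdef of their own. The shape of the idiom, shown with a hypothetical option:

#ifdef CONFIG_EXAMPLE_FEATURE
void example_feature_enter(void);
void example_feature_exit(void);
#else
/* compile away to nothing; call sites stay #ifdef-free */
static inline void example_feature_enter(void) { }
static inline void example_feature_exit(void) { }
#endif /* CONFIG_EXAMPLE_FEATURE */
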
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index bdb4590..53dc7e7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -213,11 +213,8 @@ enum {
+ /* device driver is going to provide hardware time stamp */
+ SKBTX_IN_PROGRESS = 1 << 2,
+
+- /* ensure the originating sk reference is available on driver level */
+- SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
+-
+ /* device driver supports TX zero-copy buffers */
+- SKBTX_DEV_ZEROCOPY = 1 << 4,
++ SKBTX_DEV_ZEROCOPY = 1 << 3,
+ };
+
+ /*
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index aa60fe7..08e90fb 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -266,7 +266,7 @@ static inline int ntp_synced(void)
+ /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
+ extern u64 tick_length;
+
+-extern void second_overflow(void);
++extern int second_overflow(unsigned long secs);
+ extern void update_ntp_one_tick(void);
+ extern int do_adjtimex(struct timex *);
+ extern void hardpps(const struct timespec *, const struct timespec *);
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 6a308d4..1e100c6 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -159,6 +159,8 @@ enum ata_command_set {
+ ATAPI_COMMAND_SET = 1,
+ };
+
++#define ATA_RESP_FIS_SIZE 24
++
+ struct sata_device {
+ enum ata_command_set command_set;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+@@ -170,7 +172,7 @@ struct sata_device {
+
+ struct ata_port *ap;
+ struct ata_host ata_host;
+- struct ata_taskfile tf;
++ u8 fis[ATA_RESP_FIS_SIZE];
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+@@ -486,7 +488,7 @@ enum exec_status {
+ */
+ struct ata_task_resp {
+ u16 frame_len;
+- u8 ending_fis[24]; /* dev to host or data-in */
++ u8 ending_fis[ATA_RESP_FIS_SIZE]; /* dev to host or data-in */
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index ae34bf5..6db7a5e 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ return 0;
+ }
+
++static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
++{
++ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
++ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
++
++ return ktime_get_update_offsets(offs_real, offs_boot);
++}
++
+ /*
+ * Retrigger next event is called after clock was set
+ *
+@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ static void retrigger_next_event(void *arg)
+ {
+ struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+- struct timespec realtime_offset, xtim, wtm, sleep;
+
+ if (!hrtimer_hres_active())
+ return;
+
+- /* Optimized out for !HIGH_RES */
+- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+-
+- /* Adjust CLOCK_REALTIME offset */
+ raw_spin_lock(&base->lock);
+- base->clock_base[HRTIMER_BASE_REALTIME].offset =
+- timespec_to_ktime(realtime_offset);
+- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+- timespec_to_ktime(sleep);
+-
++ hrtimer_update_base(base);
+ hrtimer_force_reprogram(base, 0);
+ raw_spin_unlock(&base->lock);
+ }
+@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
+ base->clock_base[i].resolution = KTIME_HIGH_RES;
+
+ tick_setup_sched_timer();
+-
+ /* "Retrigger" the interrupt to get things going */
+ retrigger_next_event(NULL);
+ local_irq_restore(flags);
+ return 1;
+ }
+
++/*
++ * Called from timekeeping code to reprogram the hrtimer interrupt
++ * device. If called from the timer interrupt context we defer it to
++ * softirq context.
++ */
++void clock_was_set_delayed(void)
++{
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ cpu_base->clock_was_set = 1;
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++}
++
+ #else
+
+ static inline int hrtimer_hres_active(void) { return 0; }
+@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ cpu_base->nr_events++;
+ dev->next_event.tv64 = KTIME_MAX;
+
+- entry_time = now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ entry_time = now = hrtimer_update_base(cpu_base);
+ retry:
+ expires_next.tv64 = KTIME_MAX;
+-
+- raw_spin_lock(&cpu_base->lock);
+ /*
+ * We set expires_next to KTIME_MAX here with cpu_base->lock
+ * held to prevent that a timer is enqueued in our queue via
+@@ -1330,8 +1339,12 @@ retry:
+ * We need to prevent that we loop forever in the hrtimer
+ * interrupt routine. We give it 3 attempts to avoid
+ * overreacting on some spurious event.
++ *
++ * Acquire base lock for updating the offsets and retrieving
++ * the current time.
+ */
+- now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ now = hrtimer_update_base(cpu_base);
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+@@ -1343,6 +1356,7 @@ retry:
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
++ raw_spin_unlock(&cpu_base->lock);
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
+
+ static void run_hrtimer_softirq(struct softirq_action *h)
+ {
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ if (cpu_base->clock_was_set) {
++ cpu_base->clock_was_set = 0;
++ clock_was_set();
++ }
++
+ hrtimer_peek_ahead_timers();
+ }
+
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index b313086..64f8f97 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -6,7 +6,7 @@
+ *
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
+ *
+ * This file is released under the GPLv2.
+ *
+@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+ return -ENOSPC;
+
+ if (bio_chain) {
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+ ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
+ if (ret)
+ return ret;
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT |
++ __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ clear_page(handle->cur);
+ handle->cur_swap = offset;
+ handle->k = 0;
+- }
+- if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+- error = hib_wait_on_bio_chain(bio_chain);
+- if (error)
+- goto out;
+- handle->reqd_free_pages = reqd_free_pages();
++
++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
++ error = hib_wait_on_bio_chain(bio_chain);
++ if (error)
++ goto out;
++ /*
++ * Recalculate the number of required free pages, to
++ * make sure we never take more than half.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++ }
+ }
+ out:
+ return error;
+@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+ /* Maximum number of threads for compression/decompression. */
+ #define LZO_THREADS 3
+
+-/* Maximum number of pages for read buffering. */
+-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
++/* Minimum/maximum number of pages for read buffering. */
++#define LZO_MIN_RD_PAGES 1024
++#define LZO_MAX_RD_PAGES 8192
+
+
+ /**
+@@ -632,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of free pages after all allocations have been done.
+- * We don't want to run out of pages when writing.
+- */
+- handle->reqd_free_pages = reqd_free_pages();
+-
+- /*
+ * Start the CRC32 thread.
+ */
+ init_waitqueue_head(&crc->go);
+@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ goto out_clean;
+ }
+
++ /*
++ * Adjust the number of required free pages after all allocations have
++ * been done. We don't want to run out of pages when writing.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++
+ printk(KERN_INFO
+ "PM: Using %u thread(s) for compression.\n"
+ "PM: Compressing and saving image data (%u pages) ... ",
+@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ unsigned i, thr, run_threads, nr_threads;
+ unsigned ring = 0, pg = 0, ring_size = 0,
+ have = 0, want, need, asked = 0;
+- unsigned long read_pages;
++ unsigned long read_pages = 0;
+ unsigned char **page = NULL;
+ struct dec_data *data = NULL;
+ struct crc_data *crc = NULL;
+@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ nr_threads = num_online_cpus() - 1;
+ nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+
+- page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
++ page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ if (!page) {
+ printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ ret = -ENOMEM;
+@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of pages for read buffering, in case we are short.
++ * Set the number of pages for read buffering.
++ * This is complete guesswork, because we'll only know the real
++ * picture once prepare_image() is called, which is much later on
++ * during the image load phase. We'll assume the worst case and
++ * say that none of the image pages are from high memory.
+ */
+- read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
+- read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
++ if (low_free_pages() > snapshot_get_image_size())
++ read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
++ read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
+
+ for (i = 0; i < read_pages; i++) {
+ page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ __GFP_WAIT | __GFP_HIGH :
+- __GFP_WAIT);
++ __GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
++
+ if (!page[i]) {
+ if (i < LZO_CMP_PAGES) {
+ ring_size = i;
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 576a27f..52ac69b 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1885,7 +1885,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+
+ #endif
+
+-static void calc_load_account_idle(struct rq *this_rq);
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+ static void update_cpu_load(struct rq *this_rq);
+@@ -3401,11 +3400,73 @@ unsigned long this_cpu_load(void)
+ }
+
+
++/*
++ * Global load-average calculations
++ *
++ * We take a distributed and async approach to calculating the global load-avg
++ * in order to minimize overhead.
++ *
++ * The global load average is an exponentially decaying average of nr_running +
++ * nr_uninterruptible.
++ *
++ * Once every LOAD_FREQ:
++ *
++ * nr_active = 0;
++ * for_each_possible_cpu(cpu)
++ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
++ *
++ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
++ *
++ * Due to a number of reasons the above turns into the mess below:
++ *
++ * - for_each_possible_cpu() is prohibitively expensive on machines with
++ * a serious number of cpus, so we need to take a distributed approach
++ * to calculating nr_active.
++ *
++ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
++ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
++ *
++ * So assuming nr_active := 0 when we start out -- true by definition, we
++ * can simply take per-cpu deltas and fold those into a global accumulator
++ * to obtain the same result. See calc_load_fold_active().
++ *
++ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
++ * across the machine, we assume 10 ticks is sufficient time for every
++ * cpu to have completed this task.
++ *
++ * This places an upper bound on the IRQ-off latency of the machine. Then
++ * again, being late doesn't lose the delta, just wrecks the sample.
++ *
++ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
++ * this would add another cross-cpu cacheline miss and atomic operation
++ * to the wakeup path. Instead we increment on whatever cpu the task ran
++ * when it went into uninterruptible state and decrement on whatever cpu
++ * did the wakeup. This means that only the sum of nr_uninterruptible over
++ * all cpus yields the correct result.
++ *
++ * This covers the NO_HZ=n code; for extra headaches, see the comment below.
++ */
++
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+-EXPORT_SYMBOL(avenrun);
++EXPORT_SYMBOL(avenrun); /* should be removed */
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
+
+ static long calc_load_fold_active(struct rq *this_rq)
+ {
+@@ -3422,6 +3483,9 @@ static long calc_load_fold_active(struct rq *this_rq)
+ return delta;
+ }
+
++/*
++ * a1 = a0 * e + a * (1 - e)
++ */
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+@@ -3433,30 +3497,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+
+ #ifdef CONFIG_NO_HZ
+ /*
+- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
++ * Handle NO_HZ for the global load-average.
++ *
++ * Since the above described distributed algorithm to compute the global
++ * load-average relies on per-cpu sampling from the tick, it is affected by
++ * NO_HZ.
++ *
++ * The basic idea is to fold the nr_active delta into a global idle-delta upon
++ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
++ * when we read the global state.
++ *
++ * Obviously reality has to ruin such a delightfully simple scheme:
++ *
++ * - When we go NO_HZ idle during the window, we can negate our sample
++ * contribution, causing under-accounting.
++ *
++ * We avoid this by keeping two idle-delta counters and flipping them
++ * when the window starts, thus separating old and new NO_HZ load.
++ *
++ * The only trick is the slight shift in index flip for read vs write.
++ *
++ * 0s 5s 10s 15s
++ * +10 +10 +10 +10
++ * |-|-----------|-|-----------|-|-----------|-|
++ * r:0 0 1 1 0 0 1 1 0
++ * w:0 1 1 0 0 1 1 0 0
++ *
++ * This ensures we'll fold the old idle contribution in this window while
++ * accumulating the new one.
++ *
++ * - When we wake up from NO_HZ idle during the window, we push up our
++ * contribution, since we effectively move our sample point to a known
++ * busy state.
++ *
++ * This is solved by pushing the window forward, and thus skipping the
++ * sample, for this cpu (effectively using the idle-delta for this cpu which
++ * was in effect at the time the window opened). This also solves the issue
++ * of having to deal with a cpu having been in NOHZ idle for multiple
++ * LOAD_FREQ intervals.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+-static atomic_long_t calc_load_tasks_idle;
++static atomic_long_t calc_load_idle[2];
++static int calc_load_idx;
+
+-static void calc_load_account_idle(struct rq *this_rq)
++static inline int calc_load_write_idx(void)
+ {
++ int idx = calc_load_idx;
++
++ /*
++ * See calc_global_nohz(), if we observe the new index, we also
++ * need to observe the new update time.
++ */
++ smp_rmb();
++
++ /*
++ * If the folding window started, make sure we start writing in the
++ * next idle-delta.
++ */
++ if (!time_before(jiffies, calc_load_update))
++ idx++;
++
++ return idx & 1;
++}
++
++static inline int calc_load_read_idx(void)
++{
++ return calc_load_idx & 1;
++}
++
++void calc_load_enter_idle(void)
++{
++ struct rq *this_rq = this_rq();
+ long delta;
+
++ /*
++ * We're going into NOHZ mode, if there's any pending delta, fold it
++ * into the pending idle delta.
++ */
+ delta = calc_load_fold_active(this_rq);
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks_idle);
++ if (delta) {
++ int idx = calc_load_write_idx();
++ atomic_long_add(delta, &calc_load_idle[idx]);
++ }
+ }
+
+-static long calc_load_fold_idle(void)
++void calc_load_exit_idle(void)
+ {
+- long delta = 0;
++ struct rq *this_rq = this_rq();
++
++ /*
++ * If we're still before the sample window, we're done.
++ */
++ if (time_before(jiffies, this_rq->calc_load_update))
++ return;
+
+ /*
+- * Its got a race, we don't care...
++ * We woke inside or after the sample window, which means we're already
++ * accounted through the nohz accounting, so skip the entire deal and
++ * sync up for the next window.
+ */
+- if (atomic_long_read(&calc_load_tasks_idle))
+- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
++ this_rq->calc_load_update = calc_load_update;
++ if (time_before(jiffies, this_rq->calc_load_update + 10))
++ this_rq->calc_load_update += LOAD_FREQ;
++}
++
++static long calc_load_fold_idle(void)
++{
++ int idx = calc_load_read_idx();
++ long delta = 0;
++
++ if (atomic_long_read(&calc_load_idle[idx]))
++ delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+
+ return delta;
+ }
+@@ -3542,66 +3694,39 @@ static void calc_global_nohz(void)
+ {
+ long delta, active, n;
+
+- /*
+- * If we crossed a calc_load_update boundary, make sure to fold
+- * any pending idle changes, the respective CPUs might have
+- * missed the tick driven calc_load_account_active() update
+- * due to NO_HZ.
+- */
+- delta = calc_load_fold_idle();
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks);
+-
+- /*
+- * It could be the one fold was all it took, we done!
+- */
+- if (time_before(jiffies, calc_load_update + 10))
+- return;
+-
+- /*
+- * Catch-up, fold however many we are behind still
+- */
+- delta = jiffies - calc_load_update - 10;
+- n = 1 + (delta / LOAD_FREQ);
++ if (!time_before(jiffies, calc_load_update + 10)) {
++ /*
++ * Catch up, folding however many intervals we are still behind.
++ */
++ delta = jiffies - calc_load_update - 10;
++ n = 1 + (delta / LOAD_FREQ);
+
+- active = atomic_long_read(&calc_load_tasks);
+- active = active > 0 ? active * FIXED_1 : 0;
++ active = atomic_long_read(&calc_load_tasks);
++ active = active > 0 ? active * FIXED_1 : 0;
+
+- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+- calc_load_update += n * LOAD_FREQ;
+-}
+-#else
+-static void calc_load_account_idle(struct rq *this_rq)
+-{
+-}
++ calc_load_update += n * LOAD_FREQ;
++ }
+
+-static inline long calc_load_fold_idle(void)
+-{
+- return 0;
++ /*
++ * Flip the idle index...
++ *
++ * Make sure we first write the new time then flip the index, so that
++ * calc_load_write_idx() will see the new time when it reads the new
++ * index, this avoids a double flip messing things up.
++ */
++ smp_wmb();
++ calc_load_idx++;
+ }
++#else /* !CONFIG_NO_HZ */
+
+-static void calc_global_nohz(void)
+-{
+-}
+-#endif
++static inline long calc_load_fold_idle(void) { return 0; }
++static inline void calc_global_nohz(void) { }
+
+-/**
+- * get_avenrun - get the load average array
+- * @loads: pointer to dest load array
+- * @offset: offset to add
+- * @shift: shift count to shift the result left
+- *
+- * These values are estimates at best, so no need for locking.
+- */
+-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+-{
+- loads[0] = (avenrun[0] + offset) << shift;
+- loads[1] = (avenrun[1] + offset) << shift;
+- loads[2] = (avenrun[2] + offset) << shift;
+-}
++#endif /* CONFIG_NO_HZ */
+
+ /*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+@@ -3609,11 +3734,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ */
+ void calc_global_load(unsigned long ticks)
+ {
+- long active;
++ long active, delta;
+
+ if (time_before(jiffies, calc_load_update + 10))
+ return;
+
++ /*
++ * Fold the 'old' idle-delta to include all NO_HZ cpus.
++ */
++ delta = calc_load_fold_idle();
++ if (delta)
++ atomic_long_add(delta, &calc_load_tasks);
++
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
+
+@@ -3624,12 +3756,7 @@ void calc_global_load(unsigned long ticks)
+ calc_load_update += LOAD_FREQ;
+
+ /*
+- * Account one period with whatever state we found before
+- * folding in the nohz state and ageing the entire idle period.
+- *
+- * This avoids loosing a sample when we go idle between
+- * calc_load_account_active() (10 ticks ago) and now and thus
+- * under-accounting.
++ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+ */
+ calc_global_nohz();
+ }
+@@ -3646,7 +3773,6 @@ static void calc_load_account_active(struct rq *this_rq)
+ return;
+
+ delta = calc_load_fold_active(this_rq);
+- delta += calc_load_fold_idle();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
+@@ -3654,6 +3780,10 @@ static void calc_load_account_active(struct rq *this_rq)
+ }
+
+ /*
++ * End of global load-average stuff
++ */
++
++/*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
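The avenrun update above is plain fixed-point exponential decay and can be checked in userspace. A minimal sketch, assuming the kernel's FSHIFT = 11 and the standard EXP_1/EXP_5/EXP_15 constants (the demo main() and its constant load of 4 tasks are illustrative inputs, not part of the patch):

    #include <stdio.h>

    #define FSHIFT   11                 /* bits of fixed-point precision */
    #define FIXED_1  (1 << FSHIFT)      /* 1.0 in fixed-point */
    #define EXP_1    1884               /* FIXED_1/exp(5s/1min) */
    #define EXP_5    2014               /* FIXED_1/exp(5s/5min) */
    #define EXP_15   2037               /* FIXED_1/exp(5s/15min) */

    /* a1 = a0 * e + a * (1 - e), everything in FSHIFT fixed-point */
    static unsigned long
    calc_load(unsigned long load, unsigned long exp, unsigned long active)
    {
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
    }

    int main(void)
    {
        unsigned long avenrun[3] = { 0, 0, 0 };
        unsigned long active = 4 * FIXED_1;   /* pretend 4 runnable tasks */

        /* ten LOAD_FREQ (5 s) intervals at a constant load of 4 */
        for (int i = 0; i < 10; i++) {
            avenrun[0] = calc_load(avenrun[0], EXP_1,  active);
            avenrun[1] = calc_load(avenrun[1], EXP_5,  active);
            avenrun[2] = calc_load(avenrun[2], EXP_15, active);
            printf("%2d: %5.2f %5.2f %5.2f\n", i + 1,
                   avenrun[0] / (double)FIXED_1,
                   avenrun[1] / (double)FIXED_1,
                   avenrun[2] / (double)FIXED_1);
        }
        return 0;
    }

The three averages converge toward 4.00 at the familiar 1/5/15-minute rates, which is exactly what the distributed folding above has to preserve.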
+diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
+index 0a51882..be92bfe 100644
+--- a/kernel/sched_idletask.c
++++ b/kernel/sched_idletask.c
+@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
+ static struct task_struct *pick_next_task_idle(struct rq *rq)
+ {
+ schedstat_inc(rq, sched_goidle);
+- calc_load_account_idle(rq);
+ return rq->idle;
+ }
+
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 4b85a7a..f1eb182 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -31,8 +31,6 @@ unsigned long tick_nsec;
+ u64 tick_length;
+ static u64 tick_length_base;
+
+-static struct hrtimer leap_timer;
+-
+ #define MAX_TICKADJ 500LL /* usecs */
+ #define MAX_TICKADJ_SCALED \
+ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+@@ -350,60 +348,60 @@ void ntp_clear(void)
+ }
+
+ /*
+- * Leap second processing. If in leap-insert state at the end of the
+- * day, the system clock is set back one second; if in leap-delete
+- * state, the system clock is set ahead one second.
++ * this routine handles the overflow of the microsecond field
++ *
++ * The tricky bits of code to handle the accurate clock support
++ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
++ * They were originally developed for SUN and DEC kernels.
++ * All the kudos should go to Dave for this stuff.
++ *
++ * Also handles leap second processing, and returns leap offset
+ */
+-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
++int second_overflow(unsigned long secs)
+ {
+- enum hrtimer_restart res = HRTIMER_NORESTART;
+-
+- write_seqlock(&xtime_lock);
++ int leap = 0;
++ s64 delta;
+
++ /*
++ * Leap second processing. If in leap-insert state at the end of the
++ * day, the system clock is set back one second; if in leap-delete
++ * state, the system clock is set ahead one second.
++ */
+ switch (time_state) {
+ case TIME_OK:
++ if (time_status & STA_INS)
++ time_state = TIME_INS;
++ else if (time_status & STA_DEL)
++ time_state = TIME_DEL;
+ break;
+ case TIME_INS:
+- timekeeping_leap_insert(-1);
+- time_state = TIME_OOP;
+- printk(KERN_NOTICE
+- "Clock: inserting leap second 23:59:60 UTC\n");
+- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
+- res = HRTIMER_RESTART;
++ if (secs % 86400 == 0) {
++ leap = -1;
++ time_state = TIME_OOP;
++ time_tai++;
++ printk(KERN_NOTICE
++ "Clock: inserting leap second 23:59:60 UTC\n");
++ }
+ break;
+ case TIME_DEL:
+- timekeeping_leap_insert(1);
+- time_tai--;
+- time_state = TIME_WAIT;
+- printk(KERN_NOTICE
+- "Clock: deleting leap second 23:59:59 UTC\n");
++ if ((secs + 1) % 86400 == 0) {
++ leap = 1;
++ time_tai--;
++ time_state = TIME_WAIT;
++ printk(KERN_NOTICE
++ "Clock: deleting leap second 23:59:59 UTC\n");
++ }
+ break;
+ case TIME_OOP:
+- time_tai++;
+ time_state = TIME_WAIT;
+- /* fall through */
++ break;
++
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ }
+
+- write_sequnlock(&xtime_lock);
+-
+- return res;
+-}
+-
+-/*
+- * this routine handles the overflow of the microsecond field
+- *
+- * The tricky bits of code to handle the accurate clock support
+- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+- * They were originally developed for SUN and DEC kernels.
+- * All the kudos should go to Dave for this stuff.
+- */
+-void second_overflow(void)
+-{
+- s64 delta;
+
+ /* Bump the maxerror field */
+ time_maxerror += MAXFREQ / NSEC_PER_USEC;
+@@ -423,23 +421,25 @@ void second_overflow(void)
+ pps_dec_valid();
+
+ if (!time_adjust)
+- return;
++ goto out;
+
+ if (time_adjust > MAX_TICKADJ) {
+ time_adjust -= MAX_TICKADJ;
+ tick_length += MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ if (time_adjust < -MAX_TICKADJ) {
+ time_adjust += MAX_TICKADJ;
+ tick_length -= MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+ << NTP_SCALE_SHIFT;
+ time_adjust = 0;
++out:
++ return leap;
+ }
+
+ #ifdef CONFIG_GENERIC_CMOS_UPDATE
+@@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
+ static inline void notify_cmos_timer(void) { }
+ #endif
+
+-/*
+- * Start the leap seconds timer:
+- */
+-static inline void ntp_start_leap_timer(struct timespec *ts)
+-{
+- long now = ts->tv_sec;
+-
+- if (time_status & STA_INS) {
+- time_state = TIME_INS;
+- now += 86400 - now % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+-
+- return;
+- }
+-
+- if (time_status & STA_DEL) {
+- time_state = TIME_DEL;
+- now += 86400 - (now + 1) % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+- }
+-}
+
+ /*
+ * Propagate a new txc->status value into the NTP state:
+@@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+ time_status &= STA_RONLY;
+ time_status |= txc->status & ~STA_RONLY;
+
+- switch (time_state) {
+- case TIME_OK:
+- ntp_start_leap_timer(ts);
+- break;
+- case TIME_INS:
+- case TIME_DEL:
+- time_state = TIME_OK;
+- ntp_start_leap_timer(ts);
+- case TIME_WAIT:
+- if (!(time_status & (STA_INS | STA_DEL)))
+- time_state = TIME_OK;
+- break;
+- case TIME_OOP:
+- hrtimer_restart(&leap_timer);
+- break;
+- }
+ }
+ /*
+ * Called with the xtime lock held, so we can access and modify
+@@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+-
+- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+- hrtimer_cancel(&leap_timer);
+ }
+
+ if (txc->modes & ADJ_SETOFFSET) {
+@@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+ void __init ntp_init(void)
+ {
+ ntp_clear();
+- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+- leap_timer.function = ntp_leap_second;
+ }
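With the hrtimer gone, the leap decision reduces to modular arithmetic on the second count, which is easy to sanity-check outside the kernel. A rough sketch of just the day-boundary tests (state machine and TAI bookkeeping omitted; secs stands in for xtime.tv_sec):

    #include <stdio.h>

    /* TIME_INS fires when secs lands exactly on a UTC day boundary
     * (the inserted second replays as 23:59:60); TIME_DEL fires one
     * second earlier, skipping 23:59:59. */
    static int leap_insert_now(unsigned long secs)
    {
        return secs % 86400 == 0;
    }

    static int leap_delete_now(unsigned long secs)
    {
        return (secs + 1) % 86400 == 0;
    }

    int main(void)
    {
        /* 2012-07-01 00:00:00 UTC is 1341100800; the 2012 leap second
         * was inserted just before it. */
        unsigned long midnight = 1341100800UL;

        printf("insert check at %lu: %d\n", midnight, leap_insert_now(midnight));
        printf("delete check at %lu: %d\n", midnight - 1, leap_delete_now(midnight - 1));
        return 0;
    }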
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index c923640..9955ebd 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -430,6 +430,7 @@ void tick_nohz_stop_sched_tick(int inidle)
+ */
+ if (!ts->tick_stopped) {
+ select_nohz_load_balancer(1);
++ calc_load_enter_idle();
+
+ ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+ ts->tick_stopped = 1;
+@@ -563,6 +564,7 @@ void tick_nohz_restart_sched_tick(void)
+ account_idle_ticks(ticks);
+ #endif
+
++ calc_load_exit_idle();
+ touch_softlockup_watchdog();
+ /*
+ * Cancel the scheduled timer and restore the tick
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 2378413..03e67d4 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
+ static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+ static struct timespec total_sleep_time;
+
++/* Offset clock monotonic -> clock realtime */
++static ktime_t offs_real;
++
++/* Offset clock monotonic -> clock boottime */
++static ktime_t offs_boot;
++
+ /*
+ * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
+ */
+ static struct timespec raw_time;
+
+-/* flag for if timekeeping is suspended */
+-int __read_mostly timekeeping_suspended;
++/* must hold write on xtime_lock */
++static void update_rt_offset(void)
++{
++ struct timespec tmp, *wtm = &wall_to_monotonic;
+
+-/* must hold xtime_lock */
+-void timekeeping_leap_insert(int leapsecond)
++ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
++ offs_real = timespec_to_ktime(tmp);
++}
++
++/* must hold write on xtime_lock */
++static void timekeeping_update(bool clearntp)
+ {
+- xtime.tv_sec += leapsecond;
+- wall_to_monotonic.tv_sec -= leapsecond;
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ if (clearntp) {
++ timekeeper.ntp_error = 0;
++ ntp_clear();
++ }
++ update_rt_offset();
++ update_vsyscall(&xtime, &wall_to_monotonic,
++ timekeeper.clock, timekeeper.mult);
+ }
+
++
++
++/* flag for if timekeeping is suspended */
++int __read_mostly timekeeping_suspended;
++
+ /**
+ * timekeeping_forward_now - update clock to the current time
+ *
+@@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
+
+ xtime = *tv;
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -591,6 +603,7 @@ void __init timekeeping_init(void)
+ }
+ set_normalized_timespec(&wall_to_monotonic,
+ -boot.tv_sec, -boot.tv_nsec);
++ update_rt_offset();
+ total_sleep_time.tv_sec = 0;
+ total_sleep_time.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+@@ -599,6 +612,12 @@ void __init timekeeping_init(void)
+ /* time in seconds when suspend began */
+ static struct timespec timekeeping_suspend_time;
+
++static void update_sleep_time(struct timespec t)
++{
++ total_sleep_time = t;
++ offs_boot = timespec_to_ktime(t);
++}
++
+ /**
+ * __timekeeping_inject_sleeptime - Internal function to add sleep interval
+ * @delta: pointer to a timespec delta value
+@@ -616,7 +635,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
+
+ xtime = timespec_add(xtime, *delta);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
+- total_sleep_time = timespec_add(total_sleep_time, *delta);
++ update_sleep_time(timespec_add(total_sleep_time, *delta));
+ }
+
+
+@@ -645,10 +664,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
+
+ __timekeeping_inject_sleeptime(delta);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -683,6 +699,7 @@ static void timekeeping_resume(void)
+ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.ntp_error = 0;
+ timekeeping_suspended = 0;
++ timekeeping_update(false);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ touch_softlockup_watchdog();
+@@ -942,9 +959,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+
+ timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+ while (timekeeper.xtime_nsec >= nsecps) {
++ int leap;
+ timekeeper.xtime_nsec -= nsecps;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+ /* Accumulate raw time */
+@@ -1050,14 +1072,17 @@ static void update_wall_time(void)
+ * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+ */
+ if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
++ int leap;
+ xtime.tv_nsec -= NSEC_PER_SEC;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+- /* check to see if there is a new clocksource to use */
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(false);
+ }
+
+ /**
+@@ -1216,6 +1241,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+ } while (read_seqretry(&xtime_lock, seq));
+ }
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++/**
++ * ktime_get_update_offsets - hrtimer helper
++ * @real: pointer to storage for monotonic -> realtime offset
++ * @boot: pointer to storage for monotonic -> boottime offset
++ *
++ * Returns the current monotonic time and updates the offsets.
++ * Called from hrtimer_interrupt() or retrigger_next_event().
++ */
++ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
++{
++ ktime_t now;
++ unsigned int seq;
++ u64 secs, nsecs;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++
++ secs = xtime.tv_sec;
++ nsecs = xtime.tv_nsec;
++ nsecs += timekeeping_get_ns();
++ /* If arch requires, add in gettimeoffset() */
++ nsecs += arch_gettimeoffset();
++
++ *real = offs_real;
++ *boot = offs_boot;
++ } while (read_seqretry(&xtime_lock, seq));
++
++ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
++ now = ktime_sub(now, *real);
++ return now;
++}
++#endif
++
+ /**
+ * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
+ */
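update_rt_offset() above is just timespec negation with renormalization: offs_real is -wall_to_monotonic. A small userspace sketch of that identity (the ts struct and set_normalized helper are illustrative stand-ins for the kernel types):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    struct ts { long long tv_sec; long tv_nsec; };

    /* mirror of set_normalized_timespec(): fold tv_nsec into [0, 1s) */
    static void set_normalized(struct ts *res, long long sec, long long nsec)
    {
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
        res->tv_sec = sec;
        res->tv_nsec = (long)nsec;
    }

    int main(void)
    {
        struct ts boot = { 1000, 250000000 };   /* wall time at boot */
        struct ts wtm, offs_real;

        /* wall_to_monotonic = -boot, kept normalized */
        set_normalized(&wtm, -boot.tv_sec, -boot.tv_nsec);
        /* update_rt_offset(): offs_real = -wall_to_monotonic */
        set_normalized(&offs_real, -wtm.tv_sec, -wtm.tv_nsec);

        printf("wtm       = %lld s %09ld ns\n", wtm.tv_sec, wtm.tv_nsec);
        printf("offs_real = %lld s %09ld ns\n", offs_real.tv_sec, offs_real.tv_nsec);
        return 0;
    }

Normalization is what makes the double negation round-trip: wtm comes out as (-1001 s, 750000000 ns) and offs_real as the original (1000 s, 250000000 ns).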
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 8fb8a40..50f1c60 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -592,8 +592,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ if (err) {
+ putback_lru_pages(&cc->migratepages);
+ cc->nr_migratepages = 0;
++ if (err == -ENOMEM) {
++ ret = COMPACT_PARTIAL;
++ goto out;
++ }
+ }
+-
+ }
+
+ out:
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5f5c545..7c535b0 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
+ */
+ static DEFINE_SPINLOCK(hugetlb_lock);
+
++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
++{
++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
++
++ spin_unlock(&spool->lock);
++
++ /* If no pages are used, and no other handles to the subpool
++ * remain, free the subpool. */
++ if (free)
++ kfree(spool);
++}
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
++{
++ struct hugepage_subpool *spool;
++
++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
++ if (!spool)
++ return NULL;
++
++ spin_lock_init(&spool->lock);
++ spool->count = 1;
++ spool->max_hpages = nr_blocks;
++ spool->used_hpages = 0;
++
++ return spool;
++}
++
++void hugepage_put_subpool(struct hugepage_subpool *spool)
++{
++ spin_lock(&spool->lock);
++ BUG_ON(!spool->count);
++ spool->count--;
++ unlock_or_release_subpool(spool);
++}
++
++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ int ret = 0;
++
++ if (!spool)
++ return 0;
++
++ spin_lock(&spool->lock);
++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
++ spool->used_hpages += delta;
++ } else {
++ ret = -ENOMEM;
++ }
++ spin_unlock(&spool->lock);
++
++ return ret;
++}
++
++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ if (!spool)
++ return;
++
++ spin_lock(&spool->lock);
++ spool->used_hpages -= delta;
++ /* If hugetlbfs_put_super couldn't free spool due to
++ * an outstanding quota reference, free it now. */
++ unlock_or_release_subpool(spool);
++}
++
++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
++{
++ return HUGETLBFS_SB(inode->i_sb)->spool;
++}
++
++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
++{
++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
++}
++
+ /*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+@@ -533,9 +611,9 @@ static void free_huge_page(struct page *page)
+ */
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+- struct address_space *mapping;
++ struct hugepage_subpool *spool =
++ (struct hugepage_subpool *)page_private(page);
+
+- mapping = (struct address_space *) page_private(page);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ BUG_ON(page_count(page));
+@@ -551,8 +629,7 @@ static void free_huge_page(struct page *page)
+ enqueue_huge_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+- if (mapping)
+- hugetlb_put_quota(mapping, 1);
++ hugepage_subpool_put_pages(spool, 1);
+ }
+
+ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+@@ -966,11 +1043,12 @@ static void return_unused_surplus_pages(struct hstate *h,
+ /*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation. Where it does not we will need to logically increase
+- * reservation and actually increase quota before an allocation can occur.
+- * Where any new reservation would be required the reservation change is
+- * prepared, but not committed. Once the page has been quota'd allocated
+- * an instantiated the change should be committed via vma_commit_reservation.
+- * No action is required on failure.
++ * reservation and actually increase subpool usage before an allocation
++ * can occur. Where any new reservation would be required, the
++ * reservation change is prepared, but not committed. Once the page
++ * has been allocated from the subpool and instantiated, the change should
++ * be committed via vma_commit_reservation. No action is required on
++ * failure.
+ */
+ static long vma_needs_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+@@ -1019,24 +1097,24 @@ static void vma_commit_reservation(struct hstate *h,
+ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ struct hstate *h = hstate_vma(vma);
+ struct page *page;
+- struct address_space *mapping = vma->vm_file->f_mapping;
+- struct inode *inode = mapping->host;
+ long chg;
+
+ /*
+- * Processes that did not create the mapping will have no reserves and
+- * will not have accounted against quota. Check that the quota can be
+- * made before satisfying the allocation
+- * MAP_NORESERVE mappings may also need pages and quota allocated
+- * if no reserve mapping overlaps.
++ * Processes that did not create the mapping will have no
++ * reserves and will not have accounted against the subpool
++ * limit. Check that the subpool limit can be met before
++ * satisfying the allocation. MAP_NORESERVE mappings may also
++ * need pages and subpool limit allocated if no reserve
++ * mapping overlaps.
+ */
+ chg = vma_needs_reservation(h, vma, addr);
+ if (chg < 0)
+ return ERR_PTR(-VM_FAULT_OOM);
+ if (chg)
+- if (hugetlb_get_quota(inode->i_mapping, chg))
++ if (hugepage_subpool_get_pages(spool, chg))
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+
+ spin_lock(&hugetlb_lock);
+@@ -1046,12 +1124,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ if (!page) {
+ page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+ if (!page) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+ }
+ }
+
+- set_page_private(page, (unsigned long) mapping);
++ set_page_private(page, (unsigned long)spool);
+
+ vma_commit_reservation(h, vma, addr);
+
+@@ -2081,6 +2159,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ {
+ struct hstate *h = hstate_vma(vma);
+ struct resv_map *reservations = vma_resv_map(vma);
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ unsigned long reserve;
+ unsigned long start;
+ unsigned long end;
+@@ -2096,7 +2175,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+
+ if (reserve) {
+ hugetlb_acct_memory(h, -reserve);
+- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
++ hugepage_subpool_put_pages(spool, reserve);
+ }
+ }
+ }
+@@ -2326,7 +2405,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ address = address & huge_page_mask(h);
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ + (vma->vm_pgoff >> PAGE_SHIFT);
+- mapping = (struct address_space *)page_private(page);
++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
+ * Take the mapping lock for the duration of the table walk. As
+@@ -2865,11 +2944,12 @@ int hugetlb_reserve_pages(struct inode *inode,
+ {
+ long ret, chg;
+ struct hstate *h = hstate_inode(inode);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ /*
+ * Only apply hugepage reservation if asked. At fault time, an
+ * attempt will be made for VM_NORESERVE to allocate a page
+- * and filesystem quota without using reserves
++ * without using reserves
+ */
+ if (vm_flags & VM_NORESERVE)
+ return 0;
+@@ -2898,19 +2978,19 @@ int hugetlb_reserve_pages(struct inode *inode,
+ goto out_err;
+ }
+
+- /* There must be enough filesystem quota for the mapping */
+- if (hugetlb_get_quota(inode->i_mapping, chg)) {
++ /* There must be enough pages in the subpool for the mapping */
++ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
+ /*
+ * Check enough hugepages are available for the reservation.
+- * Hand back the quota if there are not
++ * Hand the pages back to the subpool if there are not
+ */
+ ret = hugetlb_acct_memory(h, chg);
+ if (ret < 0) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ goto out_err;
+ }
+
+@@ -2938,12 +3018,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+ {
+ struct hstate *h = hstate_inode(inode);
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
+ spin_unlock(&inode->i_lock);
+
+- hugetlb_put_quota(inode->i_mapping, (chg - freed));
++ hugepage_subpool_put_pages(spool, (chg - freed));
+ hugetlb_acct_memory(h, -(chg - freed));
+ }
+
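The subpool that replaces the old per-mapping quota is a small reference-counted limit object. A compact userspace model of its semantics, with a pthread mutex standing in for the kernel spinlock (the names mirror the patch, but this is a sketch rather than the kernel code):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct subpool {
        pthread_mutex_t lock;
        long count;        /* handles held, e.g. by the superblock */
        long max_hpages;   /* upper limit on pages */
        long used_hpages;
    };

    /* drop the lock, and free the pool once nothing references it */
    static void unlock_or_release(struct subpool *spool)
    {
        bool free_it = spool->count == 0 && spool->used_hpages == 0;

        pthread_mutex_unlock(&spool->lock);
        if (free_it)
            free(spool);
    }

    static struct subpool *subpool_new(long nr_blocks)
    {
        struct subpool *spool = calloc(1, sizeof(*spool));

        if (!spool)
            return NULL;
        pthread_mutex_init(&spool->lock, NULL);
        spool->count = 1;
        spool->max_hpages = nr_blocks;
        return spool;
    }

    static int subpool_get_pages(struct subpool *spool, long delta)
    {
        int ret = 0;

        pthread_mutex_lock(&spool->lock);
        if (spool->used_hpages + delta <= spool->max_hpages)
            spool->used_hpages += delta;
        else
            ret = -ENOMEM;
        pthread_mutex_unlock(&spool->lock);
        return ret;
    }

    static void subpool_put_pages(struct subpool *spool, long delta)
    {
        pthread_mutex_lock(&spool->lock);
        spool->used_hpages -= delta;
        unlock_or_release(spool);   /* may free if the super is gone */
    }

The point of the count/used_hpages pair is lifetime: dropping the filesystem's handle while pages are still outstanding leaves the pool alive until the last put, which closes the use-after-free the old quota scheme had.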
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index fbe2d2c..8342119 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+ * them before going back to sleep.
+ */
+ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+- schedule();
++
++ if (!kthread_should_stop())
++ schedule();
++
+ set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+ } else {
+ if (remaining)
+@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
+ }
+
+ /*
+- * Called by memory hotplug when all memory in a node is offlined.
++ * Called by memory hotplug when all memory in a node is offlined. Caller must
++ * hold lock_memory_hotplug().
+ */
+ void kswapd_stop(int nid)
+ {
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+- if (kswapd)
++ if (kswapd) {
+ kthread_stop(kswapd);
++ NODE_DATA(nid)->kswapd = NULL;
++ }
+ }
+
+ static int __init kswapd_init(void)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index cde1b4a..46cca3a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
+ if (err < 0)
+ goto free_skb;
+
+- /* to be able to check the received tx sock reference in raw_rcv() */
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+-
+ skb->dev = dev;
+ skb->sk = sk;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1cbddc9..5738654 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2079,25 +2079,6 @@ static int dev_gso_segment(struct sk_buff *skb, int features)
+ return 0;
+ }
+
+-/*
+- * Try to orphan skb early, right before transmission by the device.
+- * We cannot orphan skb if tx timestamp is requested or the sk-reference
+- * is needed on driver level for other reasons, e.g. see net/can/raw.c
+- */
+-static inline void skb_orphan_try(struct sk_buff *skb)
+-{
+- struct sock *sk = skb->sk;
+-
+- if (sk && !skb_shinfo(skb)->tx_flags) {
+- /* skb_tx_hash() wont be able to get sk.
+- * We copy sk_hash into skb->rxhash
+- */
+- if (!skb->rxhash)
+- skb->rxhash = sk->sk_hash;
+- skb_orphan(skb);
+- }
+-}
+-
+ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+ {
+ return ((features & NETIF_F_GEN_CSUM) ||
+@@ -2182,8 +2163,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+- skb_orphan_try(skb);
+-
+ features = netif_skb_features(skb);
+
+ if (vlan_tx_tag_present(skb) &&
+@@ -2293,7 +2272,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+- hash = (__force u16) skb->protocol ^ skb->rxhash;
++ hash = (__force u16) skb->protocol;
+ hash = jhash_1word(hash, hashrnd);
+
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
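The last line of __skb_tx_hash() maps a 32-bit hash onto qcount queues with a multiply and shift instead of a modulo: the high 32 bits of hash * qcount are uniformly distributed over [0, qcount). A quick standalone check of that mapping (the queue count and hash stride below are arbitrary test values):

    #include <stdio.h>
    #include <stdint.h>

    /* (hash * qcount) as a 64-bit product; keep the high 32 bits */
    static uint16_t pick_queue(uint32_t hash, uint16_t qcount)
    {
        return (uint16_t)(((uint64_t)hash * qcount) >> 32);
    }

    int main(void)
    {
        unsigned hist[8] = { 0 };

        /* crude uniformity check over a multiplicative hash stride */
        for (uint32_t i = 0; i < 64000; i++)
            hist[pick_queue(i * 2654435761u, 8)]++;

        for (int q = 0; q < 8; q++)
            printf("queue %d: %u\n", q, hist[q]);
        return 0;
    }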
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9726927..32e6ca2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+
+ if (th->syn) {
++ if (th->fin)
++ goto discard;
+ if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+ return 1;
+
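The tcp_input.c hunk simply refuses segments that carry SYN and FIN together, a combination no valid handshake produces. The check itself is a two-flag test; a trivial sketch (the flag values are the standard TCP header bits, the helper is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define TH_FIN 0x01
    #define TH_SYN 0x02

    /* discard SYN+FIN: never sent by a conforming stack, historically
     * used to probe or confuse connection-tracking state machines */
    static bool discard_synfin(uint8_t tcp_flags)
    {
        return (tcp_flags & TH_SYN) && (tcp_flags & TH_FIN);
    }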
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 274d150..cf98d62 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -380,7 +380,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index d38815d..74d5292 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -813,7 +813,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
+- if (ntype != otype) {
++ if (ntype != otype && netif_running(dev)) {
+ err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
+ ntype);
+ if (err)
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index a272356..2ae4817 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -9,12 +9,6 @@ fi
+ DEPMOD=$1
+ KERNELRELEASE=$2
+
+-if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+- echo "Warning: you may need to install module-init-tools" >&2
+- echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+- sleep 1
+-fi
+-
+ if ! test -r System.map -a -x "$DEPMOD"; then
+ exit 0
+ fi
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 9f614b4..272407c 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+ */
+ hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+ if (ei->type == KVM_IRQ_ROUTING_MSI ||
++ ue->type == KVM_IRQ_ROUTING_MSI ||
+ ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+ return r;
+
diff --git a/3.2.54/1024_linux-3.2.25.patch b/3.2.54/1024_linux-3.2.25.patch
new file mode 100644
index 0000000..e95c213
--- /dev/null
+++ b/3.2.54/1024_linux-3.2.25.patch
@@ -0,0 +1,4503 @@
+diff --git a/Makefile b/Makefile
+index 80bb4fd..e13e4e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 559da19..578e5a0 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1016,7 +1016,8 @@
+ /* Macros for setting and retrieving special purpose registers */
+ #ifndef __ASSEMBLY__
+ #define mfmsr() ({unsigned long rval; \
+- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
++ asm volatile("mfmsr %0" : "=r" (rval) : \
++ : "memory"); rval;})
+ #ifdef CONFIG_PPC_BOOK3S_64
+ #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v) : "memory")
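The mfmsr() hunk adds a "memory" clobber so the compiler must treat the asm as touching memory and cannot cache or reorder accesses around it. The classic demonstration of the same clobber used as a pure compiler barrier (ready is a hypothetical flag written asynchronously, e.g. by an interrupt handler):

    int ready;   /* hypothetical: written asynchronously elsewhere */

    int wait_unsafe(void)
    {
        /* without a barrier the compiler may load 'ready' once,
         * keep it in a register, and spin forever */
        while (!ready)
            ;
        return ready;
    }

    int wait_with_barrier(void)
    {
        while (!ready)
            asm volatile("" ::: "memory");   /* forces a reload of 'ready' */
        return ready;
    }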
+diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
+index bf99cfa..6324008 100644
+--- a/arch/powerpc/kernel/ftrace.c
++++ b/arch/powerpc/kernel/ftrace.c
+@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
+
+ /*
+ * On PPC32 the trampoline looks like:
+- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
+- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
+- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
++ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
++ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
++ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
+ * 0x4e, 0x80, 0x04, 0x20 bctr
+ */
+
+@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
+ pr_devel(" %08x %08x ", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+- (jmp[2] != 0x7d6903a6) ||
++ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
++ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
++ (jmp[2] != 0x7d8903a6) ||
+ (jmp[3] != 0x4e800420)) {
+ printk(KERN_ERR "Not a trampoline\n");
+ return -EINVAL;
+diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
+index 6e0073e..07c7bf4 100644
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
+ void __cpuinit cpu_init(void)
+ {
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
++ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+
+ get_cpu_id(id);
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
++ memset(idle, 0, sizeof(*idle));
+ }
+
+ /*
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 3ea8728..1df64a8 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1020,14 +1020,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ unsigned int cpu = (unsigned int)(long)hcpu;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+- struct s390_idle_data *idle;
+ int err = 0;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- idle = &per_cpu(s390_idle, cpu);
+- memset(idle, 0, sizeof(struct s390_idle_data));
+ err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+ break;
+ case CPU_DEAD:
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index 563a09d..29c95d7 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
+ const char *buf, size_t size)
+ {
+ unsigned long val;
+- int cpu = dev->id;
+- int ret = 0;
+- char *end;
++ int cpu;
++ ssize_t ret = 0, tmp_ret;
+
+- val = simple_strtoul(buf, &end, 0);
+- if (end == buf)
++ /* allow reload only from the BSP */
++ if (boot_cpu_data.cpu_index != dev->id)
+ return -EINVAL;
+
+- if (val == 1) {
+- get_online_cpus();
+- if (cpu_online(cpu))
+- ret = reload_for_cpu(cpu);
+- put_online_cpus();
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++
++ if (val != 1)
++ return size;
++
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ tmp_ret = reload_for_cpu(cpu);
++ if (tmp_ret != 0)
++ pr_warn("Error reloading microcode on CPU %d\n", cpu);
++
++ /* save retval of the first encountered reload error */
++ if (!ret)
++ ret = tmp_ret;
+ }
++ put_online_cpus();
+
+ if (!ret)
+ ret = size;
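The microcode hunk also swaps simple_strtoul() for kstrtoul(), which fails on trailing garbage and overflow instead of silently parsing a prefix. A userspace analogue of that stricter contract (parse_ulong is an illustrative helper, not a kernel API):

    #include <errno.h>
    #include <stdlib.h>

    /* 0 on success; -EINVAL for empty input or trailing junk,
     * -ERANGE on overflow (kstrtoul additionally tolerates one
     * trailing '\n', since sysfs writes usually carry one) */
    static int parse_ulong(const char *s, int base, unsigned long *res)
    {
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(s, &end, base);
        if (errno == ERANGE)
            return -ERANGE;
        if (end == s || *end != '\0')
            return -EINVAL;
        *res = val;
        return 0;
    }

Under the old code a write of "1abc" would still trigger a reload; under the stricter parse it is rejected outright.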
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 6dd8955..0951b81 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
+ }
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
++
++/*
++ * Twinhead H12Y needs us to block out a region otherwise we map devices
++ * there and any access kills the box.
++ *
++ * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
++ *
++ * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
++ */
++static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
++{
++ if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
++ pr_info("Reserving memory on Twinhead H12Y\n");
++ request_mem_region(0xFFB00000, 0x100000, "twinhead");
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 15de223..49d9e91 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -607,7 +607,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
+
+ int blk_get_queue(struct request_queue *q)
+ {
+- if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (likely(!blk_queue_dead(q))) {
+ kobject_get(&q->kobj);
+ return 0;
+ }
+@@ -754,7 +754,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
+ int may_queue;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ may_queue = elv_may_queue(q, rw_flags);
+@@ -874,7 +874,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
+ struct io_context *ioc;
+ struct request_list *rl = &q->rq;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index a1ebceb..6053285 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ {
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (unlikely(blk_queue_dead(q))) {
+ rq->errors = -ENXIO;
+ if (rq->end_io)
+ rq->end_io(rq, rq->errors);
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e7f9f65..f0b2ca8 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+ if (!entry->show)
+ return -EIO;
+ mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+
+ q = container_of(kobj, struct request_queue, kobj);
+ mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 4553245..5eed6a7 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+ struct request_queue *q = td->queue;
+
+ /* no throttling for dead queue */
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ rcu_read_lock();
+@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+ spin_lock_irq(q->queue_lock);
+
+ /* Make sure @q is still alive */
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (unlikely(blk_queue_dead(q))) {
+ kfree(tg);
+ return NULL;
+ }
+diff --git a/block/blk.h b/block/blk.h
+index 3f6551b..e38691d 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
+ q->flush_queue_delayed = 1;
+ return NULL;
+ }
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
++ if (unlikely(blk_queue_dead(q)) ||
+ !q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
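Every hunk above makes the same mechanical substitution: the open-coded test_bit(QUEUE_FLAG_DEAD, ...) becomes the named predicate blk_queue_dead(q), defined once in the block layer headers. A generic sketch of the pattern (the model struct and bit helper are illustrative):

    #include <stdbool.h>

    #define QUEUE_FLAG_DEAD 5UL

    struct queue_model {
        unsigned long queue_flags;
    };

    static inline bool test_bit_model(unsigned long nr,
                                      const unsigned long *addr)
    {
        return (*addr >> nr) & 1UL;
    }

    /* one named predicate instead of many open-coded flag tests */
    static inline bool queue_dead(const struct queue_model *q)
    {
        return test_bit_model(QUEUE_FLAG_DEAD, &q->queue_flags);
    }

Besides readability, centralizing the test gives later patches a single place to change what "dead" means.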
+diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
+index 6512b20..d1fcbc0 100644
+--- a/drivers/acpi/ac.c
++++ b/drivers/acpi/ac.c
+@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
+ ac->charger.properties = ac_props;
+ ac->charger.num_properties = ARRAY_SIZE(ac_props);
+ ac->charger.get_property = get_ac_property;
+- power_supply_register(&ac->device->dev, &ac->charger);
++ result = power_supply_register(&ac->device->dev, &ac->charger);
++ if (result)
++ goto end;
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+index eaf35f8..d894731 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+@@ -118,9 +118,9 @@ dispatch_dma:
+ // mthd 0x030c-0x0340, various stuff
+ .b16 0xc3 14
+ .b32 ctx_src_address_high ~0x000000ff
+-.b32 ctx_src_address_low ~0xfffffff0
++.b32 ctx_src_address_low ~0xffffffff
+ .b32 ctx_dst_address_high ~0x000000ff
+-.b32 ctx_dst_address_low ~0xfffffff0
++.b32 ctx_dst_address_low ~0xffffffff
+ .b32 ctx_src_pitch ~0x0007ffff
+ .b32 ctx_dst_pitch ~0x0007ffff
+ .b32 ctx_xcnt ~0x0000ffff
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+index 2731de2..e2a0e88 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+@@ -1,37 +1,72 @@
+-uint32_t nva3_pcopy_data[] = {
++u32 nva3_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_dma */
++/* 0x0004: ctx_dma_query */
+ 0x00000000,
++/* 0x0008: ctx_dma_src */
+ 0x00000000,
++/* 0x000c: ctx_dma_dst */
+ 0x00000000,
++/* 0x0010: ctx_query_address_high */
+ 0x00000000,
++/* 0x0014: ctx_query_address_low */
+ 0x00000000,
++/* 0x0018: ctx_query_counter */
+ 0x00000000,
++/* 0x001c: ctx_src_address_high */
+ 0x00000000,
++/* 0x0020: ctx_src_address_low */
+ 0x00000000,
++/* 0x0024: ctx_src_pitch */
+ 0x00000000,
++/* 0x0028: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x002c: ctx_src_xsize */
+ 0x00000000,
++/* 0x0030: ctx_src_ysize */
+ 0x00000000,
++/* 0x0034: ctx_src_zsize */
+ 0x00000000,
++/* 0x0038: ctx_src_zoff */
+ 0x00000000,
++/* 0x003c: ctx_src_xoff */
+ 0x00000000,
++/* 0x0040: ctx_src_yoff */
+ 0x00000000,
++/* 0x0044: ctx_src_cpp */
+ 0x00000000,
++/* 0x0048: ctx_dst_address_high */
+ 0x00000000,
++/* 0x004c: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0050: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0054: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x0058: ctx_dst_xsize */
+ 0x00000000,
++/* 0x005c: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0060: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0064: ctx_dst_zoff */
+ 0x00000000,
++/* 0x0068: ctx_dst_xoff */
+ 0x00000000,
++/* 0x006c: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0070: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0074: ctx_format */
+ 0x00000000,
++/* 0x0078: ctx_swz_const0 */
+ 0x00000000,
++/* 0x007c: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0080: ctx_xcnt */
+ 0x00000000,
++/* 0x0084: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00010162,
+ 0x00000000,
+ 0x00030060,
++/* 0x0128: dispatch_dma */
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
+ 0x0000001c,
+ 0xffffff00,
+ 0x00000020,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000048,
+ 0xffffff00,
+ 0x0000004c,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000024,
+ 0xfff80000,
+ 0x00000050,
+@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nva3_pcopy_code[] = {
++u32 nva3_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c472,
+ 0xf4060bf4,
++/* 0x004a: ih_no_cmd */
+ 0x11c4c321,
+ 0x4001d00c,
++/* 0x0052: swctx */
+ 0x47f101f8,
+ 0x4bfe7700,
+ 0x0007fe00,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x006b: swctx_load */
+ 0xfa060ef4,
++/* 0x006e: swctx_done */
+ 0x03f80504,
++/* 0x0072: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1e3af052,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x0093: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x220bf41e,
+ 0xf40131f4,
+ 0x57f05221,
+ 0x0367f004,
++/* 0x00a8: chsw_load_ctx_dma */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
++/* 0x00bb: chsw_finish_load */
+ 0xf0f018f4,
+ 0x23d00237,
++/* 0x00c3: dispatch */
+ 0xf100f880,
+ 0xcf190037,
+ 0x33cf4032,
+@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1024b607,
+ 0x010057f1,
+ 0x74bd64bd,
++/* 0x00dc: dispatch_loop */
+ 0x58005658,
+ 0x50b60157,
+ 0x0446b804,
+@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb60276bb,
+ 0x57bb0374,
+ 0xdf0ef400,
++/* 0x0100: dispatch_valid_mthd */
+ 0xb60246bb,
+ 0x45bb0344,
+ 0x01459800,
+@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb0014658,
+ 0x1bf40064,
+ 0x00538009,
++/* 0x0127: dispatch_cmd */
+ 0xf4300ef4,
+ 0x55f90132,
+ 0xf40c01f4,
++/* 0x0132: dispatch_invalid_bitfield */
+ 0x25f0250e,
++/* 0x0135: dispatch_illegal_mthd */
+ 0x0125f002,
++/* 0x0138: dispatch_error */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x27f04043,
+ 0x0002d040,
++/* 0x0148: hostirq_wait */
+ 0xf08002cf,
+ 0x24b04024,
+ 0xf71bf400,
++/* 0x0154: dispatch_done */
+ 0x1d0027f1,
+ 0xd00137f0,
+ 0x00f80023,
++/* 0x0160: cmd_nop */
++/* 0x0162: cmd_pm_trigger */
+ 0x27f100f8,
+ 0x34bd2200,
+ 0xd00233f0,
+ 0x00f80023,
++/* 0x0170: cmd_dma */
+ 0x012842b7,
+ 0xf00145b6,
+ 0x43801e39,
+ 0x0040b701,
+ 0x0644b606,
+ 0xf80043d0,
++/* 0x0189: cmd_exec_set_format */
+ 0xf030f400,
+ 0xb00001b0,
+ 0x01b00101,
+@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
+ 0x70b63847,
+ 0x0232f401,
+ 0x94bd84bd,
++/* 0x01b4: ncomp_loop */
+ 0xb60f4ac4,
+ 0xb4bd0445,
++/* 0x01bc: bpc_loop */
+ 0xf404a430,
+ 0xa5ff0f18,
+ 0x00cbbbc0,
+ 0xf40231f4,
++/* 0x01ce: cmp_c0 */
+ 0x1bf4220e,
+ 0x10c7f00c,
+ 0xf400cbbb,
++/* 0x01da: cmp_c1 */
+ 0xa430160e,
+ 0x0c18f406,
+ 0xbb14c7f0,
+ 0x0ef400cb,
++/* 0x01e9: cmp_zero */
+ 0x80c7f107,
++/* 0x01ed: bpc_next */
+ 0x01c83800,
+ 0xb60180b6,
+ 0xb5b801b0,
+@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x98110680,
+ 0x68fd2008,
+ 0x0502f400,
++/* 0x0216: dst_xcnt */
+ 0x75fd64bd,
+ 0x1c078000,
+ 0xf10078fd,
+@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x980056d0,
+ 0x56d01f06,
+ 0x1030f440,
++/* 0x0276: cmd_exec_set_surface_tiled */
+ 0x579800f8,
+ 0x6879c70a,
+ 0xb66478c7,
+@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
+ 0x0e76b060,
+ 0xf0091bf4,
+ 0x0ef40477,
++/* 0x0291: xtile64 */
+ 0x027cf00f,
+ 0xfd1170b6,
+ 0x77f00947,
++/* 0x029d: xtileok */
+ 0x0f5a9806,
+ 0xfd115b98,
+ 0xb7f000ab,
+@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x67d00600,
+ 0x0060b700,
+ 0x0068d004,
++/* 0x0382: cmd_exec_set_surface_linear */
+ 0x6cf000f8,
+ 0x0260b702,
+ 0x0864b602,
+@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb70067d0,
+ 0x98040060,
+ 0x67d00957,
++/* 0x03ab: cmd_exec_wait */
+ 0xf900f800,
+ 0xf110f900,
+ 0xb6080007,
++/* 0x03b6: loop */
+ 0x01cf0604,
+ 0x0114f000,
+ 0xfcfa1bf4,
+ 0xf800fc10,
++/* 0x03c5: cmd_exec_query */
+ 0x0d34c800,
+ 0xf5701bf4,
+ 0xf103ab21,
+@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0438: query_counter */
+ 0x03ab21f5,
+ 0x080c47f1,
+ 0x980644b6,
+@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0492: cmd_exec */
+ 0x21f500f8,
+ 0x3fc803ab,
+ 0x0e0bf400,
+ 0x018921f5,
+ 0x020047f1,
++/* 0x04a7: cmd_exec_no_format */
+ 0xf11e0ef4,
+ 0xb6081067,
+ 0x77f00664,
+@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
+ 0x981c0780,
+ 0x67d02007,
+ 0x4067d000,
++/* 0x04c2: cmd_exec_init_src_surface */
+ 0x32f444bd,
+ 0xc854bd02,
+ 0x0bf4043f,
+ 0x8221f50a,
+ 0x0a0ef403,
++/* 0x04d4: src_tiled */
+ 0x027621f5,
++/* 0x04db: cmd_exec_init_dst_surface */
+ 0xf40749f0,
+ 0x57f00231,
+ 0x083fc82c,
+ 0xf50a0bf4,
+ 0xf4038221,
++/* 0x04ee: dst_tiled */
+ 0x21f50a0e,
+ 0x49f00276,
++/* 0x04f5: cmd_exec_kick */
+ 0x0057f108,
+ 0x0654b608,
+ 0xd0210698,
+@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
+ 0xc80054d0,
+ 0x0bf40c3f,
+ 0xc521f507,
++/* 0x0519: cmd_exec_done */
++/* 0x051b: cmd_wrcache_flush */
+ 0xf100f803,
+ 0xbd220027,
+ 0x0133f034,
+diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+index 4199038..9e87036 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+@@ -1,34 +1,65 @@
+-uint32_t nvc0_pcopy_data[] = {
++u32 nvc0_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_query_address_high */
+ 0x00000000,
++/* 0x0008: ctx_query_address_low */
+ 0x00000000,
++/* 0x000c: ctx_query_counter */
+ 0x00000000,
++/* 0x0010: ctx_src_address_high */
+ 0x00000000,
++/* 0x0014: ctx_src_address_low */
+ 0x00000000,
++/* 0x0018: ctx_src_pitch */
+ 0x00000000,
++/* 0x001c: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x0020: ctx_src_xsize */
+ 0x00000000,
++/* 0x0024: ctx_src_ysize */
+ 0x00000000,
++/* 0x0028: ctx_src_zsize */
+ 0x00000000,
++/* 0x002c: ctx_src_zoff */
+ 0x00000000,
++/* 0x0030: ctx_src_xoff */
+ 0x00000000,
++/* 0x0034: ctx_src_yoff */
+ 0x00000000,
++/* 0x0038: ctx_src_cpp */
+ 0x00000000,
++/* 0x003c: ctx_dst_address_high */
+ 0x00000000,
++/* 0x0040: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0044: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0048: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x004c: ctx_dst_xsize */
+ 0x00000000,
++/* 0x0050: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0054: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0058: ctx_dst_zoff */
+ 0x00000000,
++/* 0x005c: ctx_dst_xoff */
+ 0x00000000,
++/* 0x0060: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0064: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0068: ctx_format */
+ 0x00000000,
++/* 0x006c: ctx_swz_const0 */
+ 0x00000000,
++/* 0x0070: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0074: ctx_xcnt */
+ 0x00000000,
++/* 0x0078: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+- 0x0000000f,
++ 0x00000000,
+ 0x0000003c,
+ 0xffffff00,
+ 0x00000040,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000018,
+ 0xfff80000,
+ 0x00000044,
+@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nvc0_pcopy_code[] = {
++u32 nvc0_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c4ca,
+ 0xf5070bf4,
++/* 0x004b: ih_no_cmd */
+ 0xc4010221,
+ 0x01d00c11,
++/* 0x0053: swctx */
+ 0xf101f840,
+ 0xfe770047,
+ 0x47f1004b,
+@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x00c3: swctx_load */
+ 0xfa060ef4,
++/* 0x00c6: swctx_done */
+ 0x03f80504,
++/* 0x00ca: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1e3af053,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x00eb: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x090bf41e,
+ 0xf40131f4,
++/* 0x00fa: chsw_finish_load */
+ 0x37f05321,
+ 0x8023d002,
++/* 0x0102: dispatch */
+ 0x37f100f8,
+ 0x32cf1900,
+ 0x0033cf40,
+ 0x07ff24e4,
+ 0xf11024b6,
+ 0xbd010057,
++/* 0x011b: dispatch_loop */
+ 0x5874bd64,
+ 0x57580056,
+ 0x0450b601,
+@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xbb0f08f4,
+ 0x74b60276,
+ 0x0057bb03,
++/* 0x013f: dispatch_valid_mthd */
+ 0xbbdf0ef4,
+ 0x44b60246,
+ 0x0045bb03,
+@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x64b00146,
+ 0x091bf400,
+ 0xf4005380,
++/* 0x0166: dispatch_cmd */
+ 0x32f4300e,
+ 0xf455f901,
+ 0x0ef40c01,
++/* 0x0171: dispatch_invalid_bitfield */
+ 0x0225f025,
++/* 0x0174: dispatch_illegal_mthd */
++/* 0x0177: dispatch_error */
+ 0xf10125f0,
+ 0xd0100047,
+ 0x43d00042,
+ 0x4027f040,
++/* 0x0187: hostirq_wait */
+ 0xcf0002d0,
+ 0x24f08002,
+ 0x0024b040,
++/* 0x0193: dispatch_done */
+ 0xf1f71bf4,
+ 0xf01d0027,
+ 0x23d00137,
++/* 0x019f: cmd_nop */
+ 0xf800f800,
++/* 0x01a1: cmd_pm_trigger */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00233,
++/* 0x01af: cmd_exec_set_format */
+ 0xf400f800,
+ 0x01b0f030,
+ 0x0101b000,
+@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x3847c701,
+ 0xf40170b6,
+ 0x84bd0232,
++/* 0x01da: ncomp_loop */
+ 0x4ac494bd,
+ 0x0445b60f,
++/* 0x01e2: bpc_loop */
+ 0xa430b4bd,
+ 0x0f18f404,
+ 0xbbc0a5ff,
+ 0x31f400cb,
+ 0x220ef402,
++/* 0x01f4: cmp_c0 */
+ 0xf00c1bf4,
+ 0xcbbb10c7,
+ 0x160ef400,
++/* 0x0200: cmp_c1 */
+ 0xf406a430,
+ 0xc7f00c18,
+ 0x00cbbb14,
++/* 0x020f: cmp_zero */
+ 0xf1070ef4,
++/* 0x0213: bpc_next */
+ 0x380080c7,
+ 0x80b601c8,
+ 0x01b0b601,
+@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d08980e,
+ 0xf40068fd,
+ 0x64bd0502,
++/* 0x023c: dst_xcnt */
+ 0x800075fd,
+ 0x78fd1907,
+ 0x1057f100,
+@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1c069800,
+ 0xf44056d0,
+ 0x00f81030,
++/* 0x029c: cmd_exec_set_surface_tiled */
+ 0xc7075798,
+ 0x78c76879,
+ 0x0380b664,
+ 0xb06077c7,
+ 0x1bf40e76,
+ 0x0477f009,
++/* 0x02b7: xtile64 */
+ 0xf00f0ef4,
+ 0x70b6027c,
+ 0x0947fd11,
++/* 0x02c3: xtileok */
+ 0x980677f0,
+ 0x5b980c5a,
+ 0x00abfd0e,
+@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xb70067d0,
+ 0xd0040060,
+ 0x00f80068,
++/* 0x03a8: cmd_exec_set_surface_linear */
+ 0xb7026cf0,
+ 0xb6020260,
+ 0x57980864,
+@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0060b700,
+ 0x06579804,
+ 0xf80067d0,
++/* 0x03d1: cmd_exec_wait */
+ 0xf900f900,
+ 0x0007f110,
+ 0x0604b608,
++/* 0x03dc: loop */
+ 0xf00001cf,
+ 0x1bf40114,
+ 0xfc10fcfa,
++/* 0x03eb: cmd_exec_query */
+ 0xc800f800,
+ 0x1bf40d34,
+ 0xd121f570,
+@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
++/* 0x045e: query_counter */
+ 0x21f50045,
+ 0x47f103d1,
+ 0x44b6080c,
+@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x080047f1,
+ 0xd00644b6,
+ 0x00f80045,
++/* 0x04b8: cmd_exec */
+ 0x03d121f5,
+ 0xf4003fc8,
+ 0x21f50e0b,
+ 0x47f101af,
+ 0x0ef40200,
++/* 0x04cd: cmd_exec_no_format */
+ 0x1067f11e,
+ 0x0664b608,
+ 0x800177f0,
+@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d079819,
+ 0xd00067d0,
+ 0x44bd4067,
++/* 0x04e8: cmd_exec_init_src_surface */
+ 0xbd0232f4,
+ 0x043fc854,
+ 0xf50a0bf4,
+ 0xf403a821,
++/* 0x04fa: src_tiled */
+ 0x21f50a0e,
+ 0x49f0029c,
++/* 0x0501: cmd_exec_init_dst_surface */
+ 0x0231f407,
+ 0xc82c57f0,
+ 0x0bf4083f,
+ 0xa821f50a,
+ 0x0a0ef403,
++/* 0x0514: dst_tiled */
+ 0x029c21f5,
++/* 0x051b: cmd_exec_kick */
+ 0xf10849f0,
+ 0xb6080057,
+ 0x06980654,
+@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x54d00546,
+ 0x0c3fc800,
+ 0xf5070bf4,
++/* 0x053f: cmd_exec_done */
+ 0xf803eb21,
++/* 0x0541: cmd_wrcache_flush */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00133,
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 552b436..3254d51 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -22,6 +22,7 @@
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
++ * Jerome Glisse
+ */
+ #include "drmP.h"
+ #include "radeon_drm.h"
+@@ -634,7 +635,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE, 100);
+ if (ret <= 0) {
+- DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+@@ -812,8 +812,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ clock_recovery = true;
+@@ -875,8 +877,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ channel_eq = true;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 4a4493f..87d494d 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+- int saved_dpms = connector->dpms;
+-
+- /* Only turn off the display it it's physically disconnected */
+- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- else if (radeon_dp_needs_link_train(radeon_connector))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- connector->dpms = saved_dpms;
++ struct radeon_connector_atom_dig *dig_connector =
++ radeon_connector->con_priv;
++
++ /* if existing sink type was not DP no need to retrain */
++ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ return;
++
++ /* first get sink type as it may be reset after (un)plug */
++ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
++ /* don't do anything if sink is not display port, i.e.,
++ * passive dp->(dvi|hdmi) adaptor
++ */
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
++ int saved_dpms = connector->dpms;
++ /* Only turn off the display if it's physically disconnected */
++ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ } else if (radeon_dp_needs_link_train(radeon_connector)) {
++ /* set it to OFF so that drm_helper_connector_dpms()
++ * won't return immediately since the current state
++ * is ON at this point.
++ */
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
++ }
++ connector->dpms = saved_dpms;
++ }
+ }
+ }
+
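The radeon_connectors.c hunk above works around an idempotent DPMS helper: drm_helper_connector_dpms() returns early when the requested state matches connector->dpms, so the hunk clears the cached state to force a retrain. A minimal userspace sketch of that early-out-and-forced-rerun pattern (struct connector, set_dpms and the DPMS names here are illustrative, not the DRM API):

    #include <stdio.h>

    enum dpms_mode { DPMS_ON, DPMS_OFF };

    struct connector {
        enum dpms_mode dpms;    /* cached state the helper trusts */
    };

    /* Idempotent setter: returns immediately when the cached state
     * already matches, as drm_helper_connector_dpms() does. */
    static void set_dpms(struct connector *c, enum dpms_mode mode)
    {
        if (c->dpms == mode)
            return;
        c->dpms = mode;
        printf("hardware reprogrammed to %s\n",
               mode == DPMS_ON ? "ON" : "OFF");
    }

    int main(void)
    {
        struct connector c = { .dpms = DPMS_ON };

        /* The link needs retraining but the cached state is already
         * ON, so a plain set_dpms(ON) would early-out. Clearing the
         * cache first forces the helper to run, which is what the
         * hunk does with connector->dpms = DRM_MODE_DPMS_OFF. */
        c.dpms = DPMS_OFF;
        set_dpms(&c, DPMS_ON);
        return 0;
    }
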
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 986d608..2132109 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -257,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+- if (w <= 0)
++ if (w <= 0) {
+ w = 1;
++ cursor_end = x - xorigin + w;
++ if (!(cursor_end & 0x7f)) {
++ x--;
++ WARN_ON_ONCE(x < 0);
++ }
++ }
+ }
+ }
+
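The radeon_cursor.c hunk handles hardware that misrenders a cursor whose right edge lands on a 128-pixel boundary (cursor_end & 0x7f == 0): it first shrinks the width, and once the width has bottomed out at 1 it nudges x left instead. A standalone sketch of that adjustment under the same assumption about the quirk (fixup_cursor is a hypothetical helper; the kernel inlines this logic):

    #include <stdio.h>

    static void fixup_cursor(int *x, int xorigin, int *w)
    {
        int cursor_end = *x - xorigin + *w;

        if (!(cursor_end & 0x7f))       /* edge on a 128px boundary */
            (*w)--;
        if (*w <= 0) {
            *w = 1;
            cursor_end = *x - xorigin + *w;
            if (!(cursor_end & 0x7f)) {
                (*x)--;                 /* nudge left as a last resort */
                if (*x < 0)
                    fprintf(stderr, "cursor x went negative\n");
            }
        }
    }

    int main(void)
    {
        int x = 127, w = 1;

        fixup_cursor(&x, 0, &w);        /* end = 128 -> w drops to 0, */
        printf("x=%d w=%d\n", x, w);    /* then x=126 w=1 comes out   */
        return 0;
    }
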
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index f3ae607..39497c7 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -117,7 +117,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ return -ENOMEM;
+ }
+
+-retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -130,6 +129,8 @@ retry:
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
++
++retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index a1b8caa..0f074e0 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1865,6 +1865,11 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ iommu_init_device(dev);
+
++ if (iommu_pass_through) {
++ attach_device(dev, pt_domain);
++ break;
++ }
++
+ domain = domain_for_device(dev);
+
+ /* allocate a protection domain if a device is added */
+@@ -1880,10 +1885,7 @@ static int device_change_notifier(struct notifier_block *nb,
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+- if (!iommu_pass_through)
+- dev->archdata.dma_ops = &amd_iommu_dma_ops;
+- else
+- dev->archdata.dma_ops = &nommu_dma_ops;
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
+index a7fa38f..e572ce5 100644
+--- a/drivers/media/video/cx25821/cx25821-core.c
++++ b/drivers/media/video/cx25821/cx25821-core.c
+@@ -914,9 +914,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ list_add_tail(&dev->devlist, &cx25821_devlist);
+ mutex_unlock(&cx25821_devlist_mutex);
+
+- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
+- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
+-
+ if (dev->pci->device != 0x8210) {
+ pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
+ __func__, dev->pci->device);
+diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
+index 2d2d009..bf54360 100644
+--- a/drivers/media/video/cx25821/cx25821.h
++++ b/drivers/media/video/cx25821/cx25821.h
+@@ -187,7 +187,7 @@ enum port {
+ };
+
+ struct cx25821_board {
+- char *name;
++ const char *name;
+ enum port porta;
+ enum port portb;
+ enum port portc;
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 6878a94..83b51b5 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -148,6 +148,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
+ static const struct sdhci_pci_fixes sdhci_cafe = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 9e61d6b..ed1be8a 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3770,6 +3770,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
++ case RTL_GIGA_MAC_VER_34:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ default:
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 01dcb1a..727c129 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -545,9 +545,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
+
+ /*
+ * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
+- * MCS index values for us are 0 to 7.
++ * MCS index values for us are 0 to 15.
+ */
+- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
++ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
+ sinfo->txrate.mcs = priv->tx_rate;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ /* 40MHz rate */
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 0ffa111..bdf960b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -876,6 +876,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1482, 0x3c09) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2012) },
++ { USB_DEVICE(0x1eda, 0x2210) },
+ { USB_DEVICE(0x1eda, 0x2310) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070) },
+@@ -945,6 +946,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* DVICO */
+ { USB_DEVICE(0x0fe9, 0xb307) },
+ /* Edimax */
++ { USB_DEVICE(0x7392, 0x4085) },
+ { USB_DEVICE(0x7392, 0x7711) },
+ { USB_DEVICE(0x7392, 0x7717) },
+ { USB_DEVICE(0x7392, 0x7718) },
+@@ -1020,6 +1022,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x200f) },
+ /* Planex */
++ { USB_DEVICE(0x2019, 0x5201) },
+ { USB_DEVICE(0x2019, 0xab25) },
+ { USB_DEVICE(0x2019, 0xed06) },
+ /* Quanta */
+@@ -1088,6 +1091,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ #ifdef CONFIG_RT2800USB_RT33XX
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x945b) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c17) },
++ /* Panasonic */
++ { USB_DEVICE(0x083a, 0xb511) },
++ /* Philips */
++ { USB_DEVICE(0x0471, 0x20dd) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3370) },
+ { USB_DEVICE(0x148f, 0x8070) },
+@@ -1099,6 +1108,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x8516, 0x3572) },
+ /* Askey */
+ { USB_DEVICE(0x1690, 0x0744) },
++ { USB_DEVICE(0x1690, 0x0761) },
++ { USB_DEVICE(0x1690, 0x0764) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001) },
+ /* EnGenius */
+@@ -1113,6 +1124,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0041) },
+ { USB_DEVICE(0x0df6, 0x0062) },
++ { USB_DEVICE(0x0df6, 0x0065) },
++ { USB_DEVICE(0x0df6, 0x0066) },
++ { USB_DEVICE(0x0df6, 0x0068) },
+ /* Toshiba */
+ { USB_DEVICE(0x0930, 0x0a07) },
+ /* Zinwell */
+@@ -1122,6 +1136,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Azurewave */
+ { USB_DEVICE(0x13d3, 0x3329) },
+ { USB_DEVICE(0x13d3, 0x3365) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c1c) },
++ { USB_DEVICE(0x2001, 0x3c1d) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x5370) },
+ { USB_DEVICE(0x148f, 0x5372) },
+@@ -1163,13 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
+- { USB_DEVICE(0x2001, 0x3c17) },
+- /* Edimax */
+- { USB_DEVICE(0x7392, 0x4085) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1) },
+- /* Fujitsu Stylistic 550 */
+- { USB_DEVICE(0x1690, 0x0761) },
+ /* Gemtek */
+ { USB_DEVICE(0x15a9, 0x0010) },
+ /* Gigabyte */
+@@ -1190,7 +1202,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x05a6, 0x0101) },
+ { USB_DEVICE(0x1d4d, 0x0010) },
+ /* Planex */
+- { USB_DEVICE(0x2019, 0x5201) },
+ { USB_DEVICE(0x2019, 0xab24) },
+ /* Qcom */
+ { USB_DEVICE(0x18e8, 0x6259) },
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+index 2cf4c5f..de9faa9 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3462,21 +3462,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+- rtlhal->version &= (~CHIP_92D_SINGLEPHY);
++ rtlhal->version &= RF_TYPE_1T1R;
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 351dc0b..ee77a58 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -287,6 +287,7 @@ static void scsi_host_dev_release(struct device *dev)
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct device *parent = dev->parent;
+ struct request_queue *q;
++ void *queuedata;
+
+ scsi_proc_hostdir_rm(shost->hostt);
+
+@@ -296,9 +297,9 @@ static void scsi_host_dev_release(struct device *dev)
+ destroy_workqueue(shost->work_q);
+ q = shost->uspace_req_q;
+ if (q) {
+- kfree(q->queuedata);
+- q->queuedata = NULL;
+- scsi_free_queue(q);
++ queuedata = q->queuedata;
++ blk_cleanup_queue(q);
++ kfree(queuedata);
+ }
+
+ scsi_destroy_command_freelist(shost);
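The hosts.c hunk reorders teardown so the queue's private data outlives its last user: blk_cleanup_queue() may still run the request function, which dereferences queuedata, so the pointer is stashed first and freed only afterwards. A toy illustration of the free-after-teardown ordering (struct queue and cleanup_queue are stand-ins, not the block-layer API):

    #include <stdio.h>
    #include <stdlib.h>

    struct queue {
        void *queuedata;    /* still read during teardown */
    };

    /* Drains and tears down the queue; may touch queuedata until it
     * returns, like blk_cleanup_queue() running the request_fn. */
    static void cleanup_queue(struct queue *q)
    {
        printf("draining, queuedata=%p\n", q->queuedata);
    }

    int main(void)
    {
        struct queue q = { .queuedata = malloc(16) };
        void *data = q.queuedata;   /* stash the pointer first */

        cleanup_queue(&q);          /* last user of queuedata */
        free(data);                 /* only now is it safe to free */
        return 0;
    }
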
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index e48ba4b..dbe3568 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -774,7 +774,7 @@ static struct domain_device *sas_ex_discover_end_dev(
+ }
+
+ /* See if this phy is part of a wide port */
+-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
++static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ {
+ struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
+ int i;
+@@ -790,11 +790,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ sas_port_add_phy(ephy->port, phy->phy);
+ phy->port = ephy->port;
+ phy->phy_state = PHY_DEVICE_DISCOVERED;
+- return 0;
++ return true;
+ }
+ }
+
+- return -ENODEV;
++ return false;
+ }
+
+ static struct domain_device *sas_ex_discover_expander(
+@@ -932,8 +932,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ return res;
+ }
+
+- res = sas_ex_join_wide_port(dev, phy_id);
+- if (!res) {
++ if (sas_ex_join_wide_port(dev, phy_id)) {
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+ return res;
+@@ -978,8 +977,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr)) {
+ ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
+- res = sas_ex_join_wide_port(dev, i);
+- if (!res)
++ if (sas_ex_join_wide_port(dev, i))
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
+
+@@ -1849,32 +1847,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ {
+ struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *child;
+- bool found = false;
+- int res, i;
++ int res;
+
+ SAS_DPRINTK("ex %016llx phy%d new device attached\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
+ res = sas_ex_phy_discover(dev, phy_id);
+ if (res)
+- goto out;
+- /* to support the wide port inserted */
+- for (i = 0; i < dev->ex_dev.num_phys; i++) {
+- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
+- if (i == phy_id)
+- continue;
+- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
+- SAS_ADDR(ex_phy->attached_sas_addr)) {
+- found = true;
+- break;
+- }
+- }
+- if (found) {
+- sas_ex_join_wide_port(dev, phy_id);
++ return res;
++
++ if (sas_ex_join_wide_port(dev, phy_id))
+ return 0;
+- }
++
+ res = sas_ex_discover_devices(dev, phy_id);
+- if (!res)
+- goto out;
++ if (res)
++ return res;
+ list_for_each_entry(child, &dev->ex_dev.children, siblings) {
+ if (SAS_ADDR(child->sas_addr) ==
+ SAS_ADDR(ex_phy->attached_sas_addr)) {
+@@ -1884,7 +1870,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ break;
+ }
+ }
+-out:
+ return res;
+ }
+
+@@ -1983,9 +1968,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ struct domain_device *dev = NULL;
+
+ res = sas_find_bcast_dev(port_dev, &dev);
+- if (res)
+- goto out;
+- if (dev) {
++ while (res == 0 && dev) {
+ struct expander_device *ex = &dev->ex_dev;
+ int i = 0, phy_id;
+
+@@ -1997,8 +1980,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ res = sas_rediscover(dev, phy_id);
+ i = phy_id + 1;
+ } while (i < ex->num_phys);
++
++ dev = NULL;
++ res = sas_find_bcast_dev(port_dev, &dev);
+ }
+-out:
+ return res;
+ }
+
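The final libsas hunk converts a one-shot check into a drain loop: keep asking sas_find_bcast_dev() for the next flagged expander until none remain, instead of revalidating only the first. A toy model of that loop shape (find_bcast_dev and the integer "device" are purely illustrative):

    #include <stdio.h>

    static int pending = 3;    /* expanders still flagged by BCAST */

    /* Sets *dev to the next flagged expander, or 0 when none remain;
     * returns nonzero only on error (always 0 in this toy). */
    static int find_bcast_dev(int *dev)
    {
        *dev = pending > 0 ? pending-- : 0;
        return 0;
    }

    int main(void)
    {
        int dev, res;

        res = find_bcast_dev(&dev);
        while (res == 0 && dev) {   /* loop instead of a one-shot if */
            printf("revalidating expander %d\n", dev);
            dev = 0;
            res = find_bcast_dev(&dev);
        }
        return res;
    }
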
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 2aeb2e9..831db24 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -785,7 +785,13 @@ static void scsi_done(struct scsi_cmnd *cmd)
+ /* Move this to a header if it becomes more generally useful */
+ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
+ {
+- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
++ struct scsi_driver **sdp;
++
++ sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
++ if (!sdp)
++ return NULL;
++
++ return *sdp;
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index dc6131e..456b131 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1673,6 +1673,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+ * requests are started.
+ */
+ scsi_run_host_queues(shost);
++
++ /*
++ * if eh is active and host_eh_scheduled is pending we need to re-run
++ * recovery. we do this check after scsi_run_host_queues() to allow
++ * everything pent up since the last eh run a chance to make forward
++ * progress before we sync again. Either we'll immediately re-run
++ * recovery or scsi_device_unbusy() will wake us again when these
++ * pending commands complete.
++ */
++ spin_lock_irqsave(shost->host_lock, flags);
++ if (shost->host_eh_scheduled)
++ if (scsi_host_set_state(shost, SHOST_RECOVERY))
++ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
++ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f0ab58e..6c4b620 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -406,10 +406,6 @@ static void scsi_run_queue(struct request_queue *q)
+ LIST_HEAD(starved_list);
+ unsigned long flags;
+
+- /* if the device is dead, sdev will be NULL, so no queue to run */
+- if (!sdev)
+- return;
+-
+ shost = sdev->host;
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+@@ -483,15 +479,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
+ */
+ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
+ {
++ struct scsi_device *sdev = cmd->device;
+ struct request *req = cmd->request;
+ unsigned long flags;
+
++ /*
++ * We need to hold a reference on the device to avoid the queue being
++ * killed after the unlock and before scsi_run_queue is invoked which
++ * may happen because scsi_unprep_request() puts the command which
++ * releases its reference on the device.
++ */
++ get_device(&sdev->sdev_gendev);
++
+ spin_lock_irqsave(q->queue_lock, flags);
+ scsi_unprep_request(req);
+ blk_requeue_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
++
++ put_device(&sdev->sdev_gendev);
+ }
+
+ void scsi_next_command(struct scsi_cmnd *cmd)
+@@ -1374,16 +1381,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
+- * (e.g. !sdev), scsi needs to return 'not busy'.
+- * Otherwise, request stacking drivers may hold requests forever.
++ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
++ * needs to return 'not busy'. Otherwise, request stacking drivers
++ * may hold requests forever.
+ */
+ static int scsi_lld_busy(struct request_queue *q)
+ {
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+
+- if (!sdev)
++ if (blk_queue_dead(q))
+ return 0;
+
+ shost = sdev->host;
+@@ -1494,12 +1501,6 @@ static void scsi_request_fn(struct request_queue *q)
+ struct scsi_cmnd *cmd;
+ struct request *req;
+
+- if (!sdev) {
+- while ((req = blk_peek_request(q)) != NULL)
+- scsi_kill_request(req, q);
+- return;
+- }
+-
+ if(!get_device(&sdev->sdev_gendev))
+ /* We must be tearing the block queue down already */
+ return;
+@@ -1701,20 +1702,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+ return q;
+ }
+
+-void scsi_free_queue(struct request_queue *q)
+-{
+- unsigned long flags;
+-
+- WARN_ON(q->queuedata);
+-
+- /* cause scsi_request_fn() to kill all non-finished requests */
+- spin_lock_irqsave(q->queue_lock, flags);
+- q->request_fn(q);
+- spin_unlock_irqrestore(q->queue_lock, flags);
+-
+- blk_cleanup_queue(q);
+-}
+-
+ /*
+ * Function: scsi_block_requests()
+ *
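The scsi_requeue_command() hunk above pins the device across an unlocked window because, as its comment notes, scsi_unprep_request() puts the command and may thereby drop the last reference keeping the queue alive before scsi_run_queue() runs. A minimal refcounting sketch of the pin (get_device/put_device here are toy reimplementations, not the driver-core calls):

    #include <stdio.h>
    #include <stdlib.h>

    struct device {
        int refcount;
    };

    static void get_device(struct device *d) { d->refcount++; }

    static void put_device(struct device *d)
    {
        if (--d->refcount == 0) {
            printf("device (and its queue) freed\n");
            free(d);
        }
    }

    /* May drop the caller's last reference, as scsi_unprep_request()
     * does when it puts the command. */
    static void unprep(struct device *d) { put_device(d); }

    static void requeue(struct device *d)
    {
        get_device(d);      /* pin across the window below */
        unprep(d);          /* might otherwise free the device */
        printf("run_queue: device still valid, ref=%d\n", d->refcount);
        put_device(d);      /* drop the pin; may be the final put */
    }

    int main(void)
    {
        struct device *d = malloc(sizeof(*d));

        d->refcount = 1;    /* the command's reference */
        requeue(d);
        return 0;
    }
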
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 5b475d0..d58adca 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -85,7 +85,6 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
+ extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
+ extern void scsi_run_host_queues(struct Scsi_Host *shost);
+ extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+-extern void scsi_free_queue(struct request_queue *q);
+ extern int scsi_init_queue(void);
+ extern void scsi_exit_queue(void);
+ struct request_queue;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 6e7ea4a..a48b59c 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1710,6 +1710,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+ {
+ struct scsi_device *sdev;
+ shost_for_each_device(sdev, shost) {
++ /* target removed before the device could be added */
++ if (sdev->sdev_state == SDEV_DEL)
++ continue;
+ if (!scsi_host_scan_allowed(shost) ||
+ scsi_sysfs_add_sdev(sdev) != 0)
+ __scsi_remove_device(sdev);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 04c2a27..bb7c482 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -971,11 +971,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(dev);
+
+- /* cause the request function to reject all I/O requests */
+- sdev->request_queue->queuedata = NULL;
+-
+ /* Freeing the queue signals to block that we're done */
+- scsi_free_queue(sdev->request_queue);
++ blk_cleanup_queue(sdev->request_queue);
+ put_device(dev);
+ }
+
+@@ -1000,7 +997,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ struct scsi_device *sdev;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- starget->reap_ref++;
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->channel != starget->channel ||
+@@ -1014,14 +1010,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ goto restart;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+- scsi_target_reap(starget);
+-}
+-
+-static int __remove_child (struct device * dev, void * data)
+-{
+- if (scsi_is_target_device(dev))
+- __scsi_remove_target(to_scsi_target(dev));
+- return 0;
+ }
+
+ /**
+@@ -1034,14 +1022,34 @@ static int __remove_child (struct device * dev, void * data)
+ */
+ void scsi_remove_target(struct device *dev)
+ {
+- if (scsi_is_target_device(dev)) {
+- __scsi_remove_target(to_scsi_target(dev));
+- return;
++ struct Scsi_Host *shost = dev_to_shost(dev->parent);
++ struct scsi_target *starget, *found;
++ unsigned long flags;
++
++ restart:
++ found = NULL;
++ spin_lock_irqsave(shost->host_lock, flags);
++ list_for_each_entry(starget, &shost->__targets, siblings) {
++ if (starget->state == STARGET_DEL)
++ continue;
++ if (starget->dev.parent == dev || &starget->dev == dev) {
++ found = starget;
++ found->reap_ref++;
++ break;
++ }
+ }
++ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- get_device(dev);
+- device_for_each_child(dev, NULL, __remove_child);
+- put_device(dev);
++ if (found) {
++ __scsi_remove_target(found);
++ scsi_target_reap(found);
++ /* in the case where @dev has multiple starget children,
++ * continue removing.
++ *
++ * FIXME: does such a case exist?
++ */
++ goto restart;
++ }
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 0842cc7..2ff1255 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -427,19 +427,8 @@ int iscsit_reset_np_thread(
+
+ int iscsit_del_np_comm(struct iscsi_np *np)
+ {
+- if (!np->np_socket)
+- return 0;
+-
+- /*
+- * Some network transports allocate their own struct sock->file,
+- * see if we need to free any additional allocated resources.
+- */
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(np->np_socket->file);
+- np->np_socket->file = NULL;
+- }
+-
+- sock_release(np->np_socket);
++ if (np->np_socket)
++ sock_release(np->np_socket);
+ return 0;
+ }
+
+@@ -4105,13 +4094,8 @@ int iscsit_close_connection(
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 7da2d6a..0f68197 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
+ /* Used for struct iscsi_np->np_flags */
+ enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+ };
+
+ /* Used for struct iscsi_np->np_thread_state */
+@@ -511,7 +510,6 @@ struct iscsi_conn {
+ u16 local_port;
+ int net_size;
+ u32 auth_id;
+-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index bd2adec..2ec5339 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -793,22 +793,6 @@ int iscsi_target_setup_login_socket(
+ }
+ np->np_socket = sock;
+ /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!sock->file) {
+- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+- if (!sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- ret = -ENOMEM;
+- goto fail;
+- }
+- np->np_flags |= NPF_SCTP_STRUCT_FILE;
+- }
+- }
+- /*
+ * Setup the np->np_sockaddr from the passed sockaddr setup
+ * in iscsi_target_configfs.c code..
+ */
+@@ -857,21 +841,15 @@ int iscsi_target_setup_login_socket(
+
+ fail:
+ np->np_socket = NULL;
+- if (sock) {
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(sock->file);
+- sock->file = NULL;
+- }
+-
++ if (sock)
+ sock_release(sock);
+- }
+ return ret;
+ }
+
+ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ {
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+- int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
++ int err, ret = 0, ip_proto, sock_type, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+@@ -882,7 +860,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+- set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+ ip_proto = np->np_ip_proto;
+ sock_type = np->np_sock_type;
+@@ -907,35 +884,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+- /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!new_sock->file) {
+- new_sock->file = kzalloc(
+- sizeof(struct file), GFP_KERNEL);
+- if (!new_sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- sock_release(new_sock);
+- /* Get another socket */
+- return 1;
+- }
+- set_sctp_conn_flag = 1;
+- }
+- }
+-
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+- if (set_sctp_conn_flag) {
+- kfree(new_sock->file);
+- new_sock->file = NULL;
+- }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+@@ -945,9 +899,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+- if (set_sctp_conn_flag)
+- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+-
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+@@ -1195,13 +1146,8 @@ old_sess_out:
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ kfree(conn);
+
+ if (tpg) {
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 93b9406..717a8d4 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -1114,11 +1114,11 @@ int target_emulate_unmap(struct se_task *task)
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+- unsigned char *cdb = &cmd->t_task_cdb[0];
+ sector_t lba;
+- unsigned int size = cmd->data_length, range;
+- int ret = 0, offset;
+- unsigned short dl, bd_dl;
++ int size = cmd->data_length;
++ u32 range;
++ int ret = 0;
++ int dl, bd_dl;
+
+ if (!dev->transport->do_discard) {
+ pr_err("UNMAP emulation not supported for: %s\n",
+@@ -1127,24 +1127,41 @@ int target_emulate_unmap(struct se_task *task)
+ return -ENOSYS;
+ }
+
+- /* First UNMAP block descriptor starts at 8 byte offset */
+- offset = 8;
+- size -= 8;
+- dl = get_unaligned_be16(&cdb[0]);
+- bd_dl = get_unaligned_be16(&cdb[2]);
+-
+ buf = transport_kmap_data_sg(cmd);
+
+- ptr = &buf[offset];
+- pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
++ dl = get_unaligned_be16(&buf[0]);
++ bd_dl = get_unaligned_be16(&buf[2]);
++
++ size = min(size - 8, bd_dl);
++ if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
++ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* First UNMAP block descriptor starts at 8 byte offset */
++ ptr = &buf[8];
++ pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+- while (size) {
++ while (size >= 16) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
++ if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
++ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (lba + range > dev->transport->get_blocks(dev) + 1) {
++ cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
++ ret = -EINVAL;
++ goto err;
++ }
++
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ pr_err("blkdev_issue_discard() failed: %d\n",
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5660916..94c03d2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1820,6 +1820,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
++ case TCM_ADDRESS_OUT_OF_RANGE:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
+@@ -4496,6 +4497,15 @@ int transport_send_check_condition_and_sense(
+ /* WRITE PROTECTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
++ case TCM_ADDRESS_OUT_OF_RANGE:
++ /* CURRENT ERROR */
++ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
++ /* ILLEGAL REQUEST */
++ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
++ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
++ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
++ break;
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index f6ff837..a9df218 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1555,10 +1555,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
+ void __user *addr = as->userurb;
+ unsigned int i;
+
+- if (as->userbuffer && urb->actual_length)
+- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
+- urb->actual_length))
++ if (as->userbuffer && urb->actual_length) {
++ if (urb->number_of_packets > 0) /* Isochronous */
++ i = urb->transfer_buffer_length;
++ else /* Non-Isoc */
++ i = urb->actual_length;
++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
+ return -EFAULT;
++ }
+ if (put_user(as->status, &userurb->status))
+ return -EFAULT;
+ if (put_user(urb->actual_length, &userurb->actual_length))
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 29c854b..4e1f0aa 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -796,12 +796,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+
+ SET_ETHTOOL_OPS(net, &ops);
+
+- /* two kinds of host-initiated state changes:
+- * - iff DATA transfer is active, carrier is "on"
+- * - tx queueing enabled if open *and* carrier is "on"
+- */
+- netif_carrier_off(net);
+-
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
+@@ -815,6 +809,12 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ the_dev = dev;
++
++ /* two kinds of host-initiated state changes:
++ * - iff DATA transfer is active, carrier is "on"
++ * - tx queueing enabled if open *and* carrier is "on"
++ */
++ netif_carrier_off(net);
+ }
+
+ return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5971c95..d89aac1 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -932,8 +932,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 0b39458..03321e5 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -206,10 +206,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
+
+ work->ordered_func(work);
+
+- /* now take the lock again and call the freeing code */
++ /* now take the lock again and drop our item from the list */
+ spin_lock(&workers->order_lock);
+ list_del(&work->order_list);
++ spin_unlock(&workers->order_lock);
++
++ /*
++ * we don't want to call the ordered free functions
++ * with the lock held though
++ */
+ work->ordered_free(work);
++ spin_lock(&workers->order_lock);
+ }
+
+ spin_unlock(&workers->order_lock);
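The async-thread hunk's comment states the rule directly: unlink the item under order_lock, but drop the lock before invoking ordered_free(), since the callback may block or take the lock itself. A pthread sketch of the same discipline (ordered_free/run_ordered are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t order_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Callback that might sleep or take order_lock itself. */
    static void ordered_free(int *work)
    {
        pthread_mutex_lock(&order_lock);   /* would deadlock if the */
        pthread_mutex_unlock(&order_lock); /* caller still held it  */
        printf("freed work %d\n", *work);
    }

    static void run_ordered(int *work)
    {
        pthread_mutex_lock(&order_lock);
        /* ... unlink work from the list under the lock ... */
        pthread_mutex_unlock(&order_lock); /* drop before callback  */

        ordered_free(work);                /* safe: lock not held   */

        pthread_mutex_lock(&order_lock);   /* retake for next item  */
        pthread_mutex_unlock(&order_lock);
    }

    int main(void)
    {
        int w = 1;

        run_ordered(&w);
        return 0;
    }
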
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index f44b392..6b2a724 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -872,7 +872,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+
+ #ifdef CONFIG_MIGRATION
+ static int btree_migratepage(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ /*
+ * we can't safely write a btree page from here,
+@@ -887,7 +888,7 @@ static int btree_migratepage(struct address_space *mapping,
+ if (page_has_private(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+ #endif
+
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 6aa7457..c858a29 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -89,6 +89,32 @@ static struct {
+ /* Forward declarations */
+ static void cifs_readv_complete(struct work_struct *work);
+
++#ifdef CONFIG_HIGHMEM
++/*
++ * On arches that have high memory, kmap address space is limited. By
++ * serializing the kmap operations on those arches, we ensure that we don't
++ * end up with a bunch of threads in writeback with partially mapped page
++ * arrays, stuck waiting for kmap to come back. That situation prevents
++ * progress and can deadlock.
++ */
++static DEFINE_MUTEX(cifs_kmap_mutex);
++
++static inline void
++cifs_kmap_lock(void)
++{
++ mutex_lock(&cifs_kmap_mutex);
++}
++
++static inline void
++cifs_kmap_unlock(void)
++{
++ mutex_unlock(&cifs_kmap_mutex);
++}
++#else /* !CONFIG_HIGHMEM */
++#define cifs_kmap_lock() do { ; } while(0)
++#define cifs_kmap_unlock() do { ; } while(0)
++#endif /* CONFIG_HIGHMEM */
++
+ /* Mark as invalid, all open files on tree connections since they
+ were closed when session to server was lost */
+ static void mark_open_files_invalid(struct cifs_tcon *pTcon)
+@@ -1540,6 +1566,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+ cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
++ cifs_kmap_lock();
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_CACHE_SIZE) {
+ /* enough data to fill the page */
+@@ -1589,6 +1616,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ page_cache_release(page);
+ }
+ }
++ cifs_kmap_unlock();
+
+ /* issue the read if we have any iovecs left to fill */
+ if (rdata->nr_iov > 1) {
+@@ -2171,6 +2199,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
+ iov[0].iov_base = smb;
+
+ /* marshal up the pages into iov array */
++ cifs_kmap_lock();
+ wdata->bytes = 0;
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min(inode->i_size -
+@@ -2179,6 +2208,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ wdata->bytes += iov[i + 1].iov_len;
+ }
++ cifs_kmap_unlock();
+
+ cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
+
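The cifs_kmap_lock()/cifs_kmap_unlock() pair added above serializes bulk kmap users only on CONFIG_HIGHMEM kernels, where the kmap window is scarce, and compiles to nothing elsewhere. A userspace analogue of that conditional lock/no-op split (SCARCE, map_lock and map_unlock are invented names):

    #include <pthread.h>
    #include <stdio.h>

    #ifdef SCARCE
    static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
    static void map_lock(void)   { pthread_mutex_lock(&map_mutex); }
    static void map_unlock(void) { pthread_mutex_unlock(&map_mutex); }
    #else
    #define map_lock()   do { } while (0)
    #define map_unlock() do { } while (0)
    #endif

    int main(void)
    {
        map_lock();
        printf("mapping pages under the (possibly no-op) lock\n");
        map_unlock();
        return 0;
    }
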
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 914bf9e..d6970f7 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -557,7 +557,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ if (bitmap_bh == NULL)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
+ i, ext4_free_group_clusters(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index fa3af81..bbde5d5 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -11,21 +11,15 @@
+ #include <linux/jbd2.h>
+ #include "ext4.h"
+
+-#ifdef EXT4FS_DEBUG
+-
+ static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
+-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
++unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
+ {
+ unsigned int i, sum = 0;
+
+- if (!map)
+- return 0;
+ for (i = 0; i < numchars; i++)
+- sum += nibblemap[map->b_data[i] & 0xf] +
+- nibblemap[(map->b_data[i] >> 4) & 0xf];
++ sum += nibblemap[bitmap[i] & 0xf] +
++ nibblemap[(bitmap[i] >> 4) & 0xf];
+ return sum;
+ }
+
+-#endif /* EXT4FS_DEBUG */
+-
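ext4_count_free() counts free (zero) bits four at a time with a 16-entry nibble table. A runnable userspace copy of the idea, mirroring the kernel's table but otherwise illustrative:

    #include <stdio.h>

    /* Zero bits in each 4-bit value 0x0..0xf. */
    static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1,
                                    3, 2, 2, 1, 2, 1, 1, 0};

    static unsigned int count_free(const unsigned char *bitmap,
                                   unsigned int numchars)
    {
        unsigned int i, sum = 0;

        for (i = 0; i < numchars; i++)
            sum += nibblemap[bitmap[i] & 0xf] +
                   nibblemap[(bitmap[i] >> 4) & 0xf];
        return sum;
    }

    int main(void)
    {
        unsigned char map[2] = { 0x0f, 0xff };  /* 4 bits clear */

        printf("%u free\n", count_free(map, sizeof(map)));
        return 0;
    }
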
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7b1cd5c..8cb184c 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1123,8 +1123,7 @@ struct ext4_sb_info {
+ unsigned long s_desc_per_block; /* Number of group descriptors per block */
+ ext4_group_t s_groups_count; /* Number of groups in the fs */
+ ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
+- unsigned long s_overhead_last; /* Last calculated overhead */
+- unsigned long s_blocks_last; /* Last seen block count */
++ unsigned long s_overhead; /* # of fs overhead clusters */
+ unsigned int s_cluster_ratio; /* Number of blocks per cluster */
+ unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
+ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
+@@ -1757,7 +1756,7 @@ struct mmpd_data {
+ # define NORET_AND noreturn,
+
+ /* bitmap.c */
+-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
++extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
+
+ /* balloc.c */
+ extern unsigned int ext4_block_group(struct super_block *sb,
+@@ -1925,6 +1924,7 @@ extern int ext4_group_extend(struct super_block *sb,
+ ext4_fsblk_t n_blocks_count);
+
+ /* super.c */
++extern int ext4_calculate_overhead(struct super_block *sb);
+ extern void *ext4_kvmalloc(size_t size, gfp_t flags);
+ extern void *ext4_kvzalloc(size_t size, gfp_t flags);
+ extern void ext4_kvfree(void *ptr);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 8fb6844..6266799 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1057,7 +1057,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
+ if (!bitmap_bh)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_INODES_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
+ (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3ce7613..8b01f9f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -277,6 +277,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
+ used = ei->i_reserved_data_blocks;
+ }
+
++ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
++ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
++ "with only %d reserved metadata blocks\n", __func__,
++ inode->i_ino, ei->i_allocated_meta_blocks,
++ ei->i_reserved_meta_blocks);
++ WARN_ON(1);
++ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
++ }
++
+ /* Update per-inode reservations */
+ ei->i_reserved_data_blocks -= used;
+ ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+@@ -1102,6 +1111,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+ int ret;
++ ext4_lblk_t save_last_lblock;
++ int save_len;
++
++ /*
++ * We will charge metadata quota at writeout time; this saves
++ * us from metadata over-estimation, though we may go over by
++ * a small amount in the end. Here we just reserve for data.
++ */
++ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
++ if (ret)
++ return ret;
+
+ /*
+ * recalculate the amount of metadata blocks to reserve
+@@ -1110,32 +1130,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ */
+ repeat:
+ spin_lock(&ei->i_block_reservation_lock);
++ /*
++ * ext4_calc_metadata_amount() has side effects, which we have
++ * to be prepared undo if we fail to claim space.
++ */
++ save_len = ei->i_da_metadata_calc_len;
++ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+ md_needed = EXT4_NUM_B2C(sbi,
+ ext4_calc_metadata_amount(inode, lblock));
+ trace_ext4_da_reserve_space(inode, md_needed);
+- spin_unlock(&ei->i_block_reservation_lock);
+
+ /*
+- * We will charge metadata quota at writeout time; this saves
+- * us from metadata over-estimation, though we may go over by
+- * a small amount in the end. Here we just reserve for data.
+- */
+- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+- if (ret)
+- return ret;
+- /*
+ * We do still charge estimated metadata to the sb though;
+ * we cannot afford to run out of free blocks.
+ */
+ if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
+- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
++ ei->i_da_metadata_calc_len = save_len;
++ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
++ spin_unlock(&ei->i_block_reservation_lock);
+ if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ yield();
+ goto repeat;
+ }
++ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ return -ENOSPC;
+ }
+- spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ ei->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&ei->i_block_reservation_lock);
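The ext4_da_reserve_space() hunk copes with an estimator that has side effects: ext4_calc_metadata_amount() updates i_da_metadata_calc_len and i_da_metadata_calc_last_lblock, so the hunk snapshots both under the reservation lock and restores them if claiming space fails, keeping a retry from double-counting. A toy version of that snapshot-and-restore (estimate/claim and the globals are stand-ins):

    #include <stdio.h>
    #include <stdbool.h>

    /* Cache updated as a side effect of estimating, like
     * i_da_metadata_calc_len / i_da_metadata_calc_last_lblock. */
    static int calc_len, calc_last;

    static int estimate(int lblock)
    {
        calc_last = lblock;
        return ++calc_len;      /* grows the cached extent */
    }

    static bool claim(int blocks)
    {
        (void)blocks;
        return false;           /* simulate ENOSPC */
    }

    int main(void)
    {
        int save_len = calc_len, save_last = calc_last;
        int needed = estimate(42);

        if (!claim(needed + 1)) {
            /* Undo the estimator's side effects before any retry,
             * otherwise the next pass double-counts the extent. */
            calc_len = save_len;
            calc_last = save_last;
            printf("restored len=%d last=%d\n", calc_len, calc_last);
        }
        return 0;
    }
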
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 996780a..4eac337 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -952,6 +952,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+
++ /*
++ * Update the fs overhead information
++ */
++ ext4_calculate_overhead(sb);
++
+ ext4_handle_dirty_super(handle, sb);
+
+ exit_journal:
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a93486e..a071348 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3083,6 +3083,114 @@ static void ext4_destroy_lazyinit_thread(void)
+ kthread_stop(ext4_lazyinit_task);
+ }
+
++/*
++ * Note: calculating the overhead so we can be compatible with
++ * historical BSD practice is quite difficult in the face of
++ * clusters/bigalloc. This is because multiple metadata blocks from
++ * different block group can end up in the same allocation cluster.
++ * Calculating the exact overhead in the face of clustered allocation
++ * requires either O(all block bitmaps) in memory or O(number of block
++ * groups**2) in time. We will still calculate the superblock for
++ * older file systems --- and if we come across with a bigalloc file
++ * system with zero in s_overhead_clusters the estimate will be close to
++ * correct especially for very large cluster sizes --- but for newer
++ * file systems, it's better to calculate this figure once at mkfs
++ * time, and store it in the superblock. If the superblock value is
++ * present (even for non-bigalloc file systems), we will use it.
++ */
++static int count_overhead(struct super_block *sb, ext4_group_t grp,
++ char *buf)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_group_desc *gdp;
++ ext4_fsblk_t first_block, last_block, b;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ int s, j, count = 0;
++
++ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
++ (grp * EXT4_BLOCKS_PER_GROUP(sb));
++ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
++ for (i = 0; i < ngroups; i++) {
++ gdp = ext4_get_group_desc(sb, i, NULL);
++ b = ext4_block_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_table(sb, gdp);
++ if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
++ for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
++ int c = EXT4_B2C(sbi, b - first_block);
++ ext4_set_bit(c, buf);
++ count++;
++ }
++ if (i != grp)
++ continue;
++ s = 0;
++ if (ext4_bg_has_super(sb, grp)) {
++ ext4_set_bit(s++, buf);
++ count++;
++ }
++ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
++ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
++ count++;
++ }
++ }
++ if (!count)
++ return 0;
++ return EXT4_CLUSTERS_PER_GROUP(sb) -
++ ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
++}
++
++/*
++ * Compute the overhead and stash it in sbi->s_overhead
++ */
++int ext4_calculate_overhead(struct super_block *sb)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_super_block *es = sbi->s_es;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ ext4_fsblk_t overhead = 0;
++ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
++
++	if (!buf)
++		return -ENOMEM;
++	memset(buf, 0, PAGE_SIZE);
++
++ /*
++ * Compute the overhead (FS structures). This is constant
++ * for a given filesystem unless the number of block groups
++ * changes so we cache the previous value until it does.
++ */
++
++ /*
++ * All of the blocks before first_data_block are overhead
++ */
++ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
++
++ /*
++ * Add the overhead found in each block group
++ */
++ for (i = 0; i < ngroups; i++) {
++ int blks;
++
++ blks = count_overhead(sb, i, buf);
++ overhead += blks;
++ if (blks)
++ memset(buf, 0, PAGE_SIZE);
++ cond_resched();
++ }
++ sbi->s_overhead = overhead;
++ smp_wmb();
++ free_page((unsigned long) buf);
++ return 0;
++}
++
+ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ char *orig_data = kstrdup(data, GFP_KERNEL);
+@@ -3695,6 +3803,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+
+ no_journal:
+ /*
++ * Get the # of file system overhead blocks from the
++ * superblock if present.
++ */
++ if (es->s_overhead_clusters)
++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
++ else {
++ ret = ext4_calculate_overhead(sb);
++ if (ret)
++ goto failed_mount_wq;
++ }
++
++ /*
+ * The maximum number of concurrent works can be high and
+ * concurrency isn't really necessary. Limit it to 1.
+ */
+@@ -4568,67 +4688,21 @@ restore_opts:
+ return err;
+ }
+
+-/*
+- * Note: calculating the overhead so we can be compatible with
+- * historical BSD practice is quite difficult in the face of
+- * clusters/bigalloc. This is because multiple metadata blocks from
+- * different block group can end up in the same allocation cluster.
+- * Calculating the exact overhead in the face of clustered allocation
+- * requires either O(all block bitmaps) in memory or O(number of block
+- * groups**2) in time. We will still calculate the superblock for
+- * older file systems --- and if we come across with a bigalloc file
+- * system with zero in s_overhead_clusters the estimate will be close to
+- * correct especially for very large cluster sizes --- but for newer
+- * file systems, it's better to calculate this figure once at mkfs
+- * time, and store it in the superblock. If the superblock value is
+- * present (even for non-bigalloc file systems), we will use it.
+- */
+ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct super_block *sb = dentry->d_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+- struct ext4_group_desc *gdp;
++ ext4_fsblk_t overhead = 0;
+ u64 fsid;
+ s64 bfree;
+
+- if (test_opt(sb, MINIX_DF)) {
+- sbi->s_overhead_last = 0;
+- } else if (es->s_overhead_clusters) {
+- sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
+- } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
+- ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+- ext4_fsblk_t overhead = 0;
+-
+- /*
+- * Compute the overhead (FS structures). This is constant
+- * for a given filesystem unless the number of block groups
+- * changes so we cache the previous value until it does.
+- */
+-
+- /*
+- * All of the blocks before first_data_block are
+- * overhead
+- */
+- overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+-
+- /*
+- * Add the overhead found in each block group
+- */
+- for (i = 0; i < ngroups; i++) {
+- gdp = ext4_get_group_desc(sb, i, NULL);
+- overhead += ext4_num_overhead_clusters(sb, i, gdp);
+- cond_resched();
+- }
+- sbi->s_overhead_last = overhead;
+- smp_wmb();
+- sbi->s_blocks_last = ext4_blocks_count(es);
+- }
++ if (!test_opt(sb, MINIX_DF))
++ overhead = sbi->s_overhead;
+
+ buf->f_type = EXT4_SUPER_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+- buf->f_blocks = (ext4_blocks_count(es) -
+- EXT4_C2B(sbi, sbi->s_overhead_last));
++ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
+ bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
+ percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
+ /* prevent underflow in case that few free space is available */
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index ebc2f4d..0aa424a 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -569,7 +569,8 @@ static int hugetlbfs_set_page_dirty(struct page *page)
+ }
+
+ static int hugetlbfs_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ int rc;
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 6a64f15..fcc50ab 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
+ return 0;
+ }
+
+-static int assign_type(struct file_lock *fl, int type)
++static int assign_type(struct file_lock *fl, long type)
+ {
+ switch (type) {
+ case F_RDLCK:
+@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
+ /*
+ * Initialize a lease, use the default lock manager operations
+ */
+-static int lease_init(struct file *filp, int type, struct file_lock *fl)
++static int lease_init(struct file *filp, long type, struct file_lock *fl)
+ {
+ if (assign_type(fl, type) != 0)
+ return -EINVAL;
+@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
+ }
+
+ /* Allocate a file_lock initialised to this type of lease */
+-static struct file_lock *lease_alloc(struct file *filp, int type)
++static struct file_lock *lease_alloc(struct file *filp, long type)
+ {
+ struct file_lock *fl = locks_alloc_lock();
+ int error = -ENOMEM;
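
A hedged aside on the int-to-long widening above: fcntl(2) hands the lease type through as a long, so narrowing it to int drops the upper bits before assign_type() validates it. A hypothetical 64-bit illustration (sketch only, not patch content):

    /*
     * Hypothetical: the low 32 bits alias F_RDLCK (0), so an int
     * parameter would truncate and validate the value by accident.
     */
    long bogus = (1L << 32) | F_RDLCK;

    assign_type(fl, (int)bogus); /* truncates to F_RDLCK: accepted */
    assign_type(fl, bogus);      /* full value hits the switch: -EINVAL */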
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 3f4d957..68b3f20 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -330,7 +330,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data);
+
+ #ifdef CONFIG_MIGRATION
+ extern int nfs_migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ #else
+ #define nfs_migrate_page NULL
+ #endif
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 4efd421..c6e523a 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1711,7 +1711,7 @@ out_error:
+
+ #ifdef CONFIG_MIGRATION
+ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+- struct page *page)
++ struct page *page, enum migrate_mode mode)
+ {
+ /*
+ * If PagePrivate is set, then the page is currently associated with
+@@ -1726,7 +1726,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+
+ nfs_fscache_release_page(page, GFP_KERNEL);
+
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+ #endif
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 270e135..516b7f0 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1285,7 +1285,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ BUG_ON(ident != TAG_IDENT_LVD);
+ lvd = (struct logicalVolDesc *)bh->b_data;
+ table_len = le32_to_cpu(lvd->mapTableLength);
+- if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++ if (table_len > sb->s_blocksize - sizeof(*lvd)) {
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
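
The reordered comparison above is an overflow fix, not a style change; a sketch with hypothetical values (not patch content):

    /*
     * On a 32-bit size_t with blocksize 4096 and a corrupted
     * on-disk table_len of 0xfffffff0:
     *   old test: sizeof(*lvd) + table_len wraps to a small value,
     *             passes the bound check, and the parse loop runs away
     *   new test: table_len > sb->s_blocksize - sizeof(*lvd) cannot
     *             wrap, so the corrupted length is rejected up front
     */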
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 0ed1eb0..ff039f0 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+
+ #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+ #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
++#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+ #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+ #define blk_queue_noxmerges(q) \
+ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 6cb60fd..c692acc 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -66,8 +66,9 @@ enum {
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+- /* prepare workqueues for other notifiers */
+- CPU_PRI_WORKQUEUE = 5,
++ /* bring up workqueues before normal notifiers and down after */
++ CPU_PRI_WORKQUEUE_UP = 5,
++ CPU_PRI_WORKQUEUE_DOWN = -5,
+ };
+
+ #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index e9eaec5..7a7e5fd 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
+ extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+
+ /*
+- * reading current mems_allowed and mempolicy in the fastpath must protected
+- * by get_mems_allowed()
++ * get_mems_allowed is required when making decisions involving mems_allowed
++ * such as during page allocation. mems_allowed can be updated in parallel
++ * and depending on the new value an operation can fail potentially causing
++ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
++ * prevents these artificial failures.
+ */
+-static inline void get_mems_allowed(void)
++static inline unsigned int get_mems_allowed(void)
+ {
+- current->mems_allowed_change_disable++;
+-
+- /*
+- * ensure that reading mems_allowed and mempolicy happens after the
+- * update of ->mems_allowed_change_disable.
+- *
+- * the write-side task finds ->mems_allowed_change_disable is not 0,
+- * and knows the read-side task is reading mems_allowed or mempolicy,
+- * so it will clear old bits lazily.
+- */
+- smp_mb();
++ return read_seqcount_begin(&current->mems_allowed_seq);
+ }
+
+-static inline void put_mems_allowed(void)
++/*
++ * If this returns false, the operation that took place after get_mems_allowed
++ * may have failed. It is up to the caller to retry the operation if
++ * appropriate.
++ */
++static inline bool put_mems_allowed(unsigned int seq)
+ {
+- /*
+- * ensure that reading mems_allowed and mempolicy before reducing
+- * mems_allowed_change_disable.
+- *
+- * the write-side task will know that the read-side task is still
+- * reading mems_allowed or mempolicy, don't clears old bits in the
+- * nodemask.
+- */
+- smp_mb();
+- --ACCESS_ONCE(current->mems_allowed_change_disable);
++ return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+ }
+
+ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
+ task_lock(current);
++ write_seqcount_begin(&current->mems_allowed_seq);
+ current->mems_allowed = nodemask;
++ write_seqcount_end(&current->mems_allowed_seq);
+ task_unlock(current);
+ }
+
+@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
+ }
+
+-static inline void get_mems_allowed(void)
++static inline unsigned int get_mems_allowed(void)
+ {
++ return 0;
+ }
+
+-static inline void put_mems_allowed(void)
++static inline bool put_mems_allowed(unsigned int seq)
+ {
++ return true;
+ }
+
+ #endif /* !CONFIG_CPUSETS */
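
The seqcount pair above is meant to bracket a read-side retry loop; a minimal sketch of the pattern, mirroring the callers this patch converts in mm/filemap.c and mm/page_alloc.c further down (gfp is assumed to be in scope):

    unsigned int cookie;
    struct page *page;

    do {
        cookie = get_mems_allowed();
        page = alloc_pages_exact_node(cpuset_mem_spread_node(),
                                      gfp, 0);
        /* retry only if the allocation failed while mems_allowed
         * was being rewritten (cookie no longer matches) */
    } while (!put_mems_allowed(cookie) && !page);

The write side is set_mems_allowed() above: the nodemask update sits inside write_seqcount_begin/end, which is what advances the sequence the readers sample.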
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 43d36b7..29b6353 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -525,6 +525,7 @@ enum positive_aop_returns {
+ struct page;
+ struct address_space;
+ struct writeback_control;
++enum migrate_mode;
+
+ struct iov_iter {
+ const struct iovec *iov;
+@@ -609,9 +610,12 @@ struct address_space_operations {
+ loff_t offset, unsigned long nr_segs);
+ int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
+- /* migrate the contents of a page to the specified target */
++ /*
++ * migrate the contents of a page to the specified target. If sync
++ * is false, it must not block.
++ */
+ int (*migratepage) (struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ int (*launder_page) (struct page *);
+ int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+ unsigned long);
+@@ -2586,7 +2590,8 @@ extern int generic_check_addressable(unsigned, u64);
+
+ #ifdef CONFIG_MIGRATION
+ extern int buffer_migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *,
++ enum migrate_mode);
+ #else
+ #define buffer_migrate_page NULL
+ #endif
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 32574ee..df53fdf 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -30,6 +30,13 @@ extern struct fs_struct init_fs;
+ #define INIT_THREADGROUP_FORK_LOCK(sig)
+ #endif
+
++#ifdef CONFIG_CPUSETS
++#define INIT_CPUSET_SEQ \
++ .mems_allowed_seq = SEQCNT_ZERO,
++#else
++#define INIT_CPUSET_SEQ
++#endif
++
+ #define INIT_SIGNALS(sig) { \
+ .nr_threads = 1, \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+@@ -193,6 +200,7 @@ extern struct cred init_cred;
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
++ INIT_CPUSET_SEQ \
+ }
+
+
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index e39aeec..eaf8674 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -6,18 +6,31 @@
+
+ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+
++/*
++ * MIGRATE_ASYNC means never block
++ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
++ * on most operations but not ->writepage as the potential stall time
++ * is too significant
++ * MIGRATE_SYNC will block when migrating pages
++ */
++enum migrate_mode {
++ MIGRATE_ASYNC,
++ MIGRATE_SYNC_LIGHT,
++ MIGRATE_SYNC,
++};
++
+ #ifdef CONFIG_MIGRATION
+ #define PAGE_MIGRATION 1
+
+ extern void putback_lru_pages(struct list_head *l);
+ extern int migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync);
++ enum migrate_mode mode);
+ extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync);
++ enum migrate_mode mode);
+
+ extern int fail_migrate_page(struct address_space *,
+ struct page *, struct page *);
+@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+ static inline void putback_lru_pages(struct list_head *l) {}
+ static inline int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync) { return -ENOSYS; }
++ enum migrate_mode mode) { return -ENOSYS; }
+ static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync) { return -ENOSYS; }
++ enum migrate_mode mode) { return -ENOSYS; }
+
+ static inline int migrate_prep(void) { return -ENOSYS; }
+ static inline int migrate_prep_local(void) { return -ENOSYS; }
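
For orientation, the three modes map onto the callers converted later in this patch roughly as follows (summary, not patch content):

    /*
     * MIGRATE_ASYNC      - async direct compaction (mm/compaction.c)
     * MIGRATE_SYNC_LIGHT - sync compaction; may block except on
     *                      ->writepage
     * MIGRATE_SYNC       - memory hot-remove, soft offline and
     *                      move_pages(2), where stalls are acceptable
     */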
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 905b1e1..25842b6 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -173,6 +173,8 @@ static inline int is_unevictable_lru(enum lru_list l)
+ #define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+ /* Isolate unmapped file */
+ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
++/* Isolate for asynchronous migration */
++#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+
+ /* LRU Isolation modes. */
+ typedef unsigned __bitwise__ isolate_mode_t;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5afa2a3..d336c35 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
+
+
+ extern void calc_global_load(unsigned long ticks);
++extern void update_cpu_load_nohz(void);
+
+ extern unsigned long get_parent_ip(unsigned long addr);
+
+@@ -1481,7 +1482,7 @@ struct task_struct {
+ #endif
+ #ifdef CONFIG_CPUSETS
+ nodemask_t mems_allowed; /* Protected by alloc_lock */
+- int mems_allowed_change_disable;
++ seqcount_t mems_allowed_seq; /* Sequence no to catch updates */
+ int cpuset_mem_spread_rotor;
+ int cpuset_slab_spread_rotor;
+ #endif
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 94bbec3..6ee550e 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -157,6 +157,7 @@ enum tcm_sense_reason_table {
+ TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
+ TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
++ TCM_ADDRESS_OUT_OF_RANGE = 0x11,
+ };
+
+ struct se_obj {
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 0b1712d..46a1d3c 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -964,7 +964,6 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ {
+ bool need_loop;
+
+-repeat:
+ /*
+ * Allow tasks that have access to memory reserves because they have
+ * been OOM killed to get memory anywhere.
+@@ -983,45 +982,19 @@ repeat:
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+- nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+- mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+
+- /*
+- * ensure checking ->mems_allowed_change_disable after setting all new
+- * allowed nodes.
+- *
+- * the read-side task can see an nodemask with new allowed nodes and
+- * old allowed nodes. and if it allocates page when cpuset clears newly
+- * disallowed ones continuous, it can see the new allowed bits.
+- *
+- * And if setting all new allowed nodes is after the checking, setting
+- * all new allowed nodes and clearing newly disallowed ones will be done
+- * continuous, and the read-side task may find no node to alloc page.
+- */
+- smp_mb();
++ if (need_loop)
++ write_seqcount_begin(&tsk->mems_allowed_seq);
+
+- /*
+- * Allocation of memory is very fast, we needn't sleep when waiting
+- * for the read-side.
+- */
+- while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+- task_unlock(tsk);
+- if (!task_curr(tsk))
+- yield();
+- goto repeat;
+- }
+-
+- /*
+- * ensure checking ->mems_allowed_change_disable before clearing all new
+- * disallowed nodes.
+- *
+- * if clearing newly disallowed bits before the checking, the read-side
+- * task may find no node to alloc page.
+- */
+- smp_mb();
++ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
++ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
++
++ if (need_loop)
++ write_seqcount_end(&tsk->mems_allowed_seq);
++
+ task_unlock(tsk);
+ }
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 79ee71f..222457a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -979,6 +979,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ #ifdef CONFIG_CGROUPS
+ init_rwsem(&sig->threadgroup_fork_lock);
+ #endif
++#ifdef CONFIG_CPUSETS
++ seqcount_init(&tsk->mems_allowed_seq);
++#endif
+
+ sig->oom_adj = current->signal->oom_adj;
+ sig->oom_score_adj = current->signal->oom_score_adj;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 7c0d578..013bd2e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -367,6 +367,7 @@ int hibernation_snapshot(int platform_mode)
+ }
+
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+ error = dpm_suspend(PMSG_FREEZE);
+ if (error)
+@@ -392,6 +393,7 @@ int hibernation_snapshot(int platform_mode)
+ if (error || !in_suspend)
+ pm_restore_gfp_mask();
+
++ ftrace_start();
+ resume_console();
+ dpm_complete(msg);
+
+@@ -496,6 +498,7 @@ int hibernation_restore(int platform_mode)
+
+ pm_prepare_console();
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+@@ -503,6 +506,7 @@ int hibernation_restore(int platform_mode)
+ dpm_resume_end(PMSG_RECOVER);
+ }
+ pm_restore_gfp_mask();
++ ftrace_start();
+ resume_console();
+ pm_restore_console();
+ return error;
+@@ -529,6 +533,7 @@ int hibernation_platform_enter(void)
+
+ entering_platform_hibernation = true;
+ suspend_console();
++ ftrace_stop();
+ error = dpm_suspend_start(PMSG_HIBERNATE);
+ if (error) {
+ if (hibernation_ops->recover)
+@@ -572,6 +577,7 @@ int hibernation_platform_enter(void)
+ Resume_devices:
+ entering_platform_hibernation = false;
+ dpm_resume_end(PMSG_RESTORE);
++ ftrace_start();
+ resume_console();
+
+ Close:
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 4953dc0..af48faa 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -25,6 +25,7 @@
+ #include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
++#include <linux/ftrace.h>
+ #include <trace/events/power.h>
+
+ #include "power.h"
+@@ -220,6 +221,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ goto Close;
+ }
+ suspend_console();
++ ftrace_stop();
+ suspend_test_start();
+ error = dpm_suspend_start(PMSG_SUSPEND);
+ if (error) {
+@@ -239,6 +241,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ suspend_test_start();
+ dpm_resume_end(PMSG_RESUME);
+ suspend_test_finish("resume devices");
++ ftrace_start();
+ resume_console();
+ Close:
+ if (suspend_ops->end)
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 52ac69b..9cd8ca7 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1887,7 +1887,7 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+-static void update_cpu_load(struct rq *this_rq);
++static void update_idle_cpu_load(struct rq *this_rq);
+
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
+@@ -3855,22 +3855,13 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
+ */
+-static void update_cpu_load(struct rq *this_rq)
++static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
++ unsigned long pending_updates)
+ {
+- unsigned long this_load = this_rq->load.weight;
+- unsigned long curr_jiffies = jiffies;
+- unsigned long pending_updates;
+ int i, scale;
+
+ this_rq->nr_load_updates++;
+
+- /* Avoid repeated calls on same jiffy, when moving in and out of idle */
+- if (curr_jiffies == this_rq->last_load_update_tick)
+- return;
+-
+- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+- this_rq->last_load_update_tick = curr_jiffies;
+-
+ /* Update our load: */
+ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+@@ -3895,9 +3886,78 @@ static void update_cpu_load(struct rq *this_rq)
+ sched_avg_update(this_rq);
+ }
+
++#ifdef CONFIG_NO_HZ
++/*
++ * There is no sane way to deal with nohz on smp when using jiffies because the
++ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
++ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
++ *
++ * Therefore we cannot use the delta approach from the regular tick since that
++ * would seriously skew the load calculation. However we'll make do for those
++ * updates happening while idle (nohz_idle_balance) or coming out of idle
++ * (tick_nohz_idle_exit).
++ *
++ * This means we might still be one tick off for nohz periods.
++ */
++
++/*
++ * Called from nohz_idle_balance() to update the load ratings before doing the
++ * idle balance.
++ */
++static void update_idle_cpu_load(struct rq *this_rq)
++{
++ unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
++ unsigned long load = this_rq->load.weight;
++ unsigned long pending_updates;
++
++ /*
++ * bail if there's load or we're actually up-to-date.
++ */
++ if (load || curr_jiffies == this_rq->last_load_update_tick)
++ return;
++
++ pending_updates = curr_jiffies - this_rq->last_load_update_tick;
++ this_rq->last_load_update_tick = curr_jiffies;
++
++ __update_cpu_load(this_rq, load, pending_updates);
++}
++
++/*
++ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
++ */
++void update_cpu_load_nohz(void)
++{
++ struct rq *this_rq = this_rq();
++ unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
++ unsigned long pending_updates;
++
++ if (curr_jiffies == this_rq->last_load_update_tick)
++ return;
++
++ raw_spin_lock(&this_rq->lock);
++ pending_updates = curr_jiffies - this_rq->last_load_update_tick;
++ if (pending_updates) {
++ this_rq->last_load_update_tick = curr_jiffies;
++ /*
++ * We were idle, this means load 0, the current load might be
++ * !0 due to remote wakeups and the sort.
++ */
++ __update_cpu_load(this_rq, 0, pending_updates);
++ }
++ raw_spin_unlock(&this_rq->lock);
++}
++#endif /* CONFIG_NO_HZ */
++
++/*
++ * Called from scheduler_tick()
++ */
+ static void update_cpu_load_active(struct rq *this_rq)
+ {
+- update_cpu_load(this_rq);
++ /*
++ * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
++ */
++ this_rq->last_load_update_tick = jiffies;
++ __update_cpu_load(this_rq, this_rq->load.weight, 1);
+
+ calc_load_account_active(this_rq);
+ }
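
The {0,2}-versus-{1,1} remark in the nohz comment above is easiest to see on a timeline; hypothetical numbers with HZ=100 (sketch only):

    /*
     * CPU0 bumps jiffies at t = 0ms, 10ms, 20ms. CPU1's drifted
     * tick samples jiffies at t = 0.1ms, 9.9ms, 20.1ms and so
     * observes per-tick deltas {0, 2} where an aligned tick would
     * see {1, 1} -- hence the locked recomputation of
     * pending_updates and the tolerated one-tick error above.
     */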
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 8a39fa3..66e4576 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4735,7 +4735,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+
+ raw_spin_lock_irq(&this_rq->lock);
+ update_rq_clock(this_rq);
+- update_cpu_load(this_rq);
++ update_idle_cpu_load(this_rq);
+ raw_spin_unlock_irq(&this_rq->lock);
+
+ rebalance_domains(balance_cpu, CPU_IDLE);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 9955ebd..793548c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -549,6 +549,7 @@ void tick_nohz_restart_sched_tick(void)
+ /* Update jiffies first */
+ select_nohz_load_balancer(0);
+ tick_do_update_jiffies64(now);
++ update_cpu_load_nohz();
+
+ #ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7947e16..a650bee 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3586,6 +3586,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(0);
+ }
+
++/*
++ * Workqueues should be brought up before normal priority CPU notifiers.
++ * This will be registered high priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_CANCELED:
++ case CPU_DOWN_FAILED:
++ case CPU_ONLINE:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
++/*
++ * Workqueues should be brought down after normal priority CPU notifiers.
++ * This will be registered as low priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_DOWN_PREPARE:
++ case CPU_DYING:
++ case CPU_POST_DEAD:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
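
Together with the CPU_PRI_WORKQUEUE_UP/DOWN values added to cpu.h earlier in this patch, the registration of these two callbacks at the bottom of this file's diff yields the following ordering (summary, not patch content):

    /*
     * CPU bring-up : workqueue callback (prio  5) before normal
     *                notifiers (prio 0)
     * CPU tear-down: normal notifiers (prio 0) before workqueue
     *                callback (prio -5)
     * so per-cpu workers exist whenever other notifiers may run.
     */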
+ #ifdef CONFIG_SMP
+
+ struct work_for_cpu {
+@@ -3779,7 +3814,8 @@ static int __init init_workqueues(void)
+ unsigned int cpu;
+ int i;
+
+- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
++ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
++ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+
+ /* initialize gcwqs */
+ for_each_gcwq_cpu(cpu) {
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 50f1c60..46973fb 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -372,7 +372,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ }
+
+ if (!cc->sync)
+- mode |= ISOLATE_CLEAN;
++ mode |= ISOLATE_ASYNC_MIGRATE;
+
+ /* Try isolate the page */
+ if (__isolate_lru_page(page, mode, 0) != 0)
+@@ -577,7 +577,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ nr_migrate = cc->nr_migratepages;
+ err = migrate_pages(&cc->migratepages, compaction_alloc,
+ (unsigned long)cc, false,
+- cc->sync);
++ cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+ update_nr_listpages(cc);
+ nr_remaining = cc->nr_migratepages;
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 03c5b0e..556858c 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -500,10 +500,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
+ struct page *page;
+
+ if (cpuset_do_page_mem_spread()) {
+- get_mems_allowed();
+- n = cpuset_mem_spread_node();
+- page = alloc_pages_exact_node(n, gfp, 0);
+- put_mems_allowed();
++ unsigned int cpuset_mems_cookie;
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ n = cpuset_mem_spread_node();
++ page = alloc_pages_exact_node(n, gfp, 0);
++ } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
++
+ return page;
+ }
+ return alloc_pages(gfp, 0);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7c535b0..b1e1bad 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -538,8 +538,10 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
+ struct zonelist *zonelist;
+ struct zone *zone;
+ struct zoneref *z;
++ unsigned int cpuset_mems_cookie;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = huge_zonelist(vma, address,
+ htlb_alloc_mask, &mpol, &nodemask);
+ /*
+@@ -566,10 +568,15 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
+ }
+ }
+ }
+-err:
++
+ mpol_cond_put(mpol);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
++
++err:
++ mpol_cond_put(mpol);
++ return NULL;
+ }
+
+ static void update_and_free_page(struct hstate *h, struct page *page)
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 06d3479..5bd5bb1 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1427,8 +1427,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ /* Keep page count to indicate a given hugepage is isolated. */
+
+ list_add(&hpage->lru, &pagelist);
+- ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+- true);
++ ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
++ MIGRATE_SYNC);
+ if (ret) {
+ struct page *page1, *page2;
+ list_for_each_entry_safe(page1, page2, &pagelist, lru)
+@@ -1557,7 +1557,7 @@ int soft_offline_page(struct page *page, int flags)
+ page_is_file_cache(page));
+ list_add(&page->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+- 0, true);
++ false, MIGRATE_SYNC);
+ if (ret) {
+ putback_lru_pages(&pagelist);
+ pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 2168489..6629faf 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -809,7 +809,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ }
+ /* this function returns # of failed pages */
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+- true, true);
++ true, MIGRATE_SYNC);
+ if (ret)
+ putback_lru_pages(&source);
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index b26aae2..c0007f9 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -942,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
+
+ if (!list_empty(&pagelist)) {
+ err = migrate_pages(&pagelist, new_node_page, dest,
+- false, true);
++ false, MIGRATE_SYNC);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
+@@ -1843,18 +1843,24 @@ struct page *
+ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, int node)
+ {
+- struct mempolicy *pol = get_vma_policy(current, vma, addr);
++ struct mempolicy *pol;
+ struct zonelist *zl;
+ struct page *page;
++ unsigned int cpuset_mems_cookie;
++
++retry_cpuset:
++ pol = get_vma_policy(current, vma, addr);
++ cpuset_mems_cookie = get_mems_allowed();
+
+- get_mems_allowed();
+ if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
+ unsigned nid;
+
+ nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
+ mpol_cond_put(pol);
+ page = alloc_page_interleave(gfp, order, nid);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ zl = policy_zonelist(gfp, pol, node);
+@@ -1865,7 +1871,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ struct page *page = __alloc_pages_nodemask(gfp, order,
+ zl, policy_nodemask(gfp, pol));
+ __mpol_put(pol);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
+ }
+ /*
+@@ -1873,7 +1880,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ */
+ page = __alloc_pages_nodemask(gfp, order, zl,
+ policy_nodemask(gfp, pol));
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
+ }
+
+@@ -1900,11 +1908,14 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+ {
+ struct mempolicy *pol = current->mempolicy;
+ struct page *page;
++ unsigned int cpuset_mems_cookie;
+
+ if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
+ pol = &default_policy;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++
+ /*
+ * No reference counting needed for current->mempolicy
+ * nor system default_policy
+@@ -1915,7 +1926,10 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+ page = __alloc_pages_nodemask(gfp, order,
+ policy_zonelist(gfp, pol, numa_node_id()),
+ policy_nodemask(gfp, pol));
+- put_mems_allowed();
++
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ EXPORT_SYMBOL(alloc_pages_current);
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 177aca4..180d97f 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -220,6 +220,56 @@ out:
+ pte_unmap_unlock(ptep, ptl);
+ }
+
++#ifdef CONFIG_BLOCK
++/* Returns true if all buffers are successfully locked */
++static bool buffer_migrate_lock_buffers(struct buffer_head *head,
++ enum migrate_mode mode)
++{
++ struct buffer_head *bh = head;
++
++ /* Simple case, sync compaction */
++ if (mode != MIGRATE_ASYNC) {
++ do {
++ get_bh(bh);
++ lock_buffer(bh);
++ bh = bh->b_this_page;
++
++ } while (bh != head);
++
++ return true;
++ }
++
++ /* async case, we cannot block on lock_buffer so use trylock_buffer */
++ do {
++ get_bh(bh);
++ if (!trylock_buffer(bh)) {
++ /*
++ * We failed to lock the buffer and cannot stall in
++ * async migration. Release the taken locks
++ */
++ struct buffer_head *failed_bh = bh;
++ put_bh(failed_bh);
++ bh = head;
++ while (bh != failed_bh) {
++ unlock_buffer(bh);
++ put_bh(bh);
++ bh = bh->b_this_page;
++ }
++ return false;
++ }
++
++ bh = bh->b_this_page;
++ } while (bh != head);
++ return true;
++}
++#else
++static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
++ enum migrate_mode mode)
++{
++ return true;
++}
++#endif /* CONFIG_BLOCK */
++
+ /*
+ * Replace the page in the mapping.
+ *
+@@ -229,7 +279,8 @@ out:
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ */
+ static int migrate_page_move_mapping(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ struct buffer_head *head, enum migrate_mode mode)
+ {
+ int expected_count;
+ void **pslot;
+@@ -259,6 +310,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
+ }
+
+ /*
++ * In the async migration case of moving a page with buffers, lock the
++ * buffers using trylock before the mapping is moved. If the mapping
++ * were moved first and we then failed to lock the buffers, we could
++ * not move the mapping back due to the elevated page count and would
++ * have to block waiting on other references to be dropped.
++ */
++ if (mode == MIGRATE_ASYNC && head &&
++ !buffer_migrate_lock_buffers(head, mode)) {
++ page_unfreeze_refs(page, expected_count);
++ spin_unlock_irq(&mapping->tree_lock);
++ return -EAGAIN;
++ }
++
++ /*
+ * Now we know that no one else is looking at the page.
+ */
+ get_page(newpage); /* add cache reference */
+@@ -415,13 +480,14 @@ EXPORT_SYMBOL(fail_migrate_page);
+ * Pages are locked upon entry and exit.
+ */
+ int migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ int rc;
+
+ BUG_ON(PageWriteback(page)); /* Writeback must be complete */
+
+- rc = migrate_page_move_mapping(mapping, newpage, page);
++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+
+ if (rc)
+ return rc;
+@@ -438,28 +504,28 @@ EXPORT_SYMBOL(migrate_page);
+ * exist.
+ */
+ int buffer_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page, enum migrate_mode mode)
+ {
+ struct buffer_head *bh, *head;
+ int rc;
+
+ if (!page_has_buffers(page))
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+
+ head = page_buffers(page);
+
+- rc = migrate_page_move_mapping(mapping, newpage, page);
++ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+
+ if (rc)
+ return rc;
+
+- bh = head;
+- do {
+- get_bh(bh);
+- lock_buffer(bh);
+- bh = bh->b_this_page;
+-
+- } while (bh != head);
++ /*
++ * In the async case, migrate_page_move_mapping locked the buffers
++ * with an IRQ-safe spinlock held. In the sync case, the buffers
++ * need to be locked now
++ */
++ if (mode != MIGRATE_ASYNC)
++ BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+@@ -536,10 +602,14 @@ static int writeout(struct address_space *mapping, struct page *page)
+ * Default handling if a filesystem does not provide a migration function.
+ */
+ static int fallback_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page, enum migrate_mode mode)
+ {
+- if (PageDirty(page))
++ if (PageDirty(page)) {
++ /* Only writeback pages in full synchronous migration */
++ if (mode != MIGRATE_SYNC)
++ return -EBUSY;
+ return writeout(mapping, page);
++ }
+
+ /*
+ * Buffers may be managed in a filesystem specific way.
+@@ -549,7 +619,7 @@ static int fallback_migrate_page(struct address_space *mapping,
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+
+ /*
+@@ -564,7 +634,7 @@ static int fallback_migrate_page(struct address_space *mapping,
+ * == 0 - success
+ */
+ static int move_to_new_page(struct page *newpage, struct page *page,
+- int remap_swapcache, bool sync)
++ int remap_swapcache, enum migrate_mode mode)
+ {
+ struct address_space *mapping;
+ int rc;
+@@ -585,29 +655,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
+
+ mapping = page_mapping(page);
+ if (!mapping)
+- rc = migrate_page(mapping, newpage, page);
+- else {
++ rc = migrate_page(mapping, newpage, page, mode);
++ else if (mapping->a_ops->migratepage)
+ /*
+- * Do not writeback pages if !sync and migratepage is
+- * not pointing to migrate_page() which is nonblocking
+- * (swapcache/tmpfs uses migratepage = migrate_page).
++ * Most pages have a mapping and most filesystems provide a
++ * migratepage callback. Anonymous pages are part of swap
++ * space which also has its own migratepage callback. This
++ * is the most common path for page migration.
+ */
+- if (PageDirty(page) && !sync &&
+- mapping->a_ops->migratepage != migrate_page)
+- rc = -EBUSY;
+- else if (mapping->a_ops->migratepage)
+- /*
+- * Most pages have a mapping and most filesystems
+- * should provide a migration function. Anonymous
+- * pages are part of swap space which also has its
+- * own migration function. This is the most common
+- * path for page migration.
+- */
+- rc = mapping->a_ops->migratepage(mapping,
+- newpage, page);
+- else
+- rc = fallback_migrate_page(mapping, newpage, page);
+- }
++ rc = mapping->a_ops->migratepage(mapping,
++ newpage, page, mode);
++ else
++ rc = fallback_migrate_page(mapping, newpage, page, mode);
+
+ if (rc) {
+ newpage->mapping = NULL;
+@@ -622,7 +681,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
+ }
+
+ static int __unmap_and_move(struct page *page, struct page *newpage,
+- int force, bool offlining, bool sync)
++ int force, bool offlining, enum migrate_mode mode)
+ {
+ int rc = -EAGAIN;
+ int remap_swapcache = 1;
+@@ -631,7 +690,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+ struct anon_vma *anon_vma = NULL;
+
+ if (!trylock_page(page)) {
+- if (!force || !sync)
++ if (!force || mode == MIGRATE_ASYNC)
+ goto out;
+
+ /*
+@@ -677,10 +736,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+
+ if (PageWriteback(page)) {
+ /*
+- * For !sync, there is no point retrying as the retry loop
+- * is expected to be too short for PageWriteback to be cleared
++ * Only in the case of a full synchronous migration is it
++ * necessary to wait for PageWriteback. In the async case,
++ * the retry loop is too short and in the sync-light case,
++ * the overhead of stalling is too much
+ */
+- if (!sync) {
++ if (mode != MIGRATE_SYNC) {
+ rc = -EBUSY;
+ goto uncharge;
+ }
+@@ -751,7 +812,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+
+ skip_unmap:
+ if (!page_mapped(page))
+- rc = move_to_new_page(newpage, page, remap_swapcache, sync);
++ rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+
+ if (rc && remap_swapcache)
+ remove_migration_ptes(page, page);
+@@ -774,7 +835,8 @@ out:
+ * to the newly allocated page in newpage.
+ */
+ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+- struct page *page, int force, bool offlining, bool sync)
++ struct page *page, int force, bool offlining,
++ enum migrate_mode mode)
+ {
+ int rc = 0;
+ int *result = NULL;
+@@ -792,7 +854,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+ if (unlikely(split_huge_page(page)))
+ goto out;
+
+- rc = __unmap_and_move(page, newpage, force, offlining, sync);
++ rc = __unmap_and_move(page, newpage, force, offlining, mode);
+ out:
+ if (rc != -EAGAIN) {
+ /*
+@@ -840,7 +902,8 @@ out:
+ */
+ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ unsigned long private, struct page *hpage,
+- int force, bool offlining, bool sync)
++ int force, bool offlining,
++ enum migrate_mode mode)
+ {
+ int rc = 0;
+ int *result = NULL;
+@@ -853,7 +916,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ rc = -EAGAIN;
+
+ if (!trylock_page(hpage)) {
+- if (!force || !sync)
++ if (!force || mode != MIGRATE_SYNC)
+ goto out;
+ lock_page(hpage);
+ }
+@@ -864,7 +927,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+
+ if (!page_mapped(hpage))
+- rc = move_to_new_page(new_hpage, hpage, 1, sync);
++ rc = move_to_new_page(new_hpage, hpage, 1, mode);
+
+ if (rc)
+ remove_migration_ptes(hpage, hpage);
+@@ -907,7 +970,7 @@ out:
+ */
+ int migrate_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private, bool offlining,
+- bool sync)
++ enum migrate_mode mode)
+ {
+ int retry = 1;
+ int nr_failed = 0;
+@@ -928,7 +991,7 @@ int migrate_pages(struct list_head *from,
+
+ rc = unmap_and_move(get_new_page, private,
+ page, pass > 2, offlining,
+- sync);
++ mode);
+
+ switch(rc) {
+ case -ENOMEM:
+@@ -958,7 +1021,7 @@ out:
+
+ int migrate_huge_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private, bool offlining,
+- bool sync)
++ enum migrate_mode mode)
+ {
+ int retry = 1;
+ int nr_failed = 0;
+@@ -975,7 +1038,7 @@ int migrate_huge_pages(struct list_head *from,
+
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, page, pass > 2, offlining,
+- sync);
++ mode);
+
+ switch(rc) {
+ case -ENOMEM:
+@@ -1104,7 +1167,7 @@ set_status:
+ err = 0;
+ if (!list_empty(&pagelist)) {
+ err = migrate_pages(&pagelist, new_page_node,
+- (unsigned long)pm, 0, true);
++ (unsigned long)pm, 0, MIGRATE_SYNC);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 485be89..065dbe8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1886,14 +1886,20 @@ static struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+- int migratetype, unsigned long *did_some_progress,
+- bool sync_migration)
++ int migratetype, bool sync_migration,
++ bool *deferred_compaction,
++ unsigned long *did_some_progress)
+ {
+ struct page *page;
+
+- if (!order || compaction_deferred(preferred_zone))
++ if (!order)
+ return NULL;
+
++ if (compaction_deferred(preferred_zone)) {
++ *deferred_compaction = true;
++ return NULL;
++ }
++
+ current->flags |= PF_MEMALLOC;
+ *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+ nodemask, sync_migration);
+@@ -1921,7 +1927,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ * but not enough to satisfy watermarks.
+ */
+ count_vm_event(COMPACTFAIL);
+- defer_compaction(preferred_zone);
++
++ /*
++ * As async compaction considers a subset of pageblocks, only
++ * defer if the failure was a sync compaction failure.
++ */
++ if (sync_migration)
++ defer_compaction(preferred_zone);
+
+ cond_resched();
+ }
+@@ -1933,8 +1945,9 @@ static inline struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+- int migratetype, unsigned long *did_some_progress,
+- bool sync_migration)
++ int migratetype, bool sync_migration,
++ bool *deferred_compaction,
++ unsigned long *did_some_progress)
+ {
+ return NULL;
+ }
+@@ -2084,6 +2097,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ unsigned long pages_reclaimed = 0;
+ unsigned long did_some_progress;
+ bool sync_migration = false;
++ bool deferred_compaction = false;
+
+ /*
+ * In the slowpath, we sanity check order to avoid ever trying to
+@@ -2164,12 +2178,22 @@ rebalance:
+ zonelist, high_zoneidx,
+ nodemask,
+ alloc_flags, preferred_zone,
+- migratetype, &did_some_progress,
+- sync_migration);
++ migratetype, sync_migration,
++ &deferred_compaction,
++ &did_some_progress);
+ if (page)
+ goto got_pg;
+ sync_migration = true;
+
++ /*
++ * If compaction is deferred for high-order allocations, it is because
++ * sync compaction recently failed. If this is the case and the caller
++ * has requested the system not be heavily disrupted, fail the
++ * allocation now instead of entering direct reclaim
++ */
++ if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
++ goto nopage;
++
+ /* Try direct reclaim and then allocating */
+ page = __alloc_pages_direct_reclaim(gfp_mask, order,
+ zonelist, high_zoneidx,
+@@ -2232,8 +2256,9 @@ rebalance:
+ zonelist, high_zoneidx,
+ nodemask,
+ alloc_flags, preferred_zone,
+- migratetype, &did_some_progress,
+- sync_migration);
++ migratetype, sync_migration,
++ &deferred_compaction,
++ &did_some_progress);
+ if (page)
+ goto got_pg;
+ }
+@@ -2257,8 +2282,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ {
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zone *preferred_zone;
+- struct page *page;
++ struct page *page = NULL;
+ int migratetype = allocflags_to_migratetype(gfp_mask);
++ unsigned int cpuset_mems_cookie;
+
+ gfp_mask &= gfp_allowed_mask;
+
+@@ -2277,15 +2303,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ if (unlikely(!zonelist->_zonerefs->zone))
+ return NULL;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++
+ /* The preferred zone is used for statistics later */
+ first_zones_zonelist(zonelist, high_zoneidx,
+ nodemask ? : &cpuset_current_mems_allowed,
+ &preferred_zone);
+- if (!preferred_zone) {
+- put_mems_allowed();
+- return NULL;
+- }
++ if (!preferred_zone)
++ goto out;
+
+ /* First allocation attempt */
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+@@ -2295,9 +2321,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ page = __alloc_pages_slowpath(gfp_mask, order,
+ zonelist, high_zoneidx, nodemask,
+ preferred_zone, migratetype);
+- put_mems_allowed();
+
+ trace_mm_page_alloc(page, order, gfp_mask, migratetype);
++
++out:
++ /*
++ * When updating a task's mems_allowed, it is possible to race with
++ * parallel threads in such a way that an allocation can fail while
++ * the mask is being updated. If a page allocation is about to fail,
++ * check if the cpuset changed during allocation and if so, retry.
++ */
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ EXPORT_SYMBOL(__alloc_pages_nodemask);
+@@ -2521,13 +2557,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
+ bool skip_free_areas_node(unsigned int flags, int nid)
+ {
+ bool ret = false;
++ unsigned int cpuset_mems_cookie;
+
+ if (!(flags & SHOW_MEM_FILTER_NODES))
+ goto out;
+
+- get_mems_allowed();
+- ret = !node_isset(nid, cpuset_current_mems_allowed);
+- put_mems_allowed();
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ ret = !node_isset(nid, cpuset_current_mems_allowed);
++ } while (!put_mems_allowed(cpuset_mems_cookie));
+ out:
+ return ret;
+ }
+@@ -3407,25 +3445,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ if (page_to_nid(page) != zone_to_nid(zone))
+ continue;
+
+- /* Blocks with reserved pages will never free, skip them. */
+- block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+- if (pageblock_is_reserved(pfn, block_end_pfn))
+- continue;
+-
+ block_migratetype = get_pageblock_migratetype(page);
+
+- /* If this block is reserved, account for it */
+- if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
+- reserve--;
+- continue;
+- }
++ /* Only test what is necessary when the reserves are not met */
++ if (reserve > 0) {
++ /*
++ * Blocks with reserved pages will never free, skip
++ * them.
++ */
++ block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
++ if (pageblock_is_reserved(pfn, block_end_pfn))
++ continue;
+
+- /* Suitable for reserving if this block is movable */
+- if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
+- set_pageblock_migratetype(page, MIGRATE_RESERVE);
+- move_freepages_block(zone, page, MIGRATE_RESERVE);
+- reserve--;
+- continue;
++ /* If this block is reserved, account for it */
++ if (block_migratetype == MIGRATE_RESERVE) {
++ reserve--;
++ continue;
++ }
++
++ /* Suitable for reserving if this block is movable */
++ if (block_migratetype == MIGRATE_MOVABLE) {
++ set_pageblock_migratetype(page,
++ MIGRATE_RESERVE);
++ move_freepages_block(zone, page,
++ MIGRATE_RESERVE);
++ reserve--;
++ continue;
++ }
+ }
+
+ /*
+diff --git a/mm/slab.c b/mm/slab.c
+index 83311c9a..cd3ab93 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3267,12 +3267,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+ if (in_interrupt() || (flags & __GFP_THISNODE))
+ return NULL;
+ nid_alloc = nid_here = numa_mem_id();
+- get_mems_allowed();
+ if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+ nid_alloc = cpuset_slab_spread_node();
+ else if (current->mempolicy)
+ nid_alloc = slab_node(current->mempolicy);
+- put_mems_allowed();
+ if (nid_alloc != nid_here)
+ return ____cache_alloc_node(cachep, flags, nid_alloc);
+ return NULL;
+@@ -3295,14 +3293,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+ enum zone_type high_zoneidx = gfp_zone(flags);
+ void *obj = NULL;
+ int nid;
++ unsigned int cpuset_mems_cookie;
+
+ if (flags & __GFP_THISNODE)
+ return NULL;
+
+- get_mems_allowed();
+- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
+
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
++
+ retry:
+ /*
+ * Look through allowed nodes for objects available
+@@ -3355,7 +3356,9 @@ retry:
+ }
+ }
+ }
+- put_mems_allowed();
++
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
++ goto retry_cpuset;
+ return obj;
+ }
+
+diff --git a/mm/slub.c b/mm/slub.c
+index af47188..5710788 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1582,6 +1582,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(flags);
+ void *object;
++ unsigned int cpuset_mems_cookie;
+
+ /*
+ * The defrag ratio allows a configuration of the tradeoffs between
+@@ -1605,23 +1606,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ get_cycles() % 1024 > s->remote_node_defrag_ratio)
+ return NULL;
+
+- get_mems_allowed();
+- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+- struct kmem_cache_node *n;
+-
+- n = get_node(s, zone_to_nid(zone));
+-
+- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+- n->nr_partial > s->min_partial) {
+- object = get_partial_node(s, n, c);
+- if (object) {
+- put_mems_allowed();
+- return object;
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
++ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
++ struct kmem_cache_node *n;
++
++ n = get_node(s, zone_to_nid(zone));
++
++ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
++ n->nr_partial > s->min_partial) {
++ object = get_partial_node(s, n, c);
++ if (object) {
++ /*
++ * Return the object even if
++ * put_mems_allowed indicated that
++ * the cpuset mems_allowed was
++ * updated in parallel. It's a
++ * harmless race between the alloc
++ * and the cpuset update.
++ */
++ put_mems_allowed(cpuset_mems_cookie);
++ return object;
++ }
+ }
+ }
+- }
+- put_mems_allowed();
++ } while (!put_mems_allowed(cpuset_mems_cookie));
+ #endif
+ return NULL;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8342119..48febd7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -715,7 +715,13 @@ static enum page_references page_check_references(struct page *page,
+ */
+ SetPageReferenced(page);
+
+- if (referenced_page)
++ if (referenced_page || referenced_ptes > 1)
++ return PAGEREF_ACTIVATE;
++
++ /*
++ * Activate file-backed executable pages after first usage.
++ */
++ if (vm_flags & VM_EXEC)
+ return PAGEREF_ACTIVATE;
+
+ return PAGEREF_KEEP;
+@@ -1061,8 +1067,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+
+ ret = -EBUSY;
+
+- if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
+- return ret;
++ /*
++ * To minimise LRU disruption, the caller can indicate that it only
++ * wants to isolate pages it will be able to operate on without
++ * blocking - clean pages for the most part.
++ *
++ * ISOLATE_CLEAN means that only clean pages should be isolated. This
++ * is used by reclaim when it is cannot write to backing storage
++ *
++ * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
++ * that it is possible to migrate without blocking
++ */
++ if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
++ /* All the caller can do on PageWriteback is block */
++ if (PageWriteback(page))
++ return ret;
++
++ if (PageDirty(page)) {
++ struct address_space *mapping;
++
++ /* ISOLATE_CLEAN means only clean pages */
++ if (mode & ISOLATE_CLEAN)
++ return ret;
++
++ /*
++ * Only pages without mappings or that have a
++ * ->migratepage callback are possible to migrate
++ * without blocking
++ */
++ mapping = page_mapping(page);
++ if (mapping && !mapping->a_ops->migratepage)
++ return ret;
++ }
++ }
+
+ if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+ return ret;
+@@ -1178,7 +1215,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+ * anon page which don't already have a swap slot is
+ * pointless.
+ */
+- if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
++ if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
+ !PageSwapCache(cursor_page))
+ break;
+
+@@ -1874,7 +1911,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
+ * latencies, so it's better to scan a minimum amount there as
+ * well.
+ */
+- if (scanning_global_lru(sc) && current_is_kswapd())
++ if (scanning_global_lru(sc) && current_is_kswapd() &&
++ zone->all_unreclaimable)
+ force_scan = true;
+ if (!scanning_global_lru(sc))
+ force_scan = true;
+@@ -2012,8 +2050,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
+ * inactive lists are large enough, continue reclaiming
+ */
+ pages_for_compaction = (2UL << sc->order);
+- inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
+- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
++ inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
++ if (nr_swap_pages > 0)
++ inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ if (sc->nr_reclaimed < pages_for_compaction &&
+ inactive_lru_pages > pages_for_compaction)
+ return true;
+@@ -2088,6 +2127,42 @@ restart:
+ throttle_vm_writeout(sc->gfp_mask);
+ }
+
++/* Returns true if compaction should go ahead for a high-order request */
++static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
++{
++ unsigned long balance_gap, watermark;
++ bool watermark_ok;
++
++ /* Do not consider compaction for orders reclaim is meant to satisfy */
++ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
++ return false;
++
++ /*
++ * Compaction takes time to run and there are potentially other
++ * callers using the pages just freed. Continue reclaiming until
++ * there is a buffer of free pages available to give compaction
++ * a reasonable chance of completing and allocating the page
++ */
++ balance_gap = min(low_wmark_pages(zone),
++ (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
++ KSWAPD_ZONE_BALANCE_GAP_RATIO);
++ watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
++ watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
++
++ /*
++ * If compaction is deferred, reclaim up to a point where
++ * compaction will have a chance of success when re-enabled
++ */
++ if (compaction_deferred(zone))
++ return watermark_ok;
++
++ /* If compaction is not ready to start, keep reclaiming */
++ if (!compaction_suitable(zone, sc->order))
++ return false;
++
++ return watermark_ok;
++}
++
+ /*
+ * This is the direct reclaim path, for page-allocating processes. We only
+ * try to reclaim pages from zones which will satisfy the caller's allocation
+@@ -2105,8 +2180,9 @@ restart:
+ * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+- * high-order allocation and compaction is either ready to begin or deferred.
+- * This indicates to the caller that it should retry the allocation or fail.
++ * high-order allocation and compaction is ready to begin. This indicates to
++ * the caller that it should consider retrying the allocation instead of
++ * further reclaim.
+ */
+ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ struct scan_control *sc)
+@@ -2115,7 +2191,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
+- bool should_abort_reclaim = false;
++ bool aborted_reclaim = false;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
+@@ -2140,10 +2216,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ * noticeable problem, like transparent huge page
+ * allocations.
+ */
+- if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+- (compaction_suitable(zone, sc->order) ||
+- compaction_deferred(zone))) {
+- should_abort_reclaim = true;
++ if (compaction_ready(zone, sc)) {
++ aborted_reclaim = true;
+ continue;
+ }
+ }
+@@ -2165,7 +2239,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ shrink_zone(priority, zone, sc);
+ }
+
+- return should_abort_reclaim;
++ return aborted_reclaim;
+ }
+
+ static bool zone_reclaimable(struct zone *zone)
+@@ -2219,8 +2293,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ struct zoneref *z;
+ struct zone *zone;
+ unsigned long writeback_threshold;
++ bool aborted_reclaim;
+
+- get_mems_allowed();
+ delayacct_freepages_start();
+
+ if (scanning_global_lru(sc))
+@@ -2230,8 +2304,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ sc->nr_scanned = 0;
+ if (!priority)
+ disable_swap_token(sc->mem_cgroup);
+- if (shrink_zones(priority, zonelist, sc))
+- break;
++ aborted_reclaim = shrink_zones(priority, zonelist, sc);
+
+ /*
+ * Don't shrink slabs when reclaiming memory from
+@@ -2285,7 +2358,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+
+ out:
+ delayacct_freepages_end();
+- put_mems_allowed();
+
+ if (sc->nr_reclaimed)
+ return sc->nr_reclaimed;
+@@ -2298,6 +2370,10 @@ out:
+ if (oom_killer_disabled)
+ return 0;
+
++ /* Aborted reclaim to try compaction? don't OOM, then */
++ if (aborted_reclaim)
++ return 1;
++
+ /* top priority shrink_zones still had more to do? don't OOM, then */
+ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+ return 1;
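As a rough worked example of the free-page buffer compaction_ready() waits for
(assuming 4 KiB pages and the KSWAPD_ZONE_BALANCE_GAP_RATIO of 100 defined
elsewhere in this kernel's mm/vmscan.c), a THP-sized order-9 request gives:

    balance_gap = min(low_wmark_pages(zone),
                      (zone->present_pages + 99) / 100);  /* about 1% of the zone */
    watermark   = high_wmark_pages(zone) + balance_gap + (2UL << 9);
                                           /* 2UL << 9 = 1024 pages = 4 MiB */

so reclaim keeps running until roughly 4 MiB plus the balance gap sit free
above the high watermark before compaction is attempted.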
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index c505fd5..c119f33 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -868,7 +868,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec_per_pin *per_pin;
+ struct hdmi_eld *eld;
+ struct hdmi_spec_per_cvt *per_cvt = NULL;
+- int pinctl;
+
+ /* Validate hinfo */
+ pin_idx = hinfo_to_pin_index(spec, hinfo);
+@@ -904,11 +903,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pinctl | PIN_OUT);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+
+ /* Initially set the converter's capabilities */
+@@ -1147,11 +1141,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx = hinfo_to_pin_index(spec, hinfo);
+ hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
++ int pinctl;
+
+ hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+
+ hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+
++ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ snd_hda_codec_write(codec, pin_nid, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
++
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f096a5..191fd78 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5989,6 +5989,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
++ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 90e93bf..0dc441c 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1381,7 +1381,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
+ }
+
+ list_for_each_entry(w, &card->widgets, list) {
+- list_del_init(&w->dirty);
++ switch (w->id) {
++ case snd_soc_dapm_pre:
++ case snd_soc_dapm_post:
++ /* These widgets always need to be powered */
++ break;
++ default:
++ list_del_init(&w->dirty);
++ break;
++ }
+
+ if (w->power) {
+ d = w->dapm;
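The soc-dapm.c hunk keeps snd_soc_dapm_pre and snd_soc_dapm_post widgets
permanently on the dirty list so that their event callbacks run in every power
sequence. A minimal sketch of a driver relying on that behaviour (the widget
names and the example_event handler are illustrative, not from the patch):

    static const struct snd_soc_dapm_widget example_widgets[] = {
    	/* invoked before every DAPM power-up/down sequence */
    	SND_SOC_DAPM_PRE("Example Pre", example_event),
    	/* invoked after every DAPM power-up/down sequence */
    	SND_SOC_DAPM_POST("Example Post", example_event),
    };

Without the fix these widgets fell off the dirty list after the first run and
their events could be skipped on later runs.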
diff --git a/3.2.54/1025_linux-3.2.26.patch b/3.2.54/1025_linux-3.2.26.patch
new file mode 100644
index 0000000..44065b9
--- /dev/null
+++ b/3.2.54/1025_linux-3.2.26.patch
@@ -0,0 +1,238 @@
+diff --git a/Makefile b/Makefile
+index e13e4e7..fa5acc83 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index bb3ee36..f7c89e2 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
+ u16 apicid;
+ u16 initial_apicid;
+ u16 x86_clflush_size;
+-#ifdef CONFIG_SMP
+ /* number of cores as seen by the OS: */
+ u16 booted_cores;
+ /* Physical processor id: */
+@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
+ u8 compute_unit_id;
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+-#endif
+ u32 microcode;
+ } __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index bae1efe..be16854 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
+ {
+ struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+ unsigned int mask;
+- int cuid = 0;
++ int cuid;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return 0;
+
+ pci_read_config_dword(link, 0x1d4, &mask);
+
+-#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+-#endif
+ return (mask >> (4 * cuid)) & 0xf;
+ }
+
+@@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
+ static unsigned int reset, ban;
+ struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ unsigned int reg;
+- int cuid = 0;
++ int cuid;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+ return -EINVAL;
+@@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
+ pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+ }
+
+-#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+-#endif
+ mask <<= 4 * cuid;
+ mask |= (0xf ^ (1 << cuid)) << 26;
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 3524e1f..ff8557e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -148,7 +148,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+
+ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+ {
+-#ifdef CONFIG_SMP
+ /* calling is from identify_secondary_cpu() ? */
+ if (!c->cpu_index)
+ return;
+@@ -192,7 +191,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+
+ valid_k7:
+ ;
+-#endif
+ }
+
+ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index aa003b1..ca93cc7 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -676,9 +676,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ if (this_cpu->c_early_init)
+ this_cpu->c_early_init(c);
+
+-#ifdef CONFIG_SMP
+ c->cpu_index = 0;
+-#endif
+ filter_cpuid_features(c, false);
+
+ setup_smep(c);
+@@ -764,10 +762,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+ c->apicid = c->initial_apicid;
+ # endif
+ #endif
+-
+-#ifdef CONFIG_X86_HT
+ c->phys_proc_id = c->initial_apicid;
+-#endif
+ }
+
+ setup_smep(c);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 5231312..3e6ff6c 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -181,7 +181,6 @@ static void __cpuinit trap_init_f00f_bug(void)
+
+ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+ {
+-#ifdef CONFIG_SMP
+ /* calling is from identify_secondary_cpu() ? */
+ if (!c->cpu_index)
+ return;
+@@ -198,7 +197,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+ WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
+ "with B stepping processors.\n");
+ }
+-#endif
+ }
+
+ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index b0f1271..3b67877 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -119,9 +119,7 @@ void mce_setup(struct mce *m)
+ m->time = get_seconds();
+ m->cpuvendor = boot_cpu_data.x86_vendor;
+ m->cpuid = cpuid_eax(1);
+-#ifdef CONFIG_SMP
+ m->socketid = cpu_data(m->extcpu).phys_proc_id;
+-#endif
+ m->apicid = cpu_data(m->extcpu).initial_apicid;
+ rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index 445a61c..d4444be 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -65,11 +65,9 @@ struct threshold_bank {
+ };
+ static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
+
+-#ifdef CONFIG_SMP
+ static unsigned char shared_bank[NR_BANKS] = {
+ 0, 0, 0, 0, 1
+ };
+-#endif
+
+ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
+
+@@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+
+ if (!block)
+ per_cpu(bank_map, cpu) |= (1 << bank);
+-#ifdef CONFIG_SMP
++
+ if (shared_bank[bank] && c->cpu_core_id)
+ break;
+-#endif
+
+ memset(&b, 0, sizeof(b));
+ b.cpu = cpu;
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 14b2314..8022c66 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+ static int show_cpuinfo(struct seq_file *m, void *v)
+ {
+ struct cpuinfo_x86 *c = v;
+- unsigned int cpu = 0;
++ unsigned int cpu;
+ int i;
+
+-#ifdef CONFIG_SMP
+ cpu = c->cpu_index;
+-#endif
+ seq_printf(m, "processor\t: %u\n"
+ "vendor_id\t: %s\n"
+ "cpu family\t: %d\n"
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 18a1293..0db57b5 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+-#ifdef CONFIG_SMP
+ /* Only handle if it is the right mc controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
+ return NOTIFY_DONE;
+-#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 0790c98..19b4412 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+
+-#ifdef CONFIG_SMP
+ #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
+ #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
++#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
++
++#ifdef CONFIG_SMP
+ #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
+ #else
+-#define TO_PHYS_ID(cpu) (cpu)
+-#define TO_CORE_ID(cpu) (cpu)
+ #define for_each_sibling(i, cpu) for (i = 0; false; )
+ #endif
+-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+ /*
+ * Per-Core Temperature Data
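The common thread in this patch is that fields such as cpu_index, phys_proc_id
and cpu_core_id now exist in struct cpuinfo_x86 on uniprocessor builds as well,
so the #ifdef CONFIG_SMP guards around their users can go. A minimal sketch of
the pattern this enables (the printout itself is illustrative):

    const struct cpuinfo_x86 *c = &cpu_data(cpu);

    /* valid on both SMP and UP kernels after this change */
    pr_info("CPU %u: package %u, core %u\n",
            c->cpu_index, c->phys_proc_id, c->cpu_core_id);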
diff --git a/3.2.54/1026_linux-3.2.27.patch b/3.2.54/1026_linux-3.2.27.patch
new file mode 100644
index 0000000..5878eb4
--- /dev/null
+++ b/3.2.54/1026_linux-3.2.27.patch
@@ -0,0 +1,3188 @@
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
+index edad99a..69820b2 100644
+--- a/Documentation/sound/alsa/HD-Audio-Models.txt
++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
+@@ -60,10 +60,11 @@ ALC267/268
+ ==========
+ N/A
+
+-ALC269
++ALC269/270/275/276/280/282
+ ======
+ laptop-amic Laptops with analog-mic input
+ laptop-dmic Laptops with digital-mic input
++ lenovo-dock Enables docking station I/O for some Lenovos
+
+ ALC662/663/272
+ ==============
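The model names listed in this file are values for the snd-hda-intel driver's
model option; for example, to force the new docking-station setup (the file
path is just the conventional location):

    # /etc/modprobe.d/alsa.conf
    options snd-hda-intel model=lenovo-dock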
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index e1f856b..22bf11b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -1,4 +1,4 @@
+-Everything you ever wanted to know about Linux 2.6 -stable releases.
++Everything you ever wanted to know about Linux -stable releases.
+
+ Rules on what kind of patches are accepted, and which ones are not, into the
+ "-stable" tree:
+@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
+ cherry-picked then this can be specified in the following format in
+ the sign-off area:
+
+- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+- Cc: <stable@vger.kernel.org> # .32.x
++ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
++ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
++ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
++ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+ The tag sequence has the meaning of:
+@@ -78,6 +78,15 @@ Review cycle:
+ security kernel team, and not go through the normal review cycle.
+ Contact the kernel security team for more details on this procedure.
+
++Trees:
++
++ - The queues of patches, for both completed versions and in progress
++ versions can be found at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
++ - The finalized and tagged releases of all stable kernels can be found
++ in separate branches per version at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
++
+
+ Review committee:
+
+diff --git a/Makefile b/Makefile
+index fa5acc83..bdf851f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
+index 93226cf..b1479fd 100644
+--- a/arch/arm/include/asm/mutex.h
++++ b/arch/arm/include/asm/mutex.h
+@@ -7,121 +7,10 @@
+ */
+ #ifndef _ASM_MUTEX_H
+ #define _ASM_MUTEX_H
+-
+-#if __LINUX_ARM_ARCH__ < 6
+-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+-# include <asm-generic/mutex-xchg.h>
+-#else
+-
+ /*
+- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+- * atomic decrement (it is not a reliable atomic decrement but it satisfies
+- * the defined semantics for our purpose, while being smaller and faster
+- * than a real atomic decrement or atomic swap. The idea is to attempt
+- * decrementing the lock value only once. If once decremented it isn't zero,
+- * or if its store-back fails due to a dispute on the exclusive store, we
+- * simply bail out immediately through the slow path where the lock will be
+- * reattempted until it succeeds.
++ * On pre-ARMv6 hardware this results in a swp-based implementation,
++ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
++ * accesses instead.
+ */
+-static inline void
+-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- fail_fn(count);
+-}
+-
+-static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- __res = fail_fn(count);
+- return __res;
+-}
+-
+-/*
+- * Same trick is used for the unlock fast path. However the original value,
+- * rather than the result, is used to test for success in order to have
+- * better generated assembly.
+- */
+-static inline void
+-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "ldrex %0, [%3] \n\t"
+- "add %1, %0, #1 \n\t"
+- "strex %2, %1, [%3] "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __orig |= __ex_flag;
+- if (unlikely(__orig != 0))
+- fail_fn(count);
+-}
+-
+-/*
+- * If the unlock was done on a contended lock, or if the unlock simply fails
+- * then the mutex remains locked.
+- */
+-#define __mutex_slowpath_needs_to_unlock() 1
+-
+-/*
+- * For __mutex_fastpath_trylock we use another construct which could be
+- * described as a "single value cmpxchg".
+- *
+- * This provides the needed trylock semantics like cmpxchg would, but it is
+- * lighter and less generic than a true cmpxchg implementation.
+- */
+-static inline int
+-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "1: ldrex %0, [%3] \n\t"
+- "subs %1, %0, #1 \n\t"
+- "strexeq %2, %1, [%3] \n\t"
+- "movlt %0, #0 \n\t"
+- "cmpeq %2, #0 \n\t"
+- "bgt 1b "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&count->counter)
+- : "cc", "memory" );
+-
+- return __orig;
+-}
+-
+-#endif
++#include <asm-generic/mutex-xchg.h>
+ #endif
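For reference, the lock fastpath that asm-generic/mutex-xchg.h supplies looks
roughly like this (a sketch of the generic helper, not code from this patch):

    static inline void
    __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
    {
    	/* 1 means unlocked; any other previous value means contention,
    	 * so fall back to the slowpath */
    	if (unlikely(atomic_xchg(count, 0) != 1))
    		fail_fn(count);
    }

On ARMv6+ atomic_xchg() is built from an ldrex/strex pair, which is why this
single generic implementation can replace the hand-rolled assembly for both
pre- and post-v6 cores.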
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index b145f16..ece0996 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -242,6 +242,19 @@ svc_preempt:
+ b 1b
+ #endif
+
++__und_fault:
++ @ Correct the PC such that it is pointing at the instruction
++ @ which caused the fault. If the faulting instruction was ARM
++ @ the PC will be pointing at the next instruction, and have to
++ @ subtract 4. Otherwise, it is Thumb, and the PC will be
++ @ pointing at the second half of the Thumb instruction. We
++ @ have to subtract 2.
++ ldr r2, [r0, #S_PC]
++ sub r2, r2, r1
++ str r2, [r0, #S_PC]
++ b do_undefinstr
++ENDPROC(__und_fault)
++
+ .align 5
+ __und_svc:
+ #ifdef CONFIG_KPROBES
+@@ -259,25 +272,32 @@ __und_svc:
+ @
+ @ r0 - instruction
+ @
+-#ifndef CONFIG_THUMB2_KERNEL
++#ifndef CONFIG_THUMB2_KERNEL
+ ldr r0, [r4, #-4]
+ #else
++ mov r1, #2
+ ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
+- ldrhhs r9, [r4] @ bottom 16 bits
+- orrhs r0, r9, r0, lsl #16
++ blo __und_svc_fault
++ ldrh r9, [r4] @ bottom 16 bits
++ add r4, r4, #2
++ str r4, [sp, #S_PC]
++ orr r0, r9, r0, lsl #16
+ #endif
+- adr r9, BSYM(1f)
++ adr r9, BSYM(__und_svc_finish)
+ mov r2, r4
+ bl call_fpe
+
++ mov r1, #4 @ PC correction to apply
++__und_svc_fault:
+ mov r0, sp @ struct pt_regs *regs
+- bl do_undefinstr
++ bl __und_fault
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+-1: disable_irq_notrace
++__und_svc_finish:
++ disable_irq_notrace
+
+ @
+ @ restore SPSR and restart the instruction
+@@ -421,25 +441,33 @@ __und_usr:
+ mov r2, r4
+ mov r3, r5
+
++ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
++ @ faulting instruction depending on Thumb mode.
++ @ r3 = regs->ARM_cpsr
+ @
+- @ fall through to the emulation code, which returns using r9 if
+- @ it has emulated the instruction, or the more conventional lr
+- @ if we are to treat this as a real undefined instruction
+- @
+- @ r0 - instruction
++ @ The emulation code returns using r9 if it has emulated the
++ @ instruction, or the more conventional lr if we are to treat
++ @ this as a real undefined instruction
+ @
+ adr r9, BSYM(ret_from_exception)
+- adr lr, BSYM(__und_usr_unknown)
++
+ tst r3, #PSR_T_BIT @ Thumb mode?
+- itet eq @ explicit IT needed for the 1f label
+- subeq r4, r2, #4 @ ARM instr at LR - 4
+- subne r4, r2, #2 @ Thumb instr at LR - 2
+-1: ldreqt r0, [r4]
++ bne __und_usr_thumb
++ sub r4, r2, #4 @ ARM instr at LR - 4
++1: ldrt r0, [r4]
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+- reveq r0, r0 @ little endian instruction
++ rev r0, r0 @ little endian instruction
+ #endif
+- beq call_fpe
++ @ r0 = 32-bit ARM instruction which caused the exception
++ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the faulting instruction
++ @ lr = 32-bit undefined instruction function
++ adr lr, BSYM(__und_usr_fault_32)
++ b call_fpe
++
++__und_usr_thumb:
+ @ Thumb instruction
++ sub r4, r2, #2 @ First half of thumb instr at LR - 2
+ #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+ /*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+@@ -453,7 +481,7 @@ __und_usr:
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+- blo __und_usr_unknown
++ blo __und_usr_fault_16 @ 16bit undefined instruction
+ /*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+@@ -461,15 +489,18 @@ __und_usr:
+ */
+ .arch armv6t2
+ #endif
+-2:
+- ARM( ldrht r5, [r4], #2 )
+- THUMB( ldrht r5, [r4] )
+- THUMB( add r4, r4, #2 )
++2: ldrht r5, [r4]
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+- blo __und_usr_unknown
+-3: ldrht r0, [r4]
++ blo __und_usr_fault_16 @ 16bit undefined instruction
++3: ldrht r0, [r2]
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
++ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
+ orr r0, r0, r5, lsl #16
++ adr lr, BSYM(__und_usr_fault_32)
++ @ r0 = the two 16-bit Thumb instructions which caused the exception
++ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the first 16-bit Thumb instruction
++ @ lr = 32bit undefined instruction function
+
+ #if __LINUX_ARM_ARCH__ < 7
+ /* If the target arch was overridden, change it back: */
+@@ -480,17 +511,13 @@ __und_usr:
+ #endif
+ #endif /* __LINUX_ARM_ARCH__ < 7 */
+ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+- b __und_usr_unknown
++ b __und_usr_fault_16
+ #endif
+- UNWIND(.fnend )
++ UNWIND(.fnend)
+ ENDPROC(__und_usr)
+
+- @
+- @ fallthrough to call_fpe
+- @
+-
+ /*
+- * The out of line fixup for the ldrt above.
++ * The out of line fixup for the ldrt instructions above.
+ */
+ .pushsection .fixup, "ax"
+ 4: mov pc, r9
+@@ -521,11 +548,12 @@ ENDPROC(__und_usr)
+ * NEON handler code.
+ *
+ * Emulators may wish to make use of the following registers:
+- * r0 = instruction opcode.
+- * r2 = PC+4
++ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++ * r2 = PC value to resume execution after successful emulation
+ * r9 = normal "successful" return address
+- * r10 = this threads thread_info structure.
++ * r10 = this thread's thread_info structure
+ * lr = unrecognised instruction return address
++ * IRQs disabled, FIQs enabled.
+ */
+ @
+ @ Fall-through from Thumb-2 __und_usr
+@@ -660,12 +688,17 @@ ENTRY(no_fp)
+ mov pc, lr
+ ENDPROC(no_fp)
+
+-__und_usr_unknown:
+- enable_irq
++__und_usr_fault_32:
++ mov r1, #4
++ b 1f
++__und_usr_fault_16:
++ mov r1, #2
++1: enable_irq
+ mov r0, sp
+ adr lr, BSYM(ret_from_exception)
+- b do_undefinstr
+-ENDPROC(__und_usr_unknown)
++ b __und_fault
++ENDPROC(__und_usr_fault_32)
++ENDPROC(__und_usr_fault_16)
+
+ .align 5
+ __pabt_usr:
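The cmp r0, #0xe800 tests above rely on the Thumb-2 encoding rule that a
halfword is the first half of a 32-bit instruction exactly when its top five
bits are 0b11101, 0b11110 or 0b11111, and the smallest such value is 0xe800.
In sketch form:

    first_halfword >= 0xe800  ->  32-bit Thumb-2: fetch and merge the second halfword
    first_halfword <  0xe800  ->  complete 16-bit Thumb instruction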
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 3d0c6fb..e68d251 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -125,6 +125,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ */
+ mdelay(1000);
+ printk("Reboot failed -- System halted\n");
++ local_irq_disable();
+ while (1);
+ }
+
+@@ -240,6 +241,7 @@ void machine_shutdown(void)
+ void machine_halt(void)
+ {
+ machine_shutdown();
++ local_irq_disable();
+ while (1);
+ }
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 160cb16..8380bd1 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -362,18 +362,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+
+ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ {
+- unsigned int correction = thumb_mode(regs) ? 2 : 4;
+ unsigned int instr;
+ siginfo_t info;
+ void __user *pc;
+
+- /*
+- * According to the ARM ARM, PC is 2 or 4 bytes ahead,
+- * depending whether we're in Thumb mode or not.
+- * Correct this offset.
+- */
+- regs->ARM_pc -= correction;
+-
+ pc = (void __user *)instruction_pointer(regs);
+
+ if (processor_mode(regs) == SVC_MODE) {
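The correction removed from do_undefinstr() now happens in the __und_fault
stub added to entry-armv.S above; in C terms the retired logic was:

    /* ARM-mode faults leave the PC 4 bytes past the instruction, Thumb 2 */
    regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;

Moving it into the entry path lets the handlers pick a 2- or 4-byte correction
per instruction width (a 32-bit Thumb-2 opcode also needs 4), which a single
mode-based test in C could not express.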
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index 845f461..c202113 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
++#ifdef CONFIG_ARM_ERRATA_720789
++ mov r3, #0
++#else
+ asid r3, r3 @ mask ASID
++#endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
+ add r0, r0, #PAGE_SZ
+@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
+ mov r0, r0, lsl #PAGE_SHIFT
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+ add r0, r0, #PAGE_SZ
+ cmp r0, r1
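For context, Cortex-A9 erratum 720789 can broadcast a faulty ASID with the
by-MVA/by-ASID TLB maintenance operations, leaving stale entries in other
cores' TLBs; the workaround is to invalidate the MVA across all ASIDs instead.
The two encodings involved, as in the hunk above:

    mcr p15, 0, r0, c8, c3, 1   @ TLBIMVAIS:  invalidate by MVA, current ASID (affected)
    mcr p15, 0, r0, c8, c3, 3   @ TLBIMVAAIS: invalidate by MVA, all ASIDs (workaround)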
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index 4fa9903..cc926c9 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -7,18 +7,20 @@
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+- *
+- * Basic entry code, called from the kernel's undefined instruction trap.
+- * r0 = faulted instruction
+- * r5 = faulted PC+4
+- * r9 = successful return
+- * r10 = thread_info structure
+- * lr = failure return
+ */
+ #include <asm/thread_info.h>
+ #include <asm/vfpmacros.h>
+ #include "../kernel/entry-header.S"
+
++@ VFP entry point.
++@
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
++@ r10 = this thread's thread_info structure
++@ lr = unrecognised instruction return address
++@ IRQs disabled.
++@
+ ENTRY(do_vfp)
+ #ifdef CONFIG_PREEMPT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index 2d30c7f..3a0efaa 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -61,13 +61,13 @@
+
+ @ VFP hardware support entry point.
+ @
+-@ r0 = faulted instruction
+-@ r2 = faulted PC+4
+-@ r9 = successful return
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
+ @ r10 = vfp_state union
+ @ r11 = CPU number
+-@ lr = failure return
+-
++@ lr = unrecognised instruction return address
++@ IRQs enabled.
+ ENTRY(vfp_support_entry)
+ DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+
+@@ -161,9 +161,12 @@ vfp_hw_state_valid:
+ @ exception before retrying branch
+ @ out before setting an FPEXC that
+ @ stops us reading stuff
+- VFPFMXR FPEXC, r1 @ restore FPEXC last
+- sub r2, r2, #4
+- str r2, [sp, #S_PC] @ retry the instruction
++ VFPFMXR FPEXC, r1 @ Restore FPEXC last
++ sub r2, r2, #4 @ Retry current instruction - if Thumb
++ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
++ @ else it's one 32-bit instruction, so
++ @ always subtract 4 from the following
++ @ instruction address.
+ #ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 8ea07e4..ad83dad 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -453,10 +453,16 @@ static int vfp_pm_suspend(void)
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
++ } else if (vfp_current_hw_state[ti->cpu]) {
++#ifndef CONFIG_SMP
++ fmxr(FPEXC, fpexc | FPEXC_EN);
++ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
++ fmxr(FPEXC, fpexc);
++#endif
+ }
+
+ /* clear any information we had about last context state */
+- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
++ vfp_current_hw_state[ti->cpu] = NULL;
+
+ return 0;
+ }
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 3fad89e..2fc214b 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -18,8 +18,8 @@
+ #include <asm/system.h>
+
+
+-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
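Dropping the (atomic_t) cast matters because the cast form is a compound
literal, which also compiles in plain assignments; such code then breaks on
the many architectures whose ATOMIC_INIT is brace-only. A sketch of the
distinction (vdata->refcnt stands in for any atomic_t field):

    static atomic_t refs = ATOMIC_INIT(1);   /* intended use: still fine */

    /* vdata->refcnt = ATOMIC_INIT(1);          assignment: now a build error */
    atomic_set(&vdata->refcnt, 1);           /* correct form at runtime */

The mspec.c hunk later in this same patch converts exactly such a caller.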
+diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
+index c3c5a86..8798ebc 100644
+--- a/arch/m68k/include/asm/entry.h
++++ b/arch/m68k/include/asm/entry.h
+@@ -33,8 +33,8 @@
+
+ /* the following macro is used when enabling interrupts */
+ #if defined(MACH_ATARI_ONLY)
+- /* block out HSYNC on the atari */
+-#define ALLOWINT (~0x400)
++ /* block out HSYNC = ipl 2 on the atari */
++#define ALLOWINT (~0x500)
+ #define MAX_NOINT_IPL 3
+ #else
+ /* portable version */
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 8623f8d..9a5932e 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+ goto bad_access;
+ }
+
+- mem_value = *mem;
++ /*
++ * No need to check for EFAULT; we know that the page is
++ * present and writable.
++ */
++ __get_user(mem_value, mem);
+ if (mem_value == oldval)
+- *mem = newval;
++ __put_user(newval, mem);
+
+ pte_unmap_unlock(pte, ptl);
+ up_read(&mm->mmap_sem);
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 5682f16..20f0e01 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -12,7 +12,6 @@
+ #include <asm/pgalloc.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+-#include <asm-generic/mm_hooks.h>
+
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
+ switch_mm(prev, next, current);
+ }
+
++static inline void arch_dup_mmap(struct mm_struct *oldmm,
++ struct mm_struct *mm)
++{
++#ifdef CONFIG_64BIT
++ if (oldmm->context.asce_limit < mm->context.asce_limit)
++ crst_table_downgrade(mm, oldmm->context.asce_limit);
++#endif
++}
++
++static inline void arch_exit_mmap(struct mm_struct *mm)
++{
++}
++
+ #endif /* __S390_MMU_CONTEXT_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 5f33d37..172550d 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -130,7 +130,9 @@ struct stack_frame {
+ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
++ __tlb_flush_mm(current->mm); \
+ crst_table_downgrade(current->mm, 1UL << 31); \
++ update_mm(current->mm, current); \
+ } while (0)
+
+ /* Forward declaration, a strange C thing */
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index b28aaa4..0fc0a7e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -453,6 +453,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ struct pt_regs regs;
+ int access, fault;
+
++ /* Emulate a uaccess fault from kernel mode. */
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ if (!irqs_disabled())
+ regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+@@ -461,12 +462,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ uaddr &= PAGE_MASK;
+ access = write ? VM_WRITE : VM_READ;
+ fault = do_exception(&regs, access, uaddr | 2);
+- if (unlikely(fault)) {
+- if (fault & VM_FAULT_OOM)
+- return -EFAULT;
+- else if (fault & VM_FAULT_SIGBUS)
+- do_sigbus(&regs, pgm_int_code, uaddr);
+- }
++ /*
++ * Since the fault happened in kernel mode while performing a uaccess
++	 * all we need to do now is emulate a fixup in case "fault" is not
++	 * zero.
++	 * For the calling uaccess functions this always results in -EFAULT.
++ */
+ return fault ? -EFAULT : 0;
+ }
+
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index a0155c0..c70b3d8 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
+
+ int s390_mmap_check(unsigned long addr, unsigned long len)
+ {
++ int rc;
++
+ if (!is_compat_task() &&
+- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+- return crst_table_upgrade(current->mm, 1UL << 53);
++ len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
++ rc = crst_table_upgrade(current->mm, 1UL << 53);
++ if (rc)
++ return rc;
++ update_mm(current->mm, current);
++ }
+ return 0;
+ }
+
+@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
+ return area;
+@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index f8ceac4..f8e92f8 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -97,7 +97,6 @@ repeat:
+ crst_table_free(mm, table);
+ if (mm->context.asce_limit < limit)
+ goto repeat;
+- update_mm(mm, current);
+ return 0;
+ }
+
+@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ {
+ pgd_t *pgd;
+
+- if (mm->context.asce_limit <= limit)
+- return;
+- __tlb_flush_mm(mm);
+ while (mm->context.asce_limit > limit) {
+ pgd = mm->pgd;
+ switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+ }
+- update_mm(mm, current);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 1f84794..73ef56c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
+ ideal_nops = intel_nops;
+ #endif
+ }
+-
++ break;
+ default:
+ #ifdef CONFIG_X86_64
+ ideal_nops = k8_nops;
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 1b267e7..00a0385 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -686,6 +686,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ if (!PageHighMem(page)) {
+@@ -721,6 +722,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
+ spin_unlock_irqrestore(&m2p_override_lock, flags);
+
++ /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
++ * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
++ * pfn so that the following mfn_to_pfn(mfn) calls will return the
++ * pfn from the m2p_override (the backend pfn) instead.
++ * We need to do this because the pages shared by the frontend
++	 * (xen-blkfront) can already be locked (lock_page, called by
++ * do_read_cache_page); when the userspace backend tries to use them
++ * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
++ * do_blockdev_direct_IO is going to try to lock the same pages
++ * again resulting in a deadlock.
++ * As a side effect get_user_pages_fast might not be safe on the
++ * frontend pages while they are being shared with the backend,
++ * because mfn_to_pfn (that ends up being called by GUPF) will
++ * return the backend pfn rather than the frontend pfn. */
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == mfn)
++ set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_add_override);
+@@ -732,6 +751,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ mfn = get_phys_to_machine(pfn);
+@@ -801,6 +821,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ } else
+ set_phys_to_machine(pfn, page->index);
+
++ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
++ * somewhere in this domain, even before being added to the
++ * m2p_override (see comment above in m2p_add_override).
++ * If there are no other entries in the m2p_override corresponding
++ * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
++ * the original pfn (the one shared by the frontend): the backend
++ * cannot do any IO on this page anymore because it has been
++ * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
++ * the original pfn causes mfn_to_pfn(mfn) to return the frontend
++ * pfn again. */
++ mfn &= ~FOREIGN_FRAME_BIT;
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
++ m2p_find_override(mfn) == NULL)
++ set_phys_to_machine(pfn, mfn);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_remove_override);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 9955a53..c864add 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4369,8 +4369,14 @@ out_unreg_blkdev:
+ out_put_disk:
+ while (dr--) {
+ del_timer_sync(&motor_off_timer[dr]);
+- if (disks[dr]->queue)
++ if (disks[dr]->queue) {
+ blk_cleanup_queue(disks[dr]->queue);
++ /*
++ * put_disk() is not paired with add_disk() and
++ * will put queue reference one extra time. fix it.
++ */
++ disks[dr]->queue = NULL;
++ }
+ put_disk(disks[dr]);
+ }
+ return err;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index e46f2f7..650a308 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -20,8 +20,6 @@ struct workqueue_struct *virtblk_wq;
+
+ struct virtio_blk
+ {
+- spinlock_t lock;
+-
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+
+@@ -62,7 +60,7 @@ static void blk_done(struct virtqueue *vq)
+ unsigned int len;
+ unsigned long flags;
+
+- spin_lock_irqsave(&vblk->lock, flags);
++ spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
+ int error;
+
+@@ -97,7 +95,7 @@ static void blk_done(struct virtqueue *vq)
+ }
+ /* In case queue is stopped waiting for more buffers. */
+ blk_start_queue(vblk->disk->queue);
+- spin_unlock_irqrestore(&vblk->lock, flags);
++ spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+ }
+
+ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+@@ -384,7 +382,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ }
+
+ INIT_LIST_HEAD(&vblk->reqs);
+- spin_lock_init(&vblk->lock);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
+@@ -410,7 +407,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_mempool;
+ }
+
+- q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
++ q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+ if (!q) {
+ err = -ENOMEM;
+ goto out_put_disk;
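Passing NULL as the lock to blk_init_queue() makes the block core fall back to
its own internal queue lock, which is what lets blk_done() above take
vblk->disk->queue->queue_lock instead of a driver-private spinlock. In sketch
form:

    /* NULL lock: the block layer supplies queue->queue_lock itself */
    q = blk_init_queue(do_virtblk_request, NULL);

The driver previously handed its own vblk->lock to blk_init_queue() for the
same purpose; using the core's lock just removes the redundant field.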
+diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
+index 5c0d96a..b12ffea 100644
+--- a/drivers/char/mspec.c
++++ b/drivers/char/mspec.c
+@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ vdata->flags = flags;
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+- vdata->refcnt = ATOMIC_INIT(1);
++ atomic_set(&vdata->refcnt, 1);
+ vma->vm_private_data = vdata;
+
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 6035ab8..631d4f6 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -125,21 +125,26 @@
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
++ * void add_device_randomness(const void *buf, unsigned int size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+- * void add_interrupt_randomness(int irq);
++ * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
++ * add_device_randomness() is for adding data to the random pool that
++ * is likely to differ between two devices (or possibly even per boot).
++ * This would be things like MAC addresses or serial numbers, or the
++ * read-out of the RTC. This does *not* add any actual entropy to the
++ * pool, but it initializes the pool to different values for devices
++ * that might otherwise be identical and have very little entropy
++ * available to them (particularly common in the embedded world).
++ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+- * add_interrupt_randomness() uses the inter-interrupt timing as random
+- * inputs to the entropy pool. Note that not all interrupts are good
+- * sources of randomness! For example, the timer interrupts is not a
+- * good choice, because the periodicity of the interrupts is too
+- * regular, and hence predictable to an attacker. Network Interface
+- * Controller interrupts are a better measure, since the timing of the
+- * NIC interrupts are more unpredictable.
++ * add_interrupt_randomness() uses the interrupt timing as random
++ * inputs to the entropy pool. Using the cycle counters and the irq source
++ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+@@ -248,6 +253,8 @@
+ #include <linux/percpu.h>
+ #include <linux/cryptohash.h>
+ #include <linux/fips.h>
++#include <linux/ptrace.h>
++#include <linux/kmemcheck.h>
+
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ # include <linux/irq.h>
+@@ -256,6 +263,7 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/irq.h>
++#include <asm/irq_regs.h>
+ #include <asm/io.h>
+
+ /*
+@@ -266,6 +274,8 @@
+ #define SEC_XFER_SIZE 512
+ #define EXTRACT_SIZE 10
+
++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
++
+ /*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+@@ -420,8 +430,10 @@ struct entropy_store {
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
++ unsigned input_rotate;
+ int entropy_count;
+- int input_rotate;
++ int entropy_total;
++ unsigned int initialized:1;
+ __u8 last_data[EXTRACT_SIZE];
+ };
+
+@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_pool = {
+ .pool = nonblocking_pool_data
+ };
+
++static __u32 const twist_table[8] = {
++ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_pool = {
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
+ {
+- static __u32 const twist_table[8] = {
+- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+- unsigned long flags;
+
+- /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+- spin_lock_irqsave(&r->lock, flags);
+- input_rotate = r->input_rotate;
+- i = r->add_ptr;
++ smp_rmb();
++ input_rotate = ACCESS_ONCE(r->input_rotate);
++ i = ACCESS_ONCE(r->add_ptr);
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+@@ -513,19 +524,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ input_rotate += i ? 7 : 14;
+ }
+
+- r->input_rotate = input_rotate;
+- r->add_ptr = i;
++ ACCESS_ONCE(r->input_rotate) = input_rotate;
++ ACCESS_ONCE(r->add_ptr) = i;
++ smp_wmb();
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
++}
++
++static void mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ unsigned long flags;
+
++ spin_lock_irqsave(&r->lock, flags);
++ __mix_pool_bytes(r, in, nbytes, out);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
++struct fast_pool {
++ __u32 pool[4];
++ unsigned long last;
++ unsigned short count;
++ unsigned char rotate;
++ unsigned char last_timer_intr;
++};
++
++/*
++ * This is a fast mixing routine used by the interrupt randomness
++ * collector. It's hardcoded for a 128-bit pool and assumes that any
++ * locks that might be needed are taken by the caller.
++ */
++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+ {
+- mix_pool_bytes_extract(r, in, bytes, NULL);
++ const char *bytes = in;
++ __u32 w;
++ unsigned i = f->count;
++ unsigned input_rotate = f->rotate;
++
++ while (nbytes--) {
++ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
++ f->pool[(i + 1) & 3];
++ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate += (i++ & 3) ? 7 : 14;
++ }
++ f->count = i;
++ f->rotate = input_rotate;
+ }
+
+ /*
+@@ -533,30 +578,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+ */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+- unsigned long flags;
+- int entropy_count;
++ int entropy_count, orig;
+
+ if (!nbits)
+ return;
+
+- spin_lock_irqsave(&r->lock, flags);
+-
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+- entropy_count = r->entropy_count;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count += nbits;
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+- r->entropy_count = entropy_count;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++
++ if (!r->initialized && nbits > 0) {
++ r->entropy_total += nbits;
++ if (r->entropy_total > 128)
++ r->initialized = 1;
++ }
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+- spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+ /*********************************************************************
+@@ -609,6 +658,25 @@ static void set_timer_rand_state(unsigned int irq,
+ }
+ #endif
+
++/*
++ * Add device- or boot-specific data to the input and nonblocking
++ * pools to help initialize them to unique values.
++ *
++ * None of this adds any entropy, it is meant to avoid the
++ * problem of the nonblocking pool having similar initial state
++ * across largely identical devices.
++ */
++void add_device_randomness(const void *buf, unsigned int size)
++{
++ unsigned long time = get_cycles() ^ jiffies;
++
++ mix_pool_bytes(&input_pool, buf, size, NULL);
++ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
++ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
++ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++}
++EXPORT_SYMBOL(add_device_randomness);
++
+ static struct timer_rand_state input_timer_state;
+
+ /*
+@@ -624,8 +692,8 @@ static struct timer_rand_state input_timer_state;
+ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ {
+ struct {
+- cycles_t cycles;
+ long jiffies;
++ unsigned cycles;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
+@@ -639,7 +707,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
++ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+@@ -696,17 +764,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+-void add_interrupt_randomness(int irq)
++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
++
++void add_interrupt_randomness(int irq, int irq_flags)
+ {
+- struct timer_rand_state *state;
++ struct entropy_store *r;
++ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
++ struct pt_regs *regs = get_irq_regs();
++ unsigned long now = jiffies;
++ __u32 input[4], cycles = get_cycles();
++
++ input[0] = cycles ^ jiffies;
++ input[1] = irq;
++ if (regs) {
++ __u64 ip = instruction_pointer(regs);
++ input[2] = ip;
++ input[3] = ip >> 32;
++ }
+
+- state = get_timer_rand_state(irq);
++ fast_mix(fast_pool, input, sizeof(input));
+
+- if (state == NULL)
++ if ((fast_pool->count & 1023) &&
++ !time_after(now, fast_pool->last + HZ))
+ return;
+
+- DEBUG_ENT("irq event %d\n", irq);
+- add_timer_randomness(state, 0x100 + irq);
++ fast_pool->last = now;
++
++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++ /*
++ * If we don't have a valid cycle counter, and we see
++ * back-to-back timer interrupts, then skip giving credit for
++ * any entropy.
++ */
++ if (cycles == 0) {
++ if (irq_flags & __IRQF_TIMER) {
++ if (fast_pool->last_timer_intr)
++ return;
++ fast_pool->last_timer_intr = 1;
++ } else
++ fast_pool->last_timer_intr = 0;
++ }
++ credit_entropy_bits(r, 1);
+ }
+
+ #ifdef CONFIG_BLOCK
+@@ -738,7 +837,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ */
+ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
++ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+@@ -757,7 +856,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+ }
+ }
+@@ -816,13 +915,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
+ int i;
+- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
++ union {
++ __u32 w[5];
++ unsigned long l[LONGS(EXTRACT_SIZE)];
++ } hash;
++ __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
++ unsigned long flags;
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+- sha_init(hash);
++ sha_init(hash.w);
++ spin_lock_irqsave(&r->lock, flags);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+@@ -833,13 +938,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
++ spin_unlock_irqrestore(&r->lock, flags);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
+ */
+- sha_transform(hash, extract, workspace);
++ sha_transform(hash.w, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+@@ -848,19 +954,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+- hash[0] ^= hash[3];
+- hash[1] ^= hash[4];
+- hash[2] ^= rol32(hash[2], 16);
+- memcpy(out, hash, EXTRACT_SIZE);
+- memset(hash, 0, sizeof(hash));
++ hash.w[0] ^= hash.w[3];
++ hash.w[1] ^= hash.w[4];
++ hash.w[2] ^= rol32(hash.w[2], 16);
++
++ /*
++	 * If we have an architectural hardware random number
++ * generator, mix that in, too.
++ */
++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
++ unsigned long v;
++ if (!arch_get_random_long(&v))
++ break;
++ hash.l[i] ^= v;
++ }
++
++ memcpy(out, &hash, EXTRACT_SIZE);
++ memset(&hash, 0, sizeof(hash));
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+- size_t nbytes, int min, int reserved)
++ size_t nbytes, int min, int reserved)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+- unsigned long flags;
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+@@ -869,6 +986,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
++ unsigned long flags;
++
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+@@ -927,17 +1046,34 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ /*
+ * This function is the exported kernel interface. It returns some
+- * number of good random numbers, suitable for seeding TCP sequence
+- * numbers, etc.
++ * number of good random numbers, suitable for key generation, seeding
++ * TCP sequence numbers, etc. It does not use the hw random number
++ * generator, if available; use get_random_bytes_arch() for that.
+ */
+ void get_random_bytes(void *buf, int nbytes)
+ {
++ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
++}
++EXPORT_SYMBOL(get_random_bytes);
++
++/*
++ * This function will use the architecture-specific hardware random
++ * number generator if it is available. The arch-specific hw RNG will
++ * almost certainly be faster than what we can do in software, but it
++ * is impossible to verify that it is implemented securely (as
++ * opposed, to, say, the AES encryption of a sequence number using a
++ * opposed to, say, the AES encryption of a sequence number using a
++ * only if we're willing to trust the hardware manufacturer not to
++ * have put in a back door.
++ */
++void get_random_bytes_arch(void *buf, int nbytes)
++{
+ char *p = buf;
+
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+-
++
+ if (!arch_get_random_long(&v))
+ break;
+
+@@ -946,9 +1082,11 @@ void get_random_bytes(void *buf, int nbytes)
+ nbytes -= chunk;
+ }
+
+- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
++ if (nbytes)
++ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ }
+-EXPORT_SYMBOL(get_random_bytes);
++EXPORT_SYMBOL(get_random_bytes_arch);
++
+
+ /*
+ * init_std_data - initialize pool with system data
+@@ -961,16 +1099,19 @@ EXPORT_SYMBOL(get_random_bytes);
+ */
+ static void init_std_data(struct entropy_store *r)
+ {
+- ktime_t now;
+- unsigned long flags;
++ int i;
++ ktime_t now = ktime_get_real();
++ unsigned long rv;
+
+- spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+- spin_unlock_irqrestore(&r->lock, flags);
+-
+- now = ktime_get_real();
+- mix_pool_bytes(r, &now, sizeof(now));
+- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
++ r->entropy_total = 0;
++ mix_pool_bytes(r, &now, sizeof(now), NULL);
++ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++ if (!arch_get_random_long(&rv))
++ break;
++ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
++ }
++ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
+ static int rand_initialize(void)
+@@ -1107,7 +1248,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ count -= bytes;
+ p += bytes;
+
+- mix_pool_bytes(r, buf, bytes);
++ mix_pool_bytes(r, buf, bytes, NULL);
+ cond_resched();
+ }
+
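
The extract_buf() rework above does two things on top of the locking
change: it folds the 160-bit SHA-1 state in half before copying out
EXTRACT_SIZE (10) bytes, and it XORs in words from an architectural
hardware RNG when one is present. A minimal userspace sketch of the same
fold-and-mix, with fake_arch_rng() standing in for the kernel-only
arch_get_random_long():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EXTRACT_SIZE 10
    #define LONGS(x) (((x) + sizeof(unsigned long) - 1) / sizeof(unsigned long))

    static uint32_t rol32(uint32_t w, int s)
    {
        return (w << s) | (w >> (32 - s));
    }

    /* Stand-in for arch_get_random_long(); a real caller must handle 0. */
    static int fake_arch_rng(unsigned long *v)
    {
        *v = 0x9e3779b9UL;
        return 1;
    }

    int main(void)
    {
        union {
            uint32_t w[5];
            unsigned long l[LONGS(EXTRACT_SIZE)];
        } hash = { .w = { 0x67452301, 0xefcdab89, 0x98badcfe,
                          0x10325476, 0xc3d2e1f0 } };
        unsigned char out[EXTRACT_SIZE];
        unsigned long v;
        size_t i;

        /* Fold in half: feed back twice as much data as we output. */
        hash.w[0] ^= hash.w[3];
        hash.w[1] ^= hash.w[4];
        hash.w[2] ^= rol32(hash.w[2], 16);

        /* Mix in hardware RNG words, if the hardware offers any. */
        for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
            if (!fake_arch_rng(&v))
                break;
            hash.l[i] ^= v;
        }

        memcpy(out, &hash, EXTRACT_SIZE);
        for (i = 0; i < EXTRACT_SIZE; i++)
            printf("%02x", out[i]);
        putchar('\n');
        return 0;
    }
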
+diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
+index 51e0e2d..a330492 100644
+--- a/drivers/firmware/pcdp.c
++++ b/drivers/firmware/pcdp.c
+@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+- pcdp = ioremap(efi.hcdp, 4096);
++ pcdp = early_ioremap(efi.hcdp, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
+
+ if (strstr(cmdline, "console=hcdp")) {
+@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
+ }
+
+ out:
+- iounmap(pcdp);
++ early_iounmap(pcdp, 4096);
+ return rc;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d4c4937..fae2050 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -708,8 +708,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+- for (clock = 0; clock <= max_clock; clock++) {
++ for (clock = 0; clock <= max_clock; clock++) {
++ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (intel_dp_link_required(mode->clock, bpp)
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a6dcd18..96532bc 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -40,11 +40,28 @@
+ * Note that newer firmware allows querying device for maximum useable
+ * coordinates.
+ */
++#define XMIN 0
++#define XMAX 6143
++#define YMIN 0
++#define YMAX 6143
+ #define XMIN_NOMINAL 1472
+ #define XMAX_NOMINAL 5472
+ #define YMIN_NOMINAL 1408
+ #define YMAX_NOMINAL 4448
+
++/* Size in bits of absolute position values reported by the hardware */
++#define ABS_POS_BITS 13
++
++/*
++ * Any position values from the hardware above the following limits are
++ * treated as "wrapped around negative" values that have been truncated to
++ * the 13-bit reporting range of the hardware. These are just reasonable
++ * guesses and can be adjusted if hardware is found that operates outside
++ * of these parameters.
++ */
++#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
++#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+@@ -544,6 +561,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
++ /* Convert wrap-around values to negative */
++ if (hw->x > X_MAX_POSITIVE)
++ hw->x -= 1 << ABS_POS_BITS;
++ if (hw->y > Y_MAX_POSITIVE)
++ hw->y -= 1 << ABS_POS_BITS;
++
+ return 0;
+ }
+
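
Some pads report small negative coordinates truncated to the 13-bit
field, so anything above X_MAX_POSITIVE/Y_MAX_POSITIVE is re-read as a
wrapped negative by subtracting 2^13. A small sketch of the same
sign-fixup, reusing the constants from the hunk above:

    #include <stdio.h>

    #define ABS_POS_BITS 13
    #define XMAX 6143
    #define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)

    static int fix_wraparound(int raw)
    {
        /* Raw values above the midpoint are truncated negatives. */
        if (raw > X_MAX_POSITIVE)
            raw -= 1 << ABS_POS_BITS;
        return raw;
    }

    int main(void)
    {
        /* 8191 is -1 truncated to 13 bits; 1500 is a normal reading. */
        printf("%d %d\n", fix_wraparound(8191), fix_wraparound(1500));
        return 0;
    }
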
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 532a902..d432032 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -19,7 +19,7 @@
+ /*
+ * Tunable constants
+ */
+-#define ENDIO_HOOK_POOL_SIZE 10240
++#define ENDIO_HOOK_POOL_SIZE 1024
+ #define DEFERRED_SET_SIZE 64
+ #define MAPPING_POOL_SIZE 1024
+ #define PRISON_CELLS 1024
+@@ -857,7 +857,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+
+ if (m->err) {
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -869,7 +869,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ if (r) {
+ DMERR("dm_thin_insert_block() failed");
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -884,6 +884,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+
++out:
+ list_del(&m->list);
+ mempool_free(m, tc->pool->mapping_pool);
+ }
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2d97bf0..62306e5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2321,7 +2321,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ /* There is nowhere to write, so all non-sync
+ * drives must be failed - so we are finished
+ */
+- sector_t rv = max_sector - sector_nr;
++ sector_t rv;
++ if (min_bad > 0)
++ max_sector = sector_nr + min_bad;
++ rv = max_sector - sector_nr;
+ *skipped = 1;
+ put_buf(r1_bio);
+ return rv;
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index ed77c6d..5327061 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ spin_lock_init(&dev->hw_lock);
+
++ dev->hw_io = pnp_port_start(pnp_dev, 0);
++
+ pnp_set_drvdata(pnp_dev, dev);
+ dev->pnp_dev = pnp_dev;
+
+@@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ /* claim the resources */
+ error = -EBUSY;
+- dev->hw_io = pnp_port_start(pnp_dev, 0);
+ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+ dev->hw_io = -1;
+ dev->irq = -1;
+diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
+index 60107ee..4eec7b7 100644
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
+ u32 fatevent;
+ int err;
+
+- add_interrupt_randomness(irq);
+-
+ err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
+ event_regs, 3);
+ if (err)
+diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
+index f742745..b90f3e0 100644
+--- a/drivers/mfd/wm831x-otp.c
++++ b/drivers/mfd/wm831x-otp.c
+@@ -18,6 +18,7 @@
+ #include <linux/bcd.h>
+ #include <linux/delay.h>
+ #include <linux/mfd/core.h>
++#include <linux/random.h>
+
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/mfd/wm831x/otp.h>
+@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
+
+ int wm831x_otp_init(struct wm831x *wm831x)
+ {
++ char uuid[WM831X_UNIQUE_ID_LEN];
+ int ret;
+
+ ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
+@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
+ dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
+ ret);
+
++ ret = wm831x_unique_id_read(wm831x, uuid);
++ if (ret == 0)
++ add_device_randomness(uuid, sizeof(uuid));
++ else
++ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index bdf960b..ae7528b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -925,6 +925,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0411, 0x015d) },
+ { USB_DEVICE(0x0411, 0x016f) },
+ { USB_DEVICE(0x0411, 0x01a2) },
++ { USB_DEVICE(0x0411, 0x01ee) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x002f) },
+ { USB_DEVICE(0x07aa, 0x003c) },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index d1049ee..26fba2d 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -1431,14 +1431,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
+ */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS;
+- else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
++ else
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
+
+- if (!asus->dsts_id) {
+- pr_err("Can't find DSTS");
+- return -ENODEV;
+- }
+-
+ /* CWAP allow to define the behavior of the Fn+F2 key,
+ * this method doesn't seems to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
+index bdc909b..f3c2110 100644
+--- a/drivers/rtc/rtc-wm831x.c
++++ b/drivers/rtc/rtc-wm831x.c
+@@ -24,7 +24,7 @@
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+-
++#include <linux/random.h>
+
+ /*
+ * R16416 (0x4020) - RTC Write Counter
+@@ -96,6 +96,26 @@ struct wm831x_rtc {
+ unsigned int alarm_enabled:1;
+ };
+
++static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
++{
++ int ret;
++ u16 reg;
++
++ /*
++ * The write counter contains a pseudo-random number which is
++ * regenerated every time we set the RTC so it should be a
++ * useful per-system source of entropy.
++ */
++ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
++ if (ret >= 0) {
++ reg = ret;
++ add_device_randomness(&reg, sizeof(reg));
++ } else {
++ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
++ ret);
++ }
++}
++
+ /*
+ * Read current time and date in RTC
+ */
+@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
+ alm_irq, ret);
+ }
+
++ wm831x_rtc_add_randomness(wm831x);
++
+ return 0;
+
+ err:
+diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
+index 6903d39..90e9e32 100644
+--- a/drivers/staging/media/lirc/lirc_sir.c
++++ b/drivers/staging/media/lirc/lirc_sir.c
+@@ -53,6 +53,7 @@
+ #include <linux/io.h>
+ #include <asm/irq.h>
+ #include <linux/fcntl.h>
++#include <linux/platform_device.h>
+ #ifdef LIRC_ON_SA1100
+ #include <asm/hardware.h>
+ #ifdef CONFIG_SA1100_COLLIE
+@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
+ .owner = THIS_MODULE,
+ };
+
++static struct platform_device *lirc_sir_dev;
+
+ static int init_chrdev(void)
+ {
++ driver.dev = &lirc_sir_dev->dev;
+ driver.minor = lirc_register_driver(&driver);
+ if (driver.minor < 0) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
+ return 0;
+ }
+
++static int __devinit lirc_sir_probe(struct platform_device *dev)
++{
++ return 0;
++}
++
++static int __devexit lirc_sir_remove(struct platform_device *dev)
++{
++ return 0;
++}
++
++static struct platform_driver lirc_sir_driver = {
++ .probe = lirc_sir_probe,
++ .remove = __devexit_p(lirc_sir_remove),
++ .driver = {
++ .name = "lirc_sir",
++ .owner = THIS_MODULE,
++ },
++};
+
+ static int __init lirc_sir_init(void)
+ {
+ int retval;
+
++ retval = platform_driver_register(&lirc_sir_driver);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
++ "failed!\n");
++ return -ENODEV;
++ }
++
++ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
++ if (!lirc_sir_dev) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
++ "failed!\n");
++ retval = -ENOMEM;
++ goto pdev_alloc_fail;
++ }
++
++ retval = platform_device_add(lirc_sir_dev);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
++ "failed!\n");
++ retval = -ENODEV;
++ goto pdev_add_fail;
++ }
++
+ retval = init_chrdev();
+ if (retval < 0)
+- return retval;
++ goto fail;
++
+ retval = init_lirc_sir();
+ if (retval) {
+ drop_chrdev();
+- return retval;
++ goto fail;
+ }
++
+ return 0;
++
++fail:
++ platform_device_del(lirc_sir_dev);
++pdev_add_fail:
++ platform_device_put(lirc_sir_dev);
++pdev_alloc_fail:
++ platform_driver_unregister(&lirc_sir_driver);
++ return retval;
+ }
+
+ static void __exit lirc_sir_exit(void)
+@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
+ drop_hardware();
+ drop_chrdev();
+ drop_port();
++ platform_device_unregister(lirc_sir_dev);
++ platform_driver_unregister(&lirc_sir_driver);
+ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ }
+
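
The lirc_sir init path now registers a platform driver and device
before the character device, and unwinds everything in reverse order on
failure. A generic sketch of that goto-ladder, with hypothetical
setup_*/undo_* pairs standing in for platform_driver_register(),
platform_device_add() and init_chrdev():

    #include <stdio.h>

    /* Hypothetical setup/teardown pairs; setup_chrdev() fails on
     * purpose so the unwind path below actually runs. */
    static int setup_driver(void) { puts("driver registered"); return 0; }
    static void undo_driver(void) { puts("driver unregistered"); }
    static int setup_device(void) { puts("device added"); return 0; }
    static void undo_device(void) { puts("device removed"); }
    static int setup_chrdev(void) { puts("chrdev init"); return -1; }

    static int init_all(void)
    {
        int ret;

        ret = setup_driver();
        if (ret)
            return ret;
        ret = setup_device();
        if (ret)
            goto undo_drv;
        ret = setup_chrdev();
        if (ret)
            goto undo_dev;
        return 0;

    undo_dev:
        undo_device();
    undo_drv:
        undo_driver();
        return ret; /* each label undoes everything set up before it */
    }

    int main(void)
    {
        return init_all() ? 1 : 0;
    }
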
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index a4b192d..08b92a6 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -660,7 +660,8 @@ static void pch_dma_rx_complete(void *arg)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ async_tx_ack(priv->desc_rx);
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ }
+
+ static void pch_dma_tx_complete(void *arg)
+@@ -715,7 +716,8 @@ static int handle_rx_to(struct eg20t_port *priv)
+ int rx_size;
+ int ret;
+ if (!priv->start_rx) {
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ return 0;
+ }
+ buf = &priv->rxbuf;
+@@ -977,11 +979,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ case PCH_UART_IID_RDR: /* Received Data Ready */
+ if (priv->use_dma) {
+ pch_uart_hal_disable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ ret = dma_handle_rx(priv);
+ if (!ret)
+ pch_uart_hal_enable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ } else {
+ ret = handle_rx(priv);
+ }
+@@ -1107,7 +1111,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ priv->start_rx = 0;
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ priv->int_dis_flag = 1;
+ }
+
+@@ -1163,6 +1168,7 @@ static int pch_uart_startup(struct uart_port *port)
+ break;
+ case 16:
+ fifo_size = PCH_UART_HAL_FIFO16;
++ break;
+ case 1:
+ default:
+ fifo_size = PCH_UART_HAL_FIFO_DIS;
+@@ -1200,7 +1206,8 @@ static int pch_uart_startup(struct uart_port *port)
+ pch_request_dma(port);
+
+ priv->start_rx = 1;
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ uart_update_timeout(port, CS8, default_baud);
+
+ return 0;
+@@ -1258,7 +1265,7 @@ static void pch_uart_set_termios(struct uart_port *port,
+ stb = PCH_UART_HAL_STB1;
+
+ if (termios->c_cflag & PARENB) {
+- if (!(termios->c_cflag & PARODD))
++ if (termios->c_cflag & PARODD)
+ parity = PCH_UART_HAL_PARITY_ODD;
+ else
+ parity = PCH_UART_HAL_PARITY_EVEN;
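
The termios hunk just above inverts a test: with PARENB set, PARODD
selects odd parity, and the pre-fix driver programmed odd when PARODD
was clear. A tiny decoder of the same flags (standard <termios.h>, so
this runs in userspace):

    #include <stdio.h>
    #include <termios.h>

    static const char *parity_name(tcflag_t cflag)
    {
        if (!(cflag & PARENB))
            return "none";
        /* PARODD set means odd; the old driver had this inverted. */
        return (cflag & PARODD) ? "odd" : "even";
    }

    int main(void)
    {
        printf("%s\n", parity_name(PARENB | PARODD)); /* odd */
        printf("%s\n", parity_name(PARENB));          /* even */
        printf("%s\n", parity_name(0));               /* none */
        return 0;
    }
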
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 175b6bb..52340cc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -24,6 +24,7 @@
+ #include <linux/kthread.h>
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -1897,6 +1898,14 @@ int usb_new_device(struct usb_device *udev)
+ /* Tell the world! */
+ announce_device(udev);
+
++ if (udev->serial)
++ add_device_randomness(udev->serial, strlen(udev->serial));
++ if (udev->product)
++ add_device_randomness(udev->product, strlen(udev->product));
++ if (udev->manufacturer)
++ add_device_randomness(udev->manufacturer,
++ strlen(udev->manufacturer));
++
+ device_enable_async_suspend(&udev->dev);
+ /* Register the device. The device driver is responsible
+ * for configuring the device and invoking the add-device
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 1fc8f12..347bb05 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
+ writel(FLAG_CF, &ehci_regs->configured_flag);
+
+ /* Wait until the controller is no longer halted */
+- loop = 10;
++ loop = 1000;
+ do {
+ status = readl(&ehci_regs->status);
+ if (!(status & STS_HALT))
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index aaccffa..dd9533a 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 24a49d4..1585db1 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ bio->bi_rw |= REQ_WRITE;
+ }
+
+- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+- bio, per_dev->length);
++ osd_req_write(or, _ios_obj(ios, cur_comp),
++ per_dev->offset, bio, per_dev->length);
+ ORE_DBGMSG("write(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(per_dev->length), dev);
+ } else if (ios->kern_buff) {
+@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ (ios->si.unit_off + ios->length >
+ ios->layout->stripe_unit));
+
+- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
++ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
+ per_dev->offset,
+ ios->kern_buff, ios->length);
+ if (unlikely(ret))
+ goto out;
+ ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(ios->length), per_dev->dev);
+ } else {
+- osd_req_set_attributes(or, _ios_obj(ios, dev));
++ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
+ ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ ios->out_attr_len, dev);
+ }
+
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index c43a452..961e562 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -452,8 +452,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
+- /* Only do I/O if gfp is a superset of GFP_KERNEL */
+- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
++ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
++ * doing this memory reclaim for a fs-related allocation.
++ */
++ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
++ !(current->flags & PF_FSTRANS)) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 9cfa60a..87a1746 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2236,7 +2236,7 @@ out_acl:
+ if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
+ if ((buflen -= 4) < 0)
+ goto out_resource;
+- WRITE32(1);
++ WRITE32(0);
+ }
+ if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
+ if ((buflen -= 4) < 0)
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index ac258be..c598cfb 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
+ goto out;
+
+- down_read(&inode->i_sb->s_umount);
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
+ nilfs_transaction_begin(inode->i_sb, &ti, 0);
+ ret = nilfs_cpfile_change_cpmode(
+@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
+- up_read(&inode->i_sb->s_umount);
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ out:
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 8351c44..97bfbdd 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ struct nilfs_root *root;
+ int ret;
+
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
++
+ down_read(&nilfs->ns_segctor_sem);
+ ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
+ up_read(&nilfs->ns_segctor_sem);
+@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ ret = nilfs_get_root_dentry(s, root, root_dentry);
+ nilfs_put_root(root);
+ out:
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ return ret;
+ }
+
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 35a8970..1c98f53 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+ nilfs->ns_bdev = bdev;
+ atomic_set(&nilfs->ns_ndirtyblks, 0);
+ init_rwsem(&nilfs->ns_sem);
++ mutex_init(&nilfs->ns_snapshot_mount_mutex);
+ INIT_LIST_HEAD(&nilfs->ns_dirty_files);
+ INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
+ spin_lock_init(&nilfs->ns_inode_lock);
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index 9992b11..de7435f 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -47,6 +47,7 @@ enum {
+ * @ns_flags: flags
+ * @ns_bdev: block device
+ * @ns_sem: semaphore for shared states
++ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
+ * @ns_sbh: buffer heads of on-disk super blocks
+ * @ns_sbp: pointers to super block data
+ * @ns_sbwtime: previous write time of super block
+@@ -99,6 +100,7 @@ struct the_nilfs {
+
+ struct block_device *ns_bdev;
+ struct rw_semaphore ns_sem;
++ struct mutex ns_snapshot_mount_mutex;
+
+ /*
+ * used for
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index c5ed2f1..a2227f7 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -41,6 +41,9 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+ unsigned long *, int *, int, unsigned int flags);
+ void unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page);
+ void __unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
+ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
+@@ -99,6 +102,13 @@ static inline unsigned long hugetlb_total_pages(void)
+ #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+ #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
+ #define unmap_hugepage_range(vma, start, end, page) BUG()
++static inline void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ BUG();
++}
++
+ static inline void hugetlb_report_meminfo(struct seq_file *m)
+ {
+ }
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index df53fdf..cdde2b3 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -124,8 +124,17 @@ extern struct group_info init_groups;
+
+ extern struct cred init_cred;
+
++extern struct task_group root_task_group;
++
++#ifdef CONFIG_CGROUP_SCHED
++# define INIT_CGROUP_SCHED(tsk) \
++ .sched_task_group = &root_task_group,
++#else
++# define INIT_CGROUP_SCHED(tsk)
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+-# define INIT_PERF_EVENTS(tsk) \
++# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
+@@ -162,6 +171,7 @@ extern struct cred init_cred;
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
++ INIT_CGROUP_SCHED(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 8f74538..29e217a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,11 +50,13 @@ struct rnd_state {
+
+ extern void rand_initialize_irq(int irq);
+
++extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq);
++extern void add_interrupt_randomness(int irq, int irq_flags);
+
+ extern void get_random_bytes(void *buf, int nbytes);
++extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
+ #ifndef MODULE
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index d336c35..1e86bb4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1236,6 +1236,9 @@ struct task_struct {
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#ifdef CONFIG_CGROUP_SCHED
++ struct task_group *sched_task_group;
++#endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+@@ -2646,7 +2649,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
+ extern long sched_group_rt_period(struct task_group *tg);
+ extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
+ #endif
+-#endif
++#endif /* CONFIG_CGROUP_SCHED */
+
+ extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 866c9d5..80fb1c6 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
+ * @uaddr2: the pi futex we will take prior to returning to user-space
+ *
+ * The caller will wait on uaddr and will be requeued by futex_requeue() to
+- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+- * complete the acquisition of the rt_mutex prior to returning to userspace.
+- * This ensures the rt_mutex maintains an owner when it has waiters; without
+- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+- * need to.
++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
++ * without one, the pi logic would not know which task to boost/deboost, if
++ * there was a need to.
+ *
+ * We call schedule in futex_wait_queue_me() when we enqueue and return there
+ * via the following:
+@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct futex_q q = futex_q_init;
+ int res, ret;
+
++ if (uaddr == uaddr2)
++ return -EINVAL;
++
+ if (!bitset)
+ return -EINVAL;
+
+@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+ * the pi_state.
+ */
+- WARN_ON(!&q.pi_state);
++ WARN_ON(!q.pi_state);
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * fault, unlock the rt_mutex and return the fault to userspace.
+ */
+ if (ret == -EFAULT) {
+- if (rt_mutex_owner(pi_mutex) == current)
++ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+ } else if (ret == -EINTR) {
+ /*
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 470d08c..10e0772 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -117,7 +117,7 @@ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
+ irqreturn_t retval = IRQ_NONE;
+- unsigned int random = 0, irq = desc->irq_data.irq;
++ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+ do {
+ irqreturn_t res;
+@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
+- random |= action->flags;
++ flags |= action->flags;
+ break;
+
+ default:
+@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ action = action->next;
+ } while (action);
+
+- if (random & IRQF_SAMPLE_RANDOM)
+- add_interrupt_randomness(irq);
++ add_interrupt_randomness(irq, flags);
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 9cd8ca7..e0431c4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -746,22 +746,19 @@ static inline int cpu_of(struct rq *rq)
+ /*
+ * Return the group to which this tasks belongs.
+ *
+- * We use task_subsys_state_check() and extend the RCU verification with
+- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+- * task it moves into the cgroup. Therefore by holding either of those locks,
+- * we pin the task to the current cgroup.
++ * We cannot use task_subsys_state() and friends because the cgroup
++ * subsystem changes that value before the cgroup_subsys::attach() method
++ * is called, therefore we cannot pin it and might observe the wrong value.
++ *
++ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
++ * core changes this before calling sched_move_task().
++ *
++ * Instead we use a 'copy' which is updated from sched_move_task() while
++ * holding both task_struct::pi_lock and rq::lock.
+ */
+ static inline struct task_group *task_group(struct task_struct *p)
+ {
+- struct task_group *tg;
+- struct cgroup_subsys_state *css;
+-
+- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+- lockdep_is_held(&p->pi_lock) ||
+- lockdep_is_held(&task_rq(p)->lock));
+- tg = container_of(css, struct task_group, css);
+-
+- return autogroup_task_group(p, tg);
++ return p->sched_task_group;
+ }
+
+ /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+@@ -2372,7 +2369,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+- * see set_task_rq().
++ * see task_group().
+ *
+ * Furthermore, all task_rq users should acquire both locks, see
+ * task_rq_lock().
+@@ -8952,6 +8949,7 @@ void sched_destroy_group(struct task_group *tg)
+ */
+ void sched_move_task(struct task_struct *tsk)
+ {
++ struct task_group *tg;
+ int on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+@@ -8966,6 +8964,12 @@ void sched_move_task(struct task_struct *tsk)
+ if (unlikely(running))
+ tsk->sched_class->put_prev_task(rq, tsk);
+
++ tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
++ lockdep_is_held(&tsk->sighand->siglock)),
++ struct task_group, css);
++ tg = autogroup_task_group(tsk, tg);
++ tsk->sched_task_group = tg;
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 993599e..d74c317 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -886,7 +886,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ * %pK cannot be used in IRQ context because its test
+ * for CAP_SYSLOG would be meaningless.
+ */
+- if (in_irq() || in_serving_softirq() || in_nmi()) {
++ if (kptr_restrict && (in_irq() || in_serving_softirq() ||
++ in_nmi())) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+ return string(buf, end, "pK-error", spec);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index b1e1bad..0f897b8 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2382,6 +2382,25 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ }
+ }
+
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ __unmap_hugepage_range(vma, start, end, ref_page);
++
++ /*
++ * Clear this flag so that x86's huge_pmd_share page_table_shareable
++ * test will fail on a vma being torn down, and not grab a page table
++ * on its way out. We're lucky that the flag has such an appropriate
++ * name, and can in fact be safely cleared here. We could clear it
++ * before the __unmap_hugepage_range above, but all that's necessary
++ * is to clear it before releasing the i_mmap_mutex. This works
++ * because in the context this is called, the VMA is about to be
++ * destroyed and the i_mmap_mutex is held.
++ */
++ vma->vm_flags &= ~VM_MAYSHARE;
++}
++
+ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+ {
+@@ -2939,9 +2958,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
+ }
+ }
+ spin_unlock(&mm->page_table_lock);
+- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+-
++ /*
++ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
++ * may have cleared our pud entry and done put_page on the page table:
++ * once we release i_mmap_mutex, another task can do the final put_page
++ * and that page table be reused and filled with junk.
++ */
+ flush_tlb_range(vma, start, end);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
+
+ int hugetlb_reserve_pages(struct inode *inode,
+diff --git a/mm/internal.h b/mm/internal.h
+index 2189af4..0c26b5e 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
+ extern u64 hwpoison_filter_flags_value;
+ extern u64 hwpoison_filter_memcg;
+ extern u32 hwpoison_filter_enable;
++
++extern void set_pageblock_order(void);
+diff --git a/mm/memory.c b/mm/memory.c
+index 1b1ca17..70f5daf 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1358,8 +1358,11 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
+ * Since no pte has actually been setup, it is
+ * safe to do nothing in this case.
+ */
+- if (vma->vm_file)
+- unmap_hugepage_range(vma, start, end, NULL);
++ if (vma->vm_file) {
++ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ __unmap_hugepage_range_final(vma, start, end, NULL);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ }
+
+ start = end;
+ } else
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 9a611d3..862b608 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -33,6 +33,24 @@
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
++
++ /*
++ * RCU here will block mmu_notifier_unregister until
++ * ->release returns.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * if ->release runs before mmu_notifier_unregister it
++ * must be handled as it's the only way for the driver
++ * to flush all existing sptes and stop the driver
++ * from establishing any more sptes before all the
++ * pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ rcu_read_unlock();
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
+ * mmu_notifier_unregister to return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
+- */
+- rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- hlist_del_rcu(&mn->hlist);
+-
+ /*
+ * RCU here will force exit_mmap to wait ->release to finish
+ * before freeing the pages.
+ */
+ rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ /*
+ * exit_mmap will block in mmu_notifier_release to
+ * guarantee ->release is called before freeing the
+@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ rcu_read_unlock();
+- } else
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
++ hlist_del_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+ * Wait any running method to finish, of course including
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 065dbe8..6e51bf0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4281,25 +4281,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+-/* Return a sensible default order for the pageblock size. */
+-static inline int pageblock_default_order(void)
+-{
+- if (HPAGE_SHIFT > PAGE_SHIFT)
+- return HUGETLB_PAGE_ORDER;
+-
+- return MAX_ORDER-1;
+-}
+-
+ /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
+-static inline void __init set_pageblock_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
++ unsigned int order;
++
+ /* Check that pageblock_nr_pages has not already been setup */
+ if (pageblock_order)
+ return;
+
++ if (HPAGE_SHIFT > PAGE_SHIFT)
++ order = HUGETLB_PAGE_ORDER;
++ else
++ order = MAX_ORDER - 1;
++
+ /*
+ * Assume the largest contiguous order of interest is a huge page.
+- * This value may be variable depending on boot parameters on IA64
++ * This value may be variable depending on boot parameters on IA64 and
++ * powerpc.
+ */
+ pageblock_order = order;
+ }
+@@ -4307,15 +4306,13 @@ static inline void __init set_pageblock_order(unsigned int order)
+
+ /*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+- * and pageblock_default_order() are unused as pageblock_order is set
+- * at compile-time. See include/linux/pageblock-flags.h for the values of
+- * pageblock_order based on the kernel config
++ * is unused as pageblock_order is set at compile-time. See
++ * include/linux/pageblock-flags.h for the values of pageblock_order based on
++ * the kernel config
+ */
+-static inline int pageblock_default_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
+- return MAX_ORDER-1;
+ }
+-#define set_pageblock_order(x) do {} while (0)
+
+ #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+@@ -4403,7 +4400,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ if (!size)
+ continue;
+
+- set_pageblock_order(pageblock_default_order());
++ set_pageblock_order();
+ setup_usemap(pgdat, zone, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+diff --git a/mm/sparse.c b/mm/sparse.c
+index a8bc7d3..bf7d3cc 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -486,6 +486,9 @@ void __init sparse_init(void)
+ struct page **map_map;
+ #endif
+
++ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
++ set_pageblock_order();
++
+ /*
+ * map is using big page (aka 2M in x86 64 bit)
+ * usemap is less one page (aka 24 bytes)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5738654..4b18703 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1177,6 +1177,7 @@ static int __dev_open(struct net_device *dev)
+ net_dmaengine_get();
+ dev_set_rx_mode(dev);
+ dev_activate(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ return ret;
+@@ -4841,6 +4842,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return err;
+ }
+ EXPORT_SYMBOL(dev_set_mac_address);
+@@ -5621,6 +5623,7 @@ int register_netdevice(struct net_device *dev)
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+
+ /* Notify protocols, that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
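
These hooks feed device identity (the MAC address) into the entropy
pool whenever an interface is opened, registered, or readdressed; such
bytes are not secret, but they differ across machines and so help
distinguish otherwise-identical early boot states. A toy illustration
of the idea (deliberately not the kernel's mix_pool_bytes()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pool[4]; /* toy pool, nothing like the real one */

    static void toy_add_device_randomness(const void *buf, unsigned int len)
    {
        const unsigned char *p = buf;
        unsigned int i;

        for (i = 0; i < len; i++) {
            uint32_t *w = &pool[i % 4];

            /* Rotate-and-XOR each identity byte into the pool. */
            *w = (*w << 7 | *w >> 25) ^ p[i];
        }
    }

    int main(void)
    {
        const unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

        toy_add_device_randomness(mac, sizeof(mac));
        printf("%08x %08x %08x %08x\n", (unsigned)pool[0],
               (unsigned)pool[1], (unsigned)pool[2], (unsigned)pool[3]);
        return 0;
    }
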
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 7f36b38..b856f87 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -33,22 +33,19 @@
+ #define TRACE_ON 1
+ #define TRACE_OFF 0
+
+-static void send_dm_alert(struct work_struct *unused);
+-
+-
+ /*
+ * Globals, our netlink socket pointer
+ * and the work handle that will send up
+ * netlink alerts
+ */
+ static int trace_state = TRACE_OFF;
+-static DEFINE_SPINLOCK(trace_state_lock);
++static DEFINE_MUTEX(trace_state_mutex);
+
+ struct per_cpu_dm_data {
+- struct work_struct dm_alert_work;
+- struct sk_buff *skb;
+- atomic_t dm_hit_count;
+- struct timer_list send_timer;
++ spinlock_t lock;
++ struct sk_buff *skb;
++ struct work_struct dm_alert_work;
++ struct timer_list send_timer;
+ };
+
+ struct dm_hw_stat_delta {
+@@ -74,56 +71,59 @@ static int dm_delay = 1;
+ static unsigned long dm_hw_check_delta = 2*HZ;
+ static LIST_HEAD(hw_stats_list);
+
+-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
++static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ {
+ size_t al;
+ struct net_dm_alert_msg *msg;
+ struct nlattr *nla;
++ struct sk_buff *skb;
++ unsigned long flags;
+
+ al = sizeof(struct net_dm_alert_msg);
+ al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+ al += sizeof(struct nlattr);
+
+- data->skb = genlmsg_new(al, GFP_KERNEL);
+- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
+- 0, NET_DM_CMD_ALERT);
+- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
+- msg = nla_data(nla);
+- memset(msg, 0, al);
+- atomic_set(&data->dm_hit_count, dm_hit_limit);
++ skb = genlmsg_new(al, GFP_KERNEL);
++
++ if (skb) {
++ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
++ 0, NET_DM_CMD_ALERT);
++ nla = nla_reserve(skb, NLA_UNSPEC,
++ sizeof(struct net_dm_alert_msg));
++ msg = nla_data(nla);
++ memset(msg, 0, al);
++ } else {
++ mod_timer(&data->send_timer, jiffies + HZ / 10);
++ }
++
++ spin_lock_irqsave(&data->lock, flags);
++ swap(data->skb, skb);
++ spin_unlock_irqrestore(&data->lock, flags);
++
++ return skb;
+ }
+
+-static void send_dm_alert(struct work_struct *unused)
++static void send_dm_alert(struct work_struct *work)
+ {
+ struct sk_buff *skb;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data;
+
+- /*
+- * Grab the skb we're about to send
+- */
+- skb = data->skb;
++ data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+- /*
+- * Replace it with a new one
+- */
+- reset_per_cpu_data(data);
+-
+- /*
+- * Ship it!
+- */
+- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
++ skb = reset_per_cpu_data(data);
+
++ if (skb)
++ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ }
+
+ /*
+ * This is the timer function to delay the sending of an alert
+ * in the event that more drops will arrive during the
+- * hysteresis period. Note that it operates under the timer interrupt
+- * so we don't need to disable preemption here
++ * hysteresis period.
+ */
+-static void sched_send_work(unsigned long unused)
++static void sched_send_work(unsigned long _data)
+ {
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+
+ schedule_work(&data->dm_alert_work);
+ }
+@@ -134,17 +134,19 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ struct nlmsghdr *nlh;
+ struct nlattr *nla;
+ int i;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct sk_buff *dskb;
++ struct per_cpu_dm_data *data;
++ unsigned long flags;
+
++ local_irq_save(flags);
++ data = &__get_cpu_var(dm_cpu_data);
++ spin_lock(&data->lock);
++ dskb = data->skb;
+
+- if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
+- /*
+- * we're already at zero, discard this hit
+- */
++ if (!dskb)
+ goto out;
+- }
+
+- nlh = (struct nlmsghdr *)data->skb->data;
++ nlh = (struct nlmsghdr *)dskb->data;
+ nla = genlmsg_data(nlmsg_data(nlh));
+ msg = nla_data(nla);
+ for (i = 0; i < msg->entries; i++) {
+@@ -153,11 +155,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ goto out;
+ }
+ }
+-
++ if (msg->entries == dm_hit_limit)
++ goto out;
+ /*
+ * We need to create a new entry
+ */
+- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
++ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
+ nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
+ memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
+ msg->points[msg->entries].count = 1;
+@@ -165,11 +168,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+
+ if (!timer_pending(&data->send_timer)) {
+ data->send_timer.expires = jiffies + dm_delay * HZ;
+- add_timer_on(&data->send_timer, smp_processor_id());
++ add_timer(&data->send_timer);
+ }
+
+ out:
+- return;
++ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
+@@ -213,7 +216,7 @@ static int set_all_monitor_traces(int state)
+ struct dm_hw_stat_delta *new_stat = NULL;
+ struct dm_hw_stat_delta *temp;
+
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+
+ if (state == trace_state) {
+ rc = -EAGAIN;
+@@ -252,7 +255,7 @@ static int set_all_monitor_traces(int state)
+ rc = -EINPROGRESS;
+
+ out_unlock:
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+
+ return rc;
+ }
+@@ -295,12 +298,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+
+ new_stat->dev = dev;
+ new_stat->last_rx = jiffies;
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_add_rcu(&new_stat->list, &hw_stats_list);
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ case NETDEV_UNREGISTER:
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+ if (new_stat->dev == dev) {
+ new_stat->dev = NULL;
+@@ -311,7 +314,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+ }
+ }
+ }
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ }
+ out:
+@@ -367,13 +370,15 @@ static int __init init_net_drop_monitor(void)
+
+ for_each_present_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+- reset_per_cpu_data(data);
+ INIT_WORK(&data->dm_alert_work, send_dm_alert);
+ init_timer(&data->send_timer);
+- data->send_timer.data = cpu;
++ data->send_timer.data = (unsigned long)data;
+ data->send_timer.function = sched_send_work;
++ spin_lock_init(&data->lock);
++ reset_per_cpu_data(data);
+ }
+
++
+ goto out;
+
+ out_unreg:
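
The drop_monitor rework drops the atomic hit counter and instead keeps
one skb per CPU behind a spinlock; the work item swaps in a fresh
buffer and ships the old one, so the trace hook and the sender never
touch the same buffer. A sketch of that swap-under-lock handoff, with a
pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Models reset_per_cpu_data(): allocate fresh storage, swap it in
     * under the lock, hand the old buffer back to be sent and freed. */
    struct pcpu_data {
        pthread_mutex_t lock;
        char *buf;
    };

    static char *swap_in_fresh(struct pcpu_data *d)
    {
        char *fresh = calloc(1, 64); /* NULL tolerated, like genlmsg_new() failing */
        char *old;

        pthread_mutex_lock(&d->lock);
        old = d->buf;
        d->buf = fresh;
        pthread_mutex_unlock(&d->lock);
        return old;
    }

    int main(void)
    {
        struct pcpu_data d = { PTHREAD_MUTEX_INITIALIZER, calloc(1, 64) };
        char *to_send = swap_in_fresh(&d);

        printf("shipping %p, writers continue into %p\n",
               (void *)to_send, (void *)d.buf);
        free(to_send);
        free(d.buf);
        return 0;
    }
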
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2ef859a..05842ab 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1354,6 +1354,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ if (tb[IFLA_MTU]) {
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 8761bf8..337c68b 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -246,7 +246,7 @@ static int rpcb_create_local_unix(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create AF_LOCAL rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+@@ -293,7 +293,7 @@ static int rpcb_create_local_net(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 4e2b3b4..c90b832 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -755,7 +755,9 @@ void rpc_execute(struct rpc_task *task)
+
+ static void rpc_async_schedule(struct work_struct *work)
+ {
++ current->flags |= PF_FSTRANS;
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index b446e10..06cdbff 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
+ int rc = 0;
+
+ if (!xprt->shutdown) {
++ current->flags |= PF_FSTRANS;
+ xprt_clear_connected(xprt);
+
+ dprintk("RPC: %s: %sconnect\n", __func__,
+@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+
+ out:
+ xprt_wake_pending_tasks(xprt, rc);
+-
+ out_clear:
+ dprintk("RPC: %s: exit\n", __func__);
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 55472c4..1a6edc7 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+@@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+@@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ /* Start by resetting any existing state */
+ xs_reset_transport(transport);
+ sock = xs_create_sock(xprt, transport,
+@@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+@@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ if (!sock) {
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ sock = xs_create_sock(xprt, transport,
+@@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -EINPROGRESS:
+ case -EALREADY:
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ return;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+@@ -2174,6 +2183,7 @@ out_eagain:
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
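
The sunrpc workers above all bracket their bodies with PF_FSTRANS,
which the nfs_release_page() hunk earlier in this patch checks before
doing writeback from memory reclaim; together they break the
reclaim-into-NFS deadlock. A userspace-shaped sketch of the flag
protocol (the bit value is a placeholder, the real one lives in
<linux/sched.h>; later kernels wrap the same idea as
memalloc_nofs_save()/memalloc_nofs_restore()):

    #include <stdio.h>

    #define PF_FSTRANS (1u << 5) /* placeholder bit for this sketch */

    static unsigned int task_flags; /* stand-in for current->flags */

    /* Mirrors the nfs_release_page() test added earlier in this patch. */
    static int reclaim_may_do_fs_io(void)
    {
        return !(task_flags & PF_FSTRANS);
    }

    static void connect_worker(void)
    {
        task_flags |= PF_FSTRANS;
        printf("inside worker, fs writeback allowed: %s\n",
               reclaim_may_do_fs_io() ? "yes" : "no");
        task_flags &= ~PF_FSTRANS;
    }

    int main(void)
    {
        connect_worker();
        printf("after worker, fs writeback allowed: %s\n",
               reclaim_may_do_fs_io() ? "yes" : "no");
        return 0;
    }
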
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 74d5292..b5e4c1c 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -981,6 +981,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
++ if (total == 1)
++ return 0;
++
+ for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+ const struct ieee80211_iface_combination *c;
+ struct ieee80211_iface_limit *limits;
+diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
+index 1cff331..4608c2c 100644
+--- a/sound/drivers/mpu401/mpu401_uart.c
++++ b/sound/drivers/mpu401/mpu401_uart.c
+@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
+ spin_lock_init(&mpu->output_lock);
+ spin_lock_init(&mpu->timer_lock);
+ mpu->hardware = hardware;
++ mpu->irq = -1;
+ if (! (info_flags & MPU401_INFO_INTEGRATED)) {
+ int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
+ mpu->res = request_region(port, res_size, "MPU401 UART");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 191fd78..2e2eb93 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4809,6 +4809,15 @@ static int alc269_resume(struct hda_codec *codec)
+ }
+ #endif /* CONFIG_PM */
+
++static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == ALC_FIXUP_ACT_PRE_PROBE)
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+@@ -4909,6 +4918,8 @@ enum {
+ ALC269_FIXUP_DMIC,
+ ALC269VB_FIXUP_AMIC,
+ ALC269VB_FIXUP_DMIC,
++ ALC269_FIXUP_LENOVO_DOCK,
++ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ };
+
+ static const struct alc_fixup alc269_fixups[] = {
+@@ -5029,6 +5040,20 @@ static const struct alc_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC269_FIXUP_LENOVO_DOCK] = {
++ .type = ALC_FIXUP_PINS,
++ .v.pins = (const struct alc_pincfg[]) {
++ { 0x19, 0x23a11040 }, /* dock mic */
++ { 0x1b, 0x2121103f }, /* dock headphone */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
++ },
++ [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5051,6 +5076,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5109,6 +5136,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ static const struct alc_model_fixup alc269_fixup_models[] = {
+ {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
+ {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
++ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {}
+ };
+
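
The new lenovo-dock entry shows the fixup chaining this driver uses:
ALC269_FIXUP_LENOVO_DOCK applies its pin overrides and then, via
.chained/.chain_id, hands off to ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT.
A simplified model of walking such a chain:

    #include <stdio.h>

    /* Simplified model of the alc_fixup .chained/.chain_id mechanism. */
    enum { FIX_LENOVO_DOCK, FIX_NO_HP_TO_LINEOUT, FIX_MAX };

    struct fixup {
        const char *what;
        int chained;  /* continue with chain_id after applying */
        int chain_id;
    };

    static const struct fixup table[FIX_MAX] = {
        [FIX_LENOVO_DOCK]      = { "dock mic + headphone pins", 1,
                                   FIX_NO_HP_TO_LINEOUT },
        [FIX_NO_HP_TO_LINEOUT] = { "skip HP-to-lineout retasking", 0, 0 },
    };

    static void apply_fixups(int id)
    {
        for (;;) {
            printf("applying: %s\n", table[id].what);
            if (!table[id].chained)
                break;
            id = table[id].chain_id;
        }
    }

    int main(void)
    {
        apply_fixups(FIX_LENOVO_DOCK);
        return 0;
    }
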
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 1fe1308..7160ff2 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -3227,7 +3227,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ int imux_is_smixer;
+- unsigned int parm;
++ unsigned int parm, parm2;
+ /* MUX6 (1eh) = stereo mixer */
+ imux_is_smixer =
+ snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
+@@ -3250,7 +3250,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x27, &parm);
+ snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm);
+- snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
++ parm2 = parm; /* for pin 0x0b */
+
+ /* PW2 (26h), AOW2 (ah) */
+ parm = AC_PWRST_D3;
+@@ -3265,6 +3265,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ if (!spec->hp_independent_mode) /* check for redirected HP */
+ set_pin_power_state(codec, 0x28, &parm);
+ snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
++ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
++ parm = parm2;
++ snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
+ /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
+ snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE,
+ imux_is_smixer ? AC_PWRST_D0 : parm);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 07dd7eb..e97df24 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -3105,6 +3105,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
+ /* VMID 2*250k */
+ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
+ WM8962_VMID_SEL_MASK, 0x100);
++
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
++ msleep(100);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index de61b8a..98c5774 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2508,7 +2508,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- bclk_rate = params_rate(params) * 2;
++ bclk_rate = params_rate(params) * 4;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bclk_rate *= 16;
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 379baad..5e634a2 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
+ return 0;
+
+ /* If a clock source can't tell us whether it's valid, we assume it is */
+- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
++ if (!uac2_control_is_readable(cs_desc->bmControls,
++ UAC2_CS_CONTROL_CLOCK_VALID - 1))
+ return 1;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
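
The clock.c fix above is a one-bit-pair off-by-one: UAC2 control selectors are 1-based (CLOCK_VALID is selector 2), while bmControls packs two bits per control starting at bit 0, so the selector must be decremented before indexing. A standalone sketch of that layout, assuming the two-bits-per-control encoding of the UAC2 clock source descriptor (D1..0 frequency control, D3..2 clock validity); the helper below only models the kernel's, it is not copied from it:

    #include <stdio.h>

    /* Each control owns two bits; bit 0 of the pair means "readable". */
    static int control_is_readable(unsigned int bmControls, int pair)
    {
        return (bmControls >> (pair * 2)) & 0x1;
    }

    int main(void)
    {
        unsigned int bmControls = 0x7;  /* freq r/w, validity read-only */
        int clock_valid_selector = 2;   /* 1-based, per the UAC2 spec */

        /* Wrong: treats the 1-based selector as a 0-based pair index. */
        printf("naive: %d\n",
               control_is_readable(bmControls, clock_valid_selector));
        /* Right: selector - 1 lands on bits D3..2. */
        printf("fixed: %d\n",
               control_is_readable(bmControls, clock_valid_selector - 1));
        return 0;
    }

Without the decrement the validity control looks unreadable, so the driver skips the query and assumes the clock is valid; the hunk's "- 1" makes it actually read the bit.
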
diff --git a/3.2.54/1027_linux-3.2.28.patch b/3.2.54/1027_linux-3.2.28.patch
new file mode 100644
index 0000000..4dbba4b
--- /dev/null
+++ b/3.2.54/1027_linux-3.2.28.patch
@@ -0,0 +1,1114 @@
+diff --git a/Makefile b/Makefile
+index bdf851f..5368961 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
+index 6ee781b..3ee3e84 100644
+--- a/arch/arm/configs/mxs_defconfig
++++ b/arch/arm/configs/mxs_defconfig
+@@ -32,7 +32,6 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_AEABI=y
+-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+ CONFIG_AUTO_ZRELADDR=y
+ CONFIG_FPE_NWFPE=y
+ CONFIG_NET=y
+diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
+index f0c05f4..ae7786d 100644
+--- a/arch/arm/mach-pxa/raumfeld.c
++++ b/arch/arm/mach-pxa/raumfeld.c
+@@ -951,12 +951,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
+
+ static struct eeti_ts_platform_data eeti_ts_pdata = {
+ .irq_active_high = 1,
++ .irq_gpio = GPIO_TOUCH_IRQ,
+ };
+
+ static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
+ .type = "eeti_ts",
+ .addr = 0x0a,
+- .irq = gpio_to_irq(GPIO_TOUCH_IRQ),
+ .platform_data = &eeti_ts_pdata,
+ };
+
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 84a9828..38c6645 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -615,7 +615,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+ return -EFAULT;
+ if (a.offset & ~PAGE_MASK)
+ return -EINVAL;
+- a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
+ }
+@@ -626,7 +625,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+- a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ }
+
+diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
+index 18c51df..25408d3 100644
+--- a/arch/s390/kernel/compat_wrapper.S
++++ b/arch/s390/kernel/compat_wrapper.S
+@@ -1636,7 +1636,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
+ llgfr %r6,%r6 # unsigned long
+ llgf %r0,164(%r15) # unsigned long
+ stg %r0,160(%r15)
+- jg sys_process_vm_readv
++ jg compat_sys_process_vm_readv
+
+ ENTRY(compat_sys_process_vm_writev_wrapper)
+ lgfr %r2,%r2 # compat_pid_t
+@@ -1646,4 +1646,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
+ llgfr %r6,%r6 # unsigned long
+ llgf %r0,164(%r15) # unsigned long
+ stg %r0,160(%r15)
+- jg sys_process_vm_writev
++ jg compat_sys_process_vm_writev
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 7315488..407789b 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
+ #endif
+ CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
++ CPU_BASED_RDPMC_EXITING |
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ /*
+ * We can allow some features even when not supported by the
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d62c731..c364358 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1170,12 +1170,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+ return (int32_t)(seq1 - seq2) >= 0;
+ }
+
+-static inline u32
+-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+- return ring->outstanding_lazy_request = dev_priv->next_seqno;
+-}
++u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+
+ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 3e2edc6..548a400 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1647,6 +1647,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ }
+ }
+
++static u32
++i915_gem_get_seqno(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ u32 seqno = dev_priv->next_seqno;
++
++ /* reserve 0 for non-seqno */
++ if (++dev_priv->next_seqno == 0)
++ dev_priv->next_seqno = 1;
++
++ return seqno;
++}
++
++u32
++i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
++{
++ if (ring->outstanding_lazy_request == 0)
++ ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
++
++ return ring->outstanding_lazy_request;
++}
++
+ int
+ i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+@@ -1658,6 +1680,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+ int ret;
+
+ BUG_ON(request == NULL);
++ seqno = i915_gem_next_request_seqno(ring);
+
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index f6613dc..19085c0 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
+ return space;
+ }
+
+-static u32 i915_gem_get_seqno(struct drm_device *dev)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 seqno;
+-
+- seqno = dev_priv->next_seqno;
+-
+- /* reserve 0 for non-seqno */
+- if (++dev_priv->next_seqno == 0)
+- dev_priv->next_seqno = 1;
+-
+- return seqno;
+-}
+-
+ static int
+ render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+@@ -277,8 +263,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+- /* Initialize the ring. */
+- I915_WRITE_START(ring, obj->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+@@ -304,14 +288,19 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ }
+ }
+
++ /* Initialize the ring. This must happen _after_ we've cleared the ring
++ * registers with the above sequence (the readback of the HEAD registers
++ * also enforces ordering), otherwise the hw might lose the new ring
++ * register values. */
++ I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+- I915_READ_START(ring) != obj->gtt_offset ||
+- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
++ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
++ I915_READ_START(ring) == obj->gtt_offset &&
++ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+@@ -488,7 +477,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+
+- *seqno = i915_gem_get_seqno(ring->dev);
++ *seqno = i915_gem_next_request_seqno(ring);
+
+ update_mboxes(ring, *seqno, mbox1_reg);
+ update_mboxes(ring, *seqno, mbox2_reg);
+@@ -586,8 +575,7 @@ static int
+ pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+ {
+- struct drm_device *dev = ring->dev;
+- u32 seqno = i915_gem_get_seqno(dev);
++ u32 seqno = i915_gem_next_request_seqno(ring);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+@@ -638,8 +626,7 @@ static int
+ render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+ {
+- struct drm_device *dev = ring->dev;
+- u32 seqno = i915_gem_get_seqno(dev);
++ u32 seqno = i915_gem_next_request_seqno(ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+@@ -813,7 +800,7 @@ ring_add_request(struct intel_ring_buffer *ring,
+ if (ret)
+ return ret;
+
+- seqno = i915_gem_get_seqno(ring->dev);
++ seqno = i915_gem_next_request_seqno(ring);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
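
The two i915 hunks above replace ad-hoc seqno grabbing with a lazy allocator: i915_gem_next_request_seqno() hands out a fresh sequence number only when no request is outstanding, then keeps returning that same value until the request is actually emitted, so every caller preparing one request agrees on its seqno. A standalone sketch of the allocate-on-first-use pattern, mirroring the hunk (zero is reserved to mean "no request", as in the driver):

    #include <stdio.h>

    struct ring { unsigned int outstanding_lazy_request; };

    static unsigned int next_seqno = 1;

    static unsigned int get_seqno(void)
    {
        unsigned int seqno = next_seqno;

        /* reserve 0 for "no request" */
        if (++next_seqno == 0)
            next_seqno = 1;
        return seqno;
    }

    static unsigned int next_request_seqno(struct ring *ring)
    {
        if (ring->outstanding_lazy_request == 0)
            ring->outstanding_lazy_request = get_seqno();
        return ring->outstanding_lazy_request;
    }

    int main(void)
    {
        struct ring r = { 0 };

        /* Repeated queries return one stable seqno per request... */
        printf("%u %u\n", next_request_seqno(&r), next_request_seqno(&r));
        r.outstanding_lazy_request = 0;  /* ...until it is emitted */
        printf("%u\n", next_request_seqno(&r));
        return 0;
    }
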
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 931f4df..fc0633c 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1065,24 +1065,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- save->vga_control[0] = RREG32(D1VGA_CONTROL);
+- save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+- save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+- save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+- if (rdev->num_crtc >= 4) {
+- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+- }
+- if (rdev->num_crtc >= 6) {
+- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+- }
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+@@ -1193,47 +1177,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ /* Unlock host access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+- /* Restore video state */
+- WREG32(D1VGA_CONTROL, save->vga_control[0]);
+- WREG32(D2VGA_CONTROL, save->vga_control[1]);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ }
+
+@@ -2080,10 +2023,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ else {
+- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+- rdev->config.evergreen.tile_config |= 1 << 4;
+- else
++ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
++ case 0: /* four banks */
+ rdev->config.evergreen.tile_config |= 0 << 4;
++ break;
++ case 1: /* eight banks */
++ rdev->config.evergreen.tile_config |= 1 << 4;
++ break;
++ case 2: /* sixteen banks */
++ default:
++ rdev->config.evergreen.tile_config |= 2 << 4;
++ break;
++ }
+ }
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 9e50814..636255b 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -804,10 +804,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+- rdev->config.cayman.tile_config |= 1 << 4;
+- else
++ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
++ case 0: /* four banks */
+ rdev->config.cayman.tile_config |= 0 << 4;
++ break;
++ case 1: /* eight banks */
++ rdev->config.cayman.tile_config |= 1 << 4;
++ break;
++ case 2: /* sixteen banks */
++ default:
++ rdev->config.cayman.tile_config |= 2 << 4;
++ break;
++ }
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5991484..5ce9402 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -253,13 +253,10 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ * rv515
+ */
+ struct rv515_mc_save {
+- u32 d1vga_control;
+- u32 d2vga_control;
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+- u32 d1crtc_control;
+- u32 d2crtc_control;
+ };
++
+ int rv515_init(struct radeon_device *rdev);
+ void rv515_fini(struct radeon_device *rdev);
+ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+@@ -387,11 +384,10 @@ void r700_cp_fini(struct radeon_device *rdev);
+ * evergreen
+ */
+ struct evergreen_mc_save {
+- u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+- u32 crtc_control[6];
+ };
++
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+ int evergreen_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index 6613ee9..d5f45b4 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+
+ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+ {
+- save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
+- save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
+ save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+- save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
+- save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
+
+ /* Stop all video */
+ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+ /* Unlock host access */
+ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+- /* Restore video state */
+- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
+- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
+- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
+- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
+- WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
+- WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
+- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
+- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+ }
+
+diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
+index 7f8f538..4f938bb 100644
+--- a/drivers/input/touchscreen/eeti_ts.c
++++ b/drivers/input/touchscreen/eeti_ts.c
+@@ -48,7 +48,7 @@ struct eeti_ts_priv {
+ struct input_dev *input;
+ struct work_struct work;
+ struct mutex mutex;
+- int irq, irq_active_high;
++ int irq_gpio, irq, irq_active_high;
+ };
+
+ #define EETI_TS_BITDEPTH (11)
+@@ -62,7 +62,7 @@ struct eeti_ts_priv {
+
+ static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
+ {
+- return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
++ return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
+ }
+
+ static void eeti_ts_read(struct work_struct *work)
+@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
+ static int __devinit eeti_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *idp)
+ {
+- struct eeti_ts_platform_data *pdata;
++ struct eeti_ts_platform_data *pdata = client->dev.platform_data;
+ struct eeti_ts_priv *priv;
+ struct input_dev *input;
+ unsigned int irq_flags;
+@@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+
+ priv->client = client;
+ priv->input = input;
+- priv->irq = client->irq;
++ priv->irq_gpio = pdata->irq_gpio;
++ priv->irq = gpio_to_irq(pdata->irq_gpio);
+
+- pdata = client->dev.platform_data;
++ err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
++ if (err < 0)
++ goto err1;
+
+ if (pdata)
+ priv->irq_active_high = pdata->irq_active_high;
+@@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+
+ err = input_register_device(input);
+ if (err)
+- goto err1;
++ goto err2;
+
+ err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
+ client->name, priv);
+ if (err) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+- goto err2;
++ goto err3;
+ }
+
+ /*
+@@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+ device_init_wakeup(&client->dev, 0);
+ return 0;
+
+-err2:
++err3:
+ input_unregister_device(input);
+ input = NULL; /* so we dont try to free it below */
++err2:
++ gpio_free(pdata->irq_gpio);
+ err1:
+ input_free_device(input);
+ kfree(priv);
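
The eeti_ts changes are a textbook goto-unwind edit: probe now acquires a GPIO early, so every later failure path must release it, and the labels are renumbered so cleanup still runs in exact reverse order of acquisition. A reduced sketch of the pattern (the three resources are invented stand-ins for the gpio, input device and irq in the hunk):

    #include <stdio.h>

    static int acquire_a(void) { return 0; }  /* e.g. gpio_request_one() */
    static int acquire_b(void) { return 0; }  /* e.g. input_register_device() */
    static int acquire_c(void) { return -1; } /* e.g. request_irq(), failing */
    static void release_a(void) { puts("release a"); }
    static void release_b(void) { puts("release b"); }

    static int probe(void)
    {
        int err;

        err = acquire_a();
        if (err)
            goto err1;
        err = acquire_b();
        if (err)
            goto err2;
        err = acquire_c();
        if (err)
            goto err3;
        return 0;

        /* Unwind strictly in reverse order of acquisition. */
    err3:
        release_b();
    err2:
        release_a();
    err1:
        return err;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }
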
+diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
+index 43a76c4..db662e2 100644
+--- a/drivers/mfd/ezx-pcap.c
++++ b/drivers/mfd/ezx-pcap.c
+@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
+ }
+ local_irq_enable();
+ ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
+- } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
++ } while (gpio_get_value(pdata->gpio));
+ }
+
+ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index 23406e6..ae286a9 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
++ if (!dev)
++ return -ENOMEM;
++
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 965c723..721adfd 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -5378,7 +5378,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
+ int k, last;
+
+ if (skb == NULL) {
+- j++;
++ j = NEXT_TX_BD(j);
+ continue;
+ }
+
+@@ -5390,8 +5390,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
+ tx_buf->skb = NULL;
+
+ last = tx_buf->nr_frags;
+- j++;
+- for (k = 0; k < last; k++, j++) {
++ j = NEXT_TX_BD(j);
++ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
+ tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
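
The bnx2 hunk replaces bare j++ with NEXT_TX_BD(j) when walking the TX ring: on this hardware the last descriptor of each ring page is a chain entry pointing at the next page rather than a real buffer descriptor (which is what the driver's NEXT_TX_BD macro accounts for), so a plain increment walks onto chain slots and drifts out of step with the producer. A toy version of the skip, with an illustrative page size:

    #include <stdio.h>

    #define DESC_PER_PAGE 8  /* illustrative; the driver uses far more */

    /* The last slot of every page is a chain pointer, not a real BD:
     * from the last usable slot, jump over it into the next page. */
    static unsigned int next_tx_bd(unsigned int i)
    {
        return (i % DESC_PER_PAGE) == DESC_PER_PAGE - 2 ? i + 2 : i + 1;
    }

    int main(void)
    {
        unsigned int i = 0, n;

        for (n = 0; n < 14; n++) {  /* prints 0..6 8..14: 7 is skipped */
            printf("%u ", i);
            i = next_tx_bd(i);
        }
        printf("\n");
        return 0;
    }
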
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index de00805..0549261 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4743,12 +4743,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ e1000_setup_rctl(adapter);
+ e1000_set_rx_mode(netdev);
+
++ rctl = er32(RCTL);
++
+ /* turn on all-multi mode if wake on multicast is enabled */
+- if (wufc & E1000_WUFC_MC) {
+- rctl = er32(RCTL);
++ if (wufc & E1000_WUFC_MC)
+ rctl |= E1000_RCTL_MPE;
+- ew32(RCTL, rctl);
+- }
++
++ /* enable receives in the hardware */
++ ew32(RCTL, rctl | E1000_RCTL_EN);
+
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index 3072d35..4f4d52a 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -1600,10 +1600,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+- * If the partner code word is null, stop forcing
+- * and restart auto negotiation.
+ */
+- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
++ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7bea9c6..a12c9bf 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1243,10 +1243,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ int vnet_hdr_sz;
+ int ret;
+
+- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
++ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (copy_from_user(&ifr, argp, ifreq_len))
+ return -EFAULT;
+-
++ } else {
++ memset(&ifr, 0, sizeof(ifr));
++ }
+ if (cmd == TUNGETFEATURES) {
+ /* Currently this just means: "what IFF flags are valid?".
+ * This is needed because we never checked for invalid flags on
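
The tun.c hunk plugs an information leak: for ioctls other than TUNSETIFF and the 0x89 socket range, the local struct ifreq was left uninitialized, yet later paths could copy it back to userspace, disclosing kernel stack bytes. Zeroing the struct up front is the standard fix, because padding and unwritten members otherwise keep whatever the stack held. A userspace sketch of the hazard (the struct is invented for illustration):

    #include <stdio.h>
    #include <string.h>

    struct req {
        char name[16];
        int  flags;
    };

    static void fill_name_only(struct req *r)
    {
        strcpy(r->name, "tun0");  /* flags and name[5..15] untouched */
    }

    int main(void)
    {
        struct req r;  /* stack garbage until written */

        /* Without this memset, flags and the tail of name[] would hold
         * whatever the stack contained - exactly the bytes a later
         * copy_to_user()-style copy-out would leak. */
        memset(&r, 0, sizeof(r));
        fill_name_only(&r);
        printf("%s flags=%d\n", r.name, r.flags);
        return 0;
    }
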
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index 582ca2d..c4c6a73 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
+ int retv;
+ int length = 0; /* shut up GCC */
+
+- urb = usb_alloc_urb(0, GFP_NOIO);
++ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 7f97164..2b8e957 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -674,6 +674,7 @@ int ath9k_hw_init(struct ath_hw *ah)
+ case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_AR9580:
+ case AR9300_DEVID_AR9462:
++ case AR9485_DEVID_AR1111:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 1bd8edf..a5c4ba8 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -48,6 +48,7 @@
+ #define AR9300_DEVID_AR9580 0x0033
+ #define AR9300_DEVID_AR9462 0x0034
+ #define AR9300_DEVID_AR9330 0x0035
++#define AR9485_DEVID_AR1111 0x0037
+
+ #define AR5416_AR9100_DEVID 0x000b
+
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 2dcdf63..1883d39 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -35,6 +35,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
+ { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
+ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
++ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
+ { 0 }
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+index 9ba2c1b..3395025 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -708,11 +708,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ */
+ static bool rs_use_green(struct ieee80211_sta *sta)
+ {
+- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+- struct iwl_rxon_context *ctx = sta_priv->ctx;
+-
+- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+- !(ctx->ht.non_gf_sta_present);
++ /*
++ * There's a bug somewhere in this code that causes the
++ * scaling to get stuck because GF+SGI can't be combined
++ * in SISO rates. Until we find that bug, disable GF, it
++ * has only limited benefit and we still interoperate with
++ * GF APs since we can always receive GF transmissions.
++ */
++ return false;
+ }
+
+ /**
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
+index bf55b4a..d69f88c 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
+
+ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
+ {
+- struct ieee80211_conf conf = { .flags = 0 };
+- struct rt2x00lib_conf libconf = { .conf = &conf };
++ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
+
+ rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index db34db6..a49e848 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -120,15 +120,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
+ u8 request;
+ u16 wvalue;
+ u16 index;
+- __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ __le32 *data;
++ unsigned long flags;
+
++ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
++ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
++ rtlpriv->usb_data_index = 0;
++ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+- if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+- rtlpriv->usb_data_index = 0;
+ return le32_to_cpu(*data);
+ }
+
+@@ -909,6 +913,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ GFP_KERNEL);
+ if (!rtlpriv->usb_data)
+ return -ENOMEM;
++
++ /* this spin lock must be initialized early */
++ spin_lock_init(&rtlpriv->locks.usb_lock);
++
+ rtlpriv->usb_data_index = 0;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ udev = interface_to_usbdev(intf);
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index b1e9deb..deb87e9 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1550,6 +1550,7 @@ struct rtl_locks {
+ spinlock_t rf_lock;
+ spinlock_t lps_lock;
+ spinlock_t waitq_lock;
++ spinlock_t usb_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
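
The rtlwifi hunks fix two ordering problems at once: the shared usb_data_index was advanced with no lock at all, and the new usb_lock is initialized early in probe, before anything can take it. The repaired shape is "claim a slot under the lock, then do the slow USB transfer outside it". A pthread-based sketch of that claim-then-use idea (buffer size and names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_RX_COUNT 4  /* illustrative */

    static pthread_mutex_t usb_lock = PTHREAD_MUTEX_INITIALIZER;
    static int usb_data_index;
    static unsigned int usb_data[MAX_RX_COUNT];

    /* Advance the shared index and pick a slot while holding the lock;
     * the slow transfer into the slot then happens outside it. */
    static unsigned int *claim_slot(void)
    {
        unsigned int *data;

        pthread_mutex_lock(&usb_lock);
        if (++usb_data_index >= MAX_RX_COUNT)
            usb_data_index = 0;
        data = &usb_data[usb_data_index];
        pthread_mutex_unlock(&usb_lock);
        return data;
    }

    int main(void)
    {
        unsigned int *slot = claim_slot();

        *slot = 0xabcd;  /* the USB control read would land here */
        printf("slot %ld = %#x\n", (long)(slot - usb_data), *slot);
        return 0;
    }
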
+diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
+index 7daf4b8..90effcc 100644
+--- a/fs/hfsplus/wrapper.c
++++ b/fs/hfsplus/wrapper.c
+@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct bio *bio;
+ int ret = 0;
+- unsigned int io_size;
++ u64 io_size;
+ loff_t start;
+ int offset;
+
+diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
+index f875b31..16625d7 100644
+--- a/include/linux/input/eeti_ts.h
++++ b/include/linux/input/eeti_ts.h
+@@ -2,6 +2,7 @@
+ #define LINUX_INPUT_EETI_TS_H
+
+ struct eeti_ts_platform_data {
++ int irq_gpio;
+ unsigned int irq_active_high;
+ };
+
+diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
+index 40c37216..32a1b5c 100644
+--- a/include/linux/mfd/ezx-pcap.h
++++ b/include/linux/mfd/ezx-pcap.h
+@@ -16,6 +16,7 @@ struct pcap_subdev {
+ struct pcap_platform_data {
+ unsigned int irq_base;
+ unsigned int config;
++ int gpio;
+ void (*init) (void *); /* board specific init */
+ int num_subdevs;
+ struct pcap_subdev *subdevs;
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index 68223e4..4e9115d 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -428,9 +428,9 @@ static int __init caif_device_init(void)
+
+ static void __exit caif_device_exit(void)
+ {
+- unregister_pernet_subsys(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_packet_type);
++ unregister_pernet_subsys(&caif_net_ops);
+ }
+
+ module_init(caif_device_init);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 05842ab..0cf604b 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -670,6 +670,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
+ }
+ }
+
++static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
++{
++ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
++ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
++}
++
+ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ const struct ifinfomsg *ifm)
+ {
+@@ -678,7 +684,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+- (dev->flags & ~ifm->ifi_change);
++ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
+
+ return flags;
+ }
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 86f3b88..afaa735 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+ case CIPSO_V4_TAG_LOCAL:
+ /* This is a non-standard tag that we only allow for
+ * local connections, so if the incoming interface is
+- * not the loopback device drop the packet. */
+- if (!(skb->dev->flags & IFF_LOOPBACK)) {
++ * not the loopback device drop the packet. Further,
++ * there is no legitimate reason for setting this from
++ * userspace so reject it if skb is NULL. */
++ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+ err_offset = opt_iter;
+ goto validate_return_locked;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 11ba922..ad466a7 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2391,7 +2391,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ /* Cap the max timeout in ms TCP will retry/retrans
+ * before giving up and aborting (ETIMEDOUT) a connection.
+ */
+- icsk->icsk_user_timeout = msecs_to_jiffies(val);
++ if (val < 0)
++ err = -EINVAL;
++ else
++ icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ break;
+ default:
+ err = -ENOPROTOOPT;
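
The tcp.c hunk rejects negative values before the jiffies conversion: msecs_to_jiffies() takes an unsigned argument, so a negative int from userspace would silently become an enormous timeout instead of an error. A two-line demonstration of the conversion hazard (the tick function is a stand-in, not the kernel's):

    #include <stdio.h>

    static unsigned long msecs_to_ticks(unsigned int ms)
    {
        return ms / 10;  /* stand-in for a 100 Hz jiffies conversion */
    }

    int main(void)
    {
        int val = -1000;  /* as supplied by userspace via setsockopt() */

        /* -1000 converts to UINT_MAX - 999, i.e. roughly a 50-day
         * timeout at 100 ticks per second, not an error. */
        printf("%lu ticks\n", msecs_to_ticks(val));
        return 0;
    }
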
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 32e6ca2..a08a621 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5415,7 +5415,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ if (tp->copied_seq == tp->rcv_nxt &&
+ len - tcp_header_len <= tp->ucopy.len) {
+ #ifdef CONFIG_NET_DMA
+- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
++ if (tp->ucopy.task == current &&
++ sock_owned_by_user(sk) &&
++ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ copied_early = 1;
+ eaten = 1;
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index a7078fd..f85de8e 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -543,6 +543,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+
+ del_timer_sync(&sdata->u.mesh.housekeeping_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
++ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
+ /*
+ * If the timer fired while we waited for it, it will have
+ * requeued the work. Now the work will be running again
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 17859ea..351a69b 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -559,6 +559,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
+
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
+ opts = nla_nest_start(skb, TCA_OPTIONS);
++ if (opts == NULL)
++ goto nla_put_failure;
+ NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index b7692aa..0fc18c7 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+
+ epb = &ep->base;
+
+- if (hlist_unhashed(&epb->node))
+- return;
+-
+ epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+
+ head = &sctp_ep_hashtable[epb->hashent];
+
+ sctp_write_lock(&head->lock);
+- __hlist_del(&epb->node);
++ hlist_del_init(&epb->node);
+ sctp_write_unlock(&head->lock);
+ }
+
+@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
+ head = &sctp_assoc_hashtable[epb->hashent];
+
+ sctp_write_lock(&head->lock);
+- __hlist_del(&epb->node);
++ hlist_del_init(&epb->node);
+ sctp_write_unlock(&head->lock);
+ }
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 0075554..8e49d76 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1231,8 +1231,14 @@ out_free:
+ SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
+ " kaddrs: %p err: %d\n",
+ asoc, kaddrs, err);
+- if (asoc)
++ if (asoc) {
++ /* sctp_primitive_ASSOCIATE may have added this association
++ * To the hash table, try to unhash it, just in case, its a noop
++ * if it wasn't hashed so we're safe
++ */
++ sctp_unhash_established(asoc);
+ sctp_association_free(asoc);
++ }
+ return err;
+ }
+
+@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ goto out_unlock;
+
+ out_free:
+- if (new_asoc)
++ if (new_asoc) {
++ sctp_unhash_established(asoc);
+ sctp_association_free(asoc);
++ }
+ out_unlock:
+ sctp_release_sock(sk);
+
+diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
+index 788a12c..2ab7850 100644
+--- a/net/wanrouter/wanmain.c
++++ b/net/wanrouter/wanmain.c
+@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
+ * successfully, add it to the interface list.
+ */
+
+- if (dev->name == NULL) {
+- err = -EINVAL;
+- } else {
++#ifdef WANDEBUG
++ printk(KERN_INFO "%s: registering interface %s...\n",
++ wanrouter_modname, dev->name);
++#endif
+
+- #ifdef WANDEBUG
+- printk(KERN_INFO "%s: registering interface %s...\n",
+- wanrouter_modname, dev->name);
+- #endif
+-
+- err = register_netdev(dev);
+- if (!err) {
+- struct net_device *slave = NULL;
+- unsigned long smp_flags=0;
+-
+- lock_adapter_irq(&wandev->lock, &smp_flags);
+-
+- if (wandev->dev == NULL) {
+- wandev->dev = dev;
+- } else {
+- for (slave=wandev->dev;
+- DEV_TO_SLAVE(slave);
+- slave = DEV_TO_SLAVE(slave))
+- DEV_TO_SLAVE(slave) = dev;
+- }
+- ++wandev->ndev;
+-
+- unlock_adapter_irq(&wandev->lock, &smp_flags);
+- err = 0; /* done !!! */
+- goto out;
++ err = register_netdev(dev);
++ if (!err) {
++ struct net_device *slave = NULL;
++ unsigned long smp_flags=0;
++
++ lock_adapter_irq(&wandev->lock, &smp_flags);
++
++ if (wandev->dev == NULL) {
++ wandev->dev = dev;
++ } else {
++ for (slave=wandev->dev;
++ DEV_TO_SLAVE(slave);
++ slave = DEV_TO_SLAVE(slave))
++ DEV_TO_SLAVE(slave) = dev;
+ }
++ ++wandev->ndev;
++
++ unlock_adapter_irq(&wandev->lock, &smp_flags);
++ err = 0; /* done !!! */
++ goto out;
+ }
+ if (wandev->del_if)
+ wandev->del_if(wandev, dev);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 220f3bd..8f5042d 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -971,6 +971,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+ */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&wdev->list);
++ /*
++ * Ensure that all events have been processed and
++ * freed.
++ */
++ cfg80211_process_wdev_events(wdev);
+ break;
+ case NETDEV_PRE_UP:
+ if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index b9ec306..02c3be3 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ u32 *flags, struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+
+ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index b5e4c1c..22fb802 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -725,7 +725,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
+ wdev->connect_keys = NULL;
+ }
+
+-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
++void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+ {
+ struct cfg80211_event *ev;
+ unsigned long flags;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 51a1afc..402f330 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3059,7 +3059,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
+- SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2e2eb93..32c8169 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4981,6 +4981,8 @@ static const struct alc_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_PCM_44K] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_pcm_44k,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_QUANTA_MUTE
+ },
+ [ALC269_FIXUP_STEREO_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+@@ -5077,9 +5079,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
++ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+
+ #if 1
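
The closing patch_realtek.c hunk untangles a duplicate quirk: the table had two entries for the same Lenovo ID (0x17aa, 0x3bf8), and since quirk lookup stops at the first match, the second entry (PCM_44K) was dead. The fix keeps one entry and chains the second fixup onto the first instead. A sketch of why first-match tables make duplicates unreachable (the lookup loop is modeled on the general idea, not copied from the driver):

    #include <stdio.h>
    #include <stddef.h>

    struct quirk { unsigned short vendor, device; const char *fixup; };

    static const struct quirk tbl[] = {
        { 0x17aa, 0x3bf8, "QUANTA_MUTE" },
        { 0x17aa, 0x3bf8, "PCM_44K" },  /* never reached */
        { 0, 0, NULL }
    };

    static const char *lookup(unsigned short v, unsigned short d)
    {
        const struct quirk *q;

        for (q = tbl; q->fixup; q++)  /* first match wins */
            if (q->vendor == v && q->device == d)
                return q->fixup;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", lookup(0x17aa, 0x3bf8));  /* always QUANTA_MUTE */
        return 0;
    }
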
diff --git a/3.2.54/1028_linux-3.2.29.patch b/3.2.54/1028_linux-3.2.29.patch
new file mode 100644
index 0000000..3c65179
--- /dev/null
+++ b/3.2.54/1028_linux-3.2.29.patch
@@ -0,0 +1,4279 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index f986e7d..82d7fa6 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5452,7 +5452,7 @@ F: Documentation/blockdev/ramdisk.txt
+ F: drivers/block/brd.c
+
+ RANDOM NUMBER DRIVER
+-M: Matt Mackall <mpm@selenic.com>
++M: Theodore Ts'o" <tytso@mit.edu>
+ S: Maintained
+ F: drivers/char/random.c
+
+diff --git a/Makefile b/Makefile
+index 5368961..d96fc2a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 640f909..6f1aca7 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -14,8 +14,8 @@
+ */
+
+
+-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
+diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
+index 06edfef..3eeb47c 100644
+--- a/arch/alpha/include/asm/socket.h
++++ b/arch/alpha/include/asm/socket.h
+@@ -69,9 +69,11 @@
+
+ #define SO_RXQ_OVFL 40
+
++#ifdef __KERNEL__
+ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+ #define SOCK_NONBLOCK 0x40000000
++#endif /* __KERNEL__ */
+
+ #endif /* _ASM_SOCKET_H */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9451dce..8512475 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -288,13 +288,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <--------------- offset --------------------> <- type --> 0 0 0
++ * <--------------- offset ----------------------> < type -> 0 0 0
+ *
+- * This gives us up to 63 swap files and 32GB per swap file. Note that
++ * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+ #define __SWP_TYPE_SHIFT 3
+-#define __SWP_TYPE_BITS 6
++#define __SWP_TYPE_BITS 5
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
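
The pgtable.h hunk narrows the swap-entry type field from six bits to five, and the rewritten comment's numbers follow by arithmetic: in a 32-bit entry, 3 always-zero low bits plus 5 type bits leave 24 offset bits, i.e. 2^24 pages of 4 KB = 64 GB per swap file, with 31 usable type values (mirroring the old comment's 63 for six bits). A standalone encoder using the hunk's shift and mask constants:

    #include <stdio.h>

    #define SWP_TYPE_SHIFT 3
    #define SWP_TYPE_BITS  5
    #define SWP_TYPE_MASK  ((1u << SWP_TYPE_BITS) - 1)
    #define SWP_OFF_SHIFT  (SWP_TYPE_BITS + SWP_TYPE_SHIFT)

    static unsigned int swp_entry(unsigned int type, unsigned int offset)
    {
        return (offset << SWP_OFF_SHIFT) |
               ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT);
    }

    int main(void)
    {
        unsigned int max_off = (1u << (32 - SWP_OFF_SHIFT)) - 1;

        /* 24 offset bits of 4 KB pages = 64 GB per swap file. */
        printf("max offset pages: %u (%llu GB)\n", max_off,
               ((unsigned long long)max_off + 1) * 4096 >> 30);
        printf("entry(3, 42) = %#x\n", swp_entry(3, 42));
        return 0;
    }
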
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index c202113..ea94765 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
+-#ifdef CONFIG_ARM_ERRATA_720789
+- mov r3, #0
+-#else
+ asid r3, r3 @ mask ASID
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(W(mov) r3, #0 )
++ ALT_UP(W(nop) )
+ #endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index ad83dad..f0702f3 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -628,8 +628,10 @@ static int __init vfp_init(void)
+ if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ elf_hwcap |= HWCAP_NEON;
+ #endif
++#ifdef CONFIG_VFPv3
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
++#endif
+ }
+ }
+ return 0;
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index f581a18..df7d12c 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ }
+
+ /*
+- * search for a shareable pmd page for hugetlb.
++ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
++ * and returns the corresponding pte. While this is not necessary for the
++ * !shared pmd case because we can allocate the pmd later as well, it makes the
++ * code much cleaner. pmd allocation is essential for the shared case because
++ * pud has to be populated inside the same i_mmap_mutex section - otherwise
++ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
++ * bad pmd for sharing.
+ */
+-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
++static pte_t *
++huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ {
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
++ pte_t *pte;
+
+ if (!vma_shareable(vma, addr))
+- return;
++ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+ out:
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
++ return pte;
+ }
+
+ /*
+@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (pud_none(*pud))
+- huge_pmd_share(mm, addr, pud);
+- pte = (pte_t *) pmd_alloc(mm, pud, addr);
++ pte = huge_pmd_share(mm, addr, pud);
++ else
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
+index e7d13f5..d05f2fe 100644
+--- a/drivers/acpi/acpica/tbxface.c
++++ b/drivers/acpi/acpica/tbxface.c
+@@ -436,6 +436,7 @@ acpi_get_table_with_size(char *signature,
+
+ return (AE_NOT_FOUND);
+ }
++ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+
+ acpi_status
+ acpi_get_table(char *signature,
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8c78443..3790c80 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -385,7 +385,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ goto repeat;
+ }
+
+- dev->power.deferred_resume = false;
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+@@ -446,6 +445,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
++ dev->power.deferred_resume = false;
+ rpm_resume(dev, 0);
+ retval = -EAGAIN;
+ goto out;
+@@ -568,6 +568,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
++ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+@@ -645,7 +646,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+- if (!retval)
++ if (retval >= 0)
+ rpm_idle(dev, RPM_ASYNC);
+
+ out:
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index acda773..38aa6dd 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
+ {
+ case CMD_TARGET_STATUS:
+ /* Pass it up to the upper layers... */
+- if( ei->ScsiStatus)
+- {
+-#if 0
+- printk(KERN_WARNING "cciss: cmd %p "
+- "has SCSI Status = %x\n",
+- c, ei->ScsiStatus);
+-#endif
+- cmd->result |= (ei->ScsiStatus << 1);
+- }
+- else { /* scsi status is zero??? How??? */
++ if (!ei->ScsiStatus) {
+
+ /* Ordinarily, this case should never happen, but there is a bug
+ in some released firmware revisions that allows it to happen
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 650a308..de9c800 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -4,6 +4,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/virtio.h>
+ #include <linux/virtio_blk.h>
+ #include <linux/scatterlist.h>
+@@ -26,14 +27,17 @@ struct virtio_blk
+ /* The disk structure for the kernel. */
+ struct gendisk *disk;
+
+- /* Request tracking. */
+- struct list_head reqs;
+-
+ mempool_t *pool;
+
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
++ /* Lock for config space updates */
++ struct mutex config_lock;
++
++ /* enable config space updates */
++ bool config_enable;
++
+ /* What host tells us, plus 2 for header & tailer. */
+ unsigned int sg_elems;
+
+@@ -46,7 +50,6 @@ struct virtio_blk
+
+ struct virtblk_req
+ {
+- struct list_head list;
+ struct request *req;
+ struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
+@@ -90,7 +93,6 @@ static void blk_done(struct virtqueue *vq)
+ }
+
+ __blk_end_request_all(vbr->req, error);
+- list_del(&vbr->list);
+ mempool_free(vbr, vblk->pool);
+ }
+ /* In case queue is stopped waiting for more buffers. */
+@@ -175,7 +177,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+ return false;
+ }
+
+- list_add_tail(&vbr->list, &vblk->reqs);
+ return true;
+ }
+
+@@ -316,6 +317,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
++ mutex_lock(&vblk->config_lock);
++ if (!vblk->config_enable)
++ goto done;
++
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+@@ -338,6 +343,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
++done:
++ mutex_unlock(&vblk->config_lock);
+ }
+
+ static void virtblk_config_changed(struct virtio_device *vdev)
+@@ -381,11 +388,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_free_index;
+ }
+
+- INIT_LIST_HEAD(&vblk->reqs);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
++ mutex_init(&vblk->config_lock);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
++ vblk->config_enable = true;
+
+ /* We expect one virtqueue, for output. */
+ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
+@@ -539,16 +547,19 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+ struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
+
+- flush_work(&vblk->config_work);
++ /* Prevent config work handler from accessing the device. */
++ mutex_lock(&vblk->config_lock);
++ vblk->config_enable = false;
++ mutex_unlock(&vblk->config_lock);
+
+- /* Nothing should be pending. */
+- BUG_ON(!list_empty(&vblk->reqs));
++ del_gendisk(vblk->disk);
++ blk_cleanup_queue(vblk->disk->queue);
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+- del_gendisk(vblk->disk);
+- blk_cleanup_queue(vblk->disk->queue);
++ flush_work(&vblk->config_work);
++
+ put_disk(vblk->disk);
+ mempool_destroy(vblk->pool);
+ vdev->config->del_vqs(vdev);
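
The virtio_blk hunks establish a clean shutdown handshake for the asynchronous config work: remove() first takes config_lock and clears config_enable, so a handler that is already running bails out at the top, and only then resets the device and flushes the work. A pthread sketch of that flag-under-mutex pattern (the "work" here is a plain function call standing in for a workqueue item):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool config_enable = true;

    static void config_changed_work(void)
    {
        pthread_mutex_lock(&config_lock);
        if (!config_enable)
            goto done;  /* device is going away; touch nothing */
        puts("re-reading config space");
    done:
        pthread_mutex_unlock(&config_lock);
    }

    static void remove_device(void)
    {
        /* Prevent the work handler from accessing the device. */
        pthread_mutex_lock(&config_lock);
        config_enable = false;
        pthread_mutex_unlock(&config_lock);
        /* ...reset the device, then flush still-running work... */
        config_changed_work();  /* stand-in for flush_work() */
    }

    int main(void)
    {
        config_changed_work();  /* runs normally */
        remove_device();        /* later invocations are no-ops */
        return 0;
    }
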
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 631d4f6..8ae9235 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1114,6 +1114,16 @@ static void init_std_data(struct entropy_store *r)
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
++/*
++ * Note that setup_arch() may call add_device_randomness()
++ * long before we get here. This allows seeding of the pools
++ * with some platform dependent data very early in the boot
++ * process. But it limits our options here. We must use
++ * statically allocated structures that already have all
++ * initializations complete at compile time. We should also
++ * take care not to overwrite the precious per platform data
++ * we were given.
++ */
+ static int rand_initialize(void)
+ {
+ init_std_data(&input_pool);
+@@ -1391,10 +1401,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+- uuid[8] = 0;
+- }
+- if (uuid[8] == 0)
+ generate_random_uuid(uuid);
++ } else {
++ static DEFINE_SPINLOCK(bootid_spinlock);
++
++ spin_lock(&bootid_spinlock);
++ if (!uuid[8])
++ generate_random_uuid(uuid);
++ spin_unlock(&bootid_spinlock);
++ }
+
+ sprintf(buf, "%pU", uuid);
+
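
The random.c hunk serializes generation of the persistent boot id: two readers of /proc/sys/kernel/random/boot_id could previously race, each seeing the sentinel unset and producing a different id. Re-checking the sentinel inside the lock makes initialization happen exactly once (byte 8 works as the sentinel because a generated UUID carries the RFC 4122 variant bits there and is never zero). A pthread sketch of check-inside-the-lock, once-only init:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t bootid_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char uuid[16];  /* uuid[8] == 0 means "not yet set" */

    static void generate_random_uuid(unsigned char *u)
    {
        u[8] = 0x80 | 42;  /* stand-in; variant bits keep it non-zero */
    }

    static void get_boot_id(void)
    {
        pthread_mutex_lock(&bootid_lock);
        if (!uuid[8])  /* test again while holding the lock */
            generate_random_uuid(uuid);
        pthread_mutex_unlock(&bootid_lock);
    }

    int main(void)
    {
        get_boot_id();
        get_boot_id();  /* second call is a no-op */
        printf("uuid[8] = %#x\n", uuid[8]);
        return 0;
    }
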
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 153980b..b298158 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -6,6 +6,7 @@
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/bootmem.h>
++#include <linux/random.h>
+ #include <asm/dmi.h>
+
+ /*
+@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+
+ dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+
++ add_device_randomness(buf, dmi_len);
++
+ dmi_iounmap(buf, dmi_len);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cc75c4b..3eed270 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4748,17 +4748,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ continue;
+ }
+
+- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+- /* Use VBT settings if we have an eDP panel */
+- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+-
+- if (edp_bpc < display_bpc) {
+- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+- display_bpc = edp_bpc;
+- }
+- continue;
+- }
+-
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index fae2050..c8ecaab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1152,10 +1152,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
++ /* We need to switch off panel power _and_ force vdd, for otherwise some
++ * panels get very unhappy and cease to work. */
++ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
++ intel_dp->want_panel_vdd = false;
++
+ ironlake_wait_panel_off(intel_dp);
+ }
+
+@@ -1265,11 +1269,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
+
+ static void intel_dp_commit(struct drm_encoder *encoder)
+@@ -1304,11 +1306,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, mode);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
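Editor's note: taken together, the two intel_dp.c hunks enforce one shutdown order for eDP: backlight off, then the DPMS-off write to the sink (which talks over AUX and so needs panel power), then panel power off with EDP_FORCE_VDD held so the final AUX traffic still works, and only then link down. The explicit ironlake_edp_panel_vdd_off() call disappears because ironlake_edp_panel_off() now clears want_panel_vdd itself. Condensed into one sketch (the function names are the ones in the diff; the wrapper itself is illustrative):

static void example_edp_power_down(struct intel_dp *intel_dp, int mode)
{
        ironlake_edp_panel_vdd_on(intel_dp);    /* AUX must stay usable */
        ironlake_edp_backlight_off(intel_dp);   /* 1: backlight first */
        intel_dp_sink_dpms(intel_dp, mode);     /* 2: talk to the sink while powered */
        ironlake_edp_panel_off(intel_dp);       /* 3: cuts power, forces VDD,
                                                 *    clears want_panel_vdd */
        intel_dp_link_down(intel_dp);           /* 4: finally drop the link */
}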
+diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
+index cb006a7..3002d82 100644
+--- a/drivers/gpu/drm/nouveau/nvd0_display.c
++++ b/drivers/gpu/drm/nouveau/nvd0_display.c
+@@ -472,7 +472,7 @@ static int
+ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+- const u32 data = (y << 16) | x;
++ const u32 data = (y << 16) | (x & 0xffff);
+
+ nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
+ nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
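Editor's note: the nouveau fix matters when the cursor is dragged past the left or top edge: x goes negative, and without the mask its sign-extended upper 16 bits clobber the y field of the packed register value. A small host-side demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int x = -5, y = 100;

        uint32_t bad  = (uint32_t)((y << 16) | x);            /* sign bits leak */
        uint32_t good = (uint32_t)((y << 16) | (x & 0xffff)); /* the patched form */

        printf("bad  y=%u\n", bad  >> 16);   /* 65535, not 100 */
        printf("good y=%u\n", good >> 16);   /* 100 */
        return 0;
}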
+diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
+index 1b50ad8..4760466 100644
+--- a/drivers/gpu/drm/radeon/atombios.h
++++ b/drivers/gpu/drm/radeon/atombios.h
+@@ -101,6 +101,7 @@
+ #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+ #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+ #define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
++#define ATOM_INIT (ATOM_DISABLE+7)
+ #define ATOM_GET_STATUS (ATOM_DISABLE+8)
+
+ #define ATOM_BLANKING 1
+@@ -251,25 +252,25 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+- USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
++ USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT MemoryPLLInit;
+- USHORT AdjustDisplayPll; //only used by Bios
++ USHORT MemoryPLLInit; //Atomic Table, used only by Bios
++ USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+- USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
++ USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+- USHORT GetConditionalGoldenSetting; //only used by Bios
++ USHORT GetConditionalGoldenSetting; //Only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+- USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+- USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
++ USHORT PatchMCSetting; //only used by BIOS
++ USHORT MC_SEQ_Control; //only used by BIOS
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -282,7 +283,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+- USHORT UpdateCRTC_DoubleBufferRegisters;
++ USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -308,27 +309,36 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+- USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
++ USHORT ComputeMemoryClockParam; //Function Table, only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
++ USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
++ USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
+ }ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+ // For backward compatible
+ #define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
+-#define UNIPHYTransmitterControl DIG1TransmitterControl
+-#define LVTMATransmitterControl DIG2TransmitterControl
++#define DPTranslatorControl DIG2EncoderControl
++#define UNIPHYTransmitterControl DIG1TransmitterControl
++#define LVTMATransmitterControl DIG2TransmitterControl
+ #define SetCRTC_DPM_State GetConditionalGoldenSetting
+ #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+ #define HPDInterruptService ReadHWAssistedI2CStatus
+ #define EnableVGA_Access GetSCLKOverMCLKRatio
+-#define GetDispObjectInfo EnableYUV
++#define EnableYUV GetDispObjectInfo
++#define DynamicClockGating EnableDispPowerGating
++#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
++
++#define TMDSAEncoderControl PatchMCSetting
++#define LVDSEncoderControl MC_SEQ_Control
++#define LCD1OutputControl HW_Misc_Operation
++
+
+ typedef struct _ATOM_MASTER_COMMAND_TABLE
+ {
+@@ -495,6 +505,34 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+ // ucInputFlag
+ #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
++// use for ComputeMemoryClockParamTable
++typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
++{
++ union
++ {
++ ULONG ulClock;
++ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
++ };
++ UCHAR ucDllSpeed; //Output
++ UCHAR ucPostDiv; //Output
++ union{
++ UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
++ UCHAR ucPllCntlFlag; //Output:
++ };
++ UCHAR ucBWCntl;
++}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
++
++// definition of ucInputFlag
++#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
++// definition of ucPllCntlFlag
++#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
++#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
++#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
++#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
++
++//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
++#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
++
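Editor's note: the union in COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 means the same 32-bit field is a plain clock on input and a packed feedback divider on output: integer part in the upper word, fractional part in the lower word shifted left by (16 - FB_FRACTION_BITS). A worked unpacking, assuming FB_FRACTION_BITS == 12 purely for illustration (the real fraction width comes from the ASIC, not this header):

#include <stdint.h>
#include <stdio.h>

#define FB_FRACTION_BITS 12   /* assumed for the example */

int main(void)
{
        uint32_t packed  = 0x00A45000;  /* FB_DIV word as returned by the table */
        uint32_t fb_int  = packed >> 16;                                 /* 164 */
        uint32_t fb_frac = (packed & 0xffff) >> (16 - FB_FRACTION_BITS); /* 1280 */

        /* effective divider = fb_int + fb_frac / 2^FB_FRACTION_BITS */
        printf("fb = %u + %u/%u\n", fb_int, fb_frac, 1u << FB_FRACTION_BITS);
        return 0;
}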
+ typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+@@ -562,6 +600,16 @@ typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+ #define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
+
+ /****************************************************************************/
++// Structure used by EnableDispPowerGatingTable.ctb
++/****************************************************************************/
++typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
++{
++ UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucPadding[2];
++}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
++
++/****************************************************************************/
+ // Structure used by EnableASIC_StaticPwrMgtTable.ctb
+ /****************************************************************************/
+ typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+@@ -807,6 +855,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
++#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
+ #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+ #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+@@ -814,6 +863,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+ #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+ #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
++#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
+
+ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+ {
+@@ -1171,6 +1221,106 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+ #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
+
++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
++{
++#if ATOM_BIG_ENDIAN
++ UCHAR ucReservd1:1;
++ UCHAR ucHPDSel:3;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucReserved:1;
++#else
++ UCHAR ucReserved:1;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucHPDSel:3;
++ UCHAR ucReservd1:1;
++#endif
++}ATOM_DIG_TRANSMITTER_CONFIG_V5;
++
++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++{
++ USHORT usSymClock; // Encoder Clock in 10kHz, (DP mode) = linkclock/10, (TMDS/LVDS/HDMI) = pixel clock, (HDMI deep color) = pixel clock * deep_color_ratio
++ UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF
++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
++ UCHAR ucLaneNum; // indicate lane number 1-8
++ UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
++ UCHAR ucDigMode; // indicate DIG mode
++ union{
++ ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++ UCHAR ucConfig;
++ };
++ UCHAR ucDigEncoderSel; // indicate DIG front end encoder
++ UCHAR ucDPLaneSet;
++ UCHAR ucReserved;
++ UCHAR ucReserved1;
++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
++
++//ucPhyId
++#define ATOM_PHY_ID_UNIPHYA 0
++#define ATOM_PHY_ID_UNIPHYB 1
++#define ATOM_PHY_ID_UNIPHYC 2
++#define ATOM_PHY_ID_UNIPHYD 3
++#define ATOM_PHY_ID_UNIPHYE 4
++#define ATOM_PHY_ID_UNIPHYF 5
++#define ATOM_PHY_ID_UNIPHYG 6
++
++// ucDigEncoderSel
++#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
++#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
++#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
++#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
++#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
++#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
++#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
++
++// ucDigMode
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
++#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
++#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
++#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
++#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
++
++// ucDPLaneSet
++#define DP_LANE_SET__0DB_0_4V 0x00
++#define DP_LANE_SET__0DB_0_6V 0x01
++#define DP_LANE_SET__0DB_0_8V 0x02
++#define DP_LANE_SET__0DB_1_2V 0x03
++#define DP_LANE_SET__3_5DB_0_4V 0x08
++#define DP_LANE_SET__3_5DB_0_6V 0x09
++#define DP_LANE_SET__3_5DB_0_8V 0x0a
++#define DP_LANE_SET__6DB_0_4V 0x10
++#define DP_LANE_SET__6DB_0_6V 0x11
++#define DP_LANE_SET__9_5DB_0_4V 0x18
++
++// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++// Bit1
++#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
++
++// Bit3:2
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
++
++#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
++#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
++// Bit6:4
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
++
++#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
++
++#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++
++
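Editor's note: usSymClock is always in 10kHz units, but what it encodes depends on ucDigMode: for DP it is the link clock divided by 10, for TMDS/LVDS/DVI it is the pixel clock, and for HDMI deep color the pixel clock scaled by the bits-per-pixel ratio. A hedged helper (the deep-color handling is illustrative; real drivers compute this inline at each call site):

#include <stdint.h>

/* clocks in 10kHz units; bpp == 24 means no deep color */
static uint16_t example_sym_clock(uint8_t dig_mode, uint32_t link_clk_10khz,
                                  uint32_t pixel_clk_10khz, uint32_t bpp)
{
        switch (dig_mode) {
        case ATOM_TRANSMITTER_DIGMODE_V5_DP:
                return link_clk_10khz / 10;        /* 270000 -> 27000 for a 2.7GHz link */
        case ATOM_TRANSMITTER_DIGMODE_V5_HDMI:
                return pixel_clk_10khz * bpp / 24; /* deep color scales the pixel clock */
        default:
                return pixel_clk_10khz;            /* TMDS/LVDS/DVI: pixel clock as-is */
        }
}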
+ /****************************************************************************/
+ // Structures used by ExternalEncoderControlTable V1.3
+ // ASIC Families: Evergreen, Llano, NI
+@@ -1793,6 +1943,7 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+ #define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+ #define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+ #define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
++#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+ #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+@@ -2030,12 +2181,77 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
+ USHORT usVoltageLevel; // real voltage level
+ }SET_VOLTAGE_PARAMETERS_V2;
+
++
++typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
++{
++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Indicate action: Set voltage level
++ USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
++}SET_VOLTAGE_PARAMETERS_V1_3;
++
++//ucVoltageType
++#define VOLTAGE_TYPE_VDDC 1
++#define VOLTAGE_TYPE_MVDDC 2
++#define VOLTAGE_TYPE_MVDDQ 3
++#define VOLTAGE_TYPE_VDDCI 4
++
++//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
++#define ATOM_SET_VOLTAGE 0 //Set voltage Level
++#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
++#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
++#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
++#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
++
++// define virtual voltage id in usVoltageLevel
++#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
++#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
++#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
++#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
++
+ typedef struct _SET_VOLTAGE_PS_ALLOCATION
+ {
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+ }SET_VOLTAGE_PS_ALLOCATION;
+
++// Newly added from SI for GetVoltageInfoTable, input parameter structure
++typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
++ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
++ ULONG ulReserved;
++}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
++
++// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
++typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ ULONG ulVotlageGpioState;
++ ULONG ulVoltageGPioMask;
++}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
++typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ USHORT usVoltageLevel;
++ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
++ ULONG ulReseved;
++}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++
++// GetVoltageInfo v1.1 ucVoltageMode
++#define ATOM_GET_VOLTAGE_VID 0x00
++#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
++#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
++// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
++#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
++
++// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
++#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
++// undefined power state
++#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
++#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
++
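Editor's note: reading the SI leakage tables with GetVoltageInfo means filling the input structure with a voltage type, one of the ATOM_GET_VOLTAGE_* modes above, and a virtual voltage ID from the power-play table, then reinterpreting the same buffer as the leakage output variant. A fragment-level sketch (the atom_execute_table() call and ctx are stand-ins for the driver's parser entry point, not confirmed API here):

GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 args;

memset(&args, 0, sizeof(args));
args.ucVoltageType  = VOLTAGE_TYPE_VDDC;
args.ucVoltageMode  = ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID;  /* 0xff01 state */
args.usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;             /* leakage id */

/* atom_execute_table(ctx, GetVoltageInfo, (uint32_t *)&args);  -- driver call */
/* the real level is then ((GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1 *)
 *                         &args)->usVoltageLevel */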
+ /****************************************************************************/
+ // Structures used by TVEncoderControlTable
+ /****************************************************************************/
+@@ -2065,9 +2281,9 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+- USHORT DAC_Info; // Will be obsolete from R600
++ USHORT PaletteData; // Only used by BIOS
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
+- USHORT TMDS_Info; // Will be obsolete from R600
++ USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+@@ -2096,15 +2312,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
+ }ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+-// For backward compatible
+-#define LVDS_Info LCD_Info
+-
+ typedef struct _ATOM_MASTER_DATA_TABLE
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+ }ATOM_MASTER_DATA_TABLE;
+
++// For backward compatible
++#define LVDS_Info LCD_Info
++#define DAC_Info PaletteData
++#define TMDS_Info DIGTransmitterInfo
+
+ /****************************************************************************/
+ // Structure used in MultimediaCapabilityInfoTable
+@@ -2171,7 +2388,9 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ {
+ #if ATOM_BIG_ENDIAN
+- USHORT Reserved:3;
++ USHORT Reserved:1;
++ USHORT SCL2Redefined:1;
++ USHORT PostWithoutModeSet:1;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+@@ -2193,7 +2412,9 @@ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+- USHORT Reserved:3;
++ USHORT PostWithoutModeSet:1;
++ USHORT SCL2Redefined:1;
++ USHORT Reserved:1;
+ #endif
+ }ATOM_FIRMWARE_CAPABILITY;
+
+@@ -2418,7 +2639,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+- ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+@@ -2438,6 +2660,11 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+
+ #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
+
++
++// definition of ucRemoteDisplayConfig
++#define REMOTE_DISPLAY_DISABLE 0x00
++#define REMOTE_DISPLAY_ENABLE 0x01
++
+ /****************************************************************************/
+ // Structures used in IntegratedSystemInfoTable
+ /****************************************************************************/
+@@ -2660,8 +2887,9 @@ usMinDownStreamHTLinkWidth: same as above.
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
+
+-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code
+
+ #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
+ #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
+@@ -2753,6 +2981,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+ #define ASIC_INT_DIG4_ENCODER_ID 0x0b
+ #define ASIC_INT_DIG5_ENCODER_ID 0x0c
+ #define ASIC_INT_DIG6_ENCODER_ID 0x0d
++#define ASIC_INT_DIG7_ENCODER_ID 0x0e
+
+ //define Encoder attribute
+ #define ATOM_ANALOG_ENCODER 0
+@@ -3226,15 +3455,23 @@ typedef struct _ATOM_LCD_INFO_V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+- UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
++ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+- ULONG ulReserved[4];
++ UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
++ UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
++ UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
++ UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
++
++ USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
++ UCHAR uceDPToLVDSRxId;
++ UCHAR ucLcdReservd;
++ ULONG ulReserved[2];
+ }ATOM_LCD_INFO_V13;
+
+ #define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+@@ -3273,6 +3510,11 @@ typedef struct _ATOM_LCD_INFO_V13
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+ #define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
+
++//uceDPToLVDSRxId
++#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
++#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
++#define eDP_TO_LVDS_RT_ID 0x02 // RT translator which requires AMD SW init
++
+ typedef struct _ATOM_PATCH_RECORD_MODE
+ {
+ UCHAR ucRecordType;
+@@ -3317,6 +3559,7 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+ #define LCD_CAP_RECORD_TYPE 3
+ #define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
+ #define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
++#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
+ #define ATOM_RECORD_END_TYPE 0xFF
+
+ /****************************Spread Spectrum Info Table Definitions **********************/
+@@ -3528,6 +3771,7 @@ else //Non VGA case
+
+ CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
+
++/***********************************************************************************/
+ #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
+
+ typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+@@ -3818,13 +4062,17 @@ typedef struct _EXT_DISPLAY_PATH
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+- UCHAR ucReserved;
+- USHORT usReserved[2];
++ UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
++ USHORT usCaps;
++ USHORT usReserved;
+ }EXT_DISPLAY_PATH;
+
+ #define NUMBER_OF_UCHAR_FOR_GUID 16
+ #define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
++//usCaps
++#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
++
+ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -3832,7 +4080,9 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple checksum: the sum of the whole structure equals 0x0.
+ UCHAR uc3DStereoPinId; // use for eDP panel
+- UCHAR Reserved [6]; // for potential expansion
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR uceDPToLVDSRxId;
++ UCHAR Reserved[4]; // for potential expansion
+ }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+//Related definitions, all records are different but they have a common header
+@@ -3977,6 +4227,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
+
+ // Indexes to GPIO array in GLSync record
++// GLSync record is for Frame Lock/Gen Lock feature.
+ #define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+ #define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+ #define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+@@ -3984,7 +4235,9 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+ #define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+ #define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+-#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
++#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
+
+ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ {
+@@ -3994,7 +4247,8 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ }ATOM_ENCODER_DVO_CF_RECORD;
+
+ // Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+-#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
++#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
++#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+
+ typedef struct _ATOM_ENCODER_CAP_RECORD
+ {
+@@ -4003,11 +4257,13 @@ typedef struct _ATOM_ENCODER_CAP_RECORD
+ USHORT usEncoderCap;
+ struct {
+ #if ATOM_BIG_ENDIAN
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ #else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
+ #endif
+ };
+ };
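Editor's note: the anonymous union above lets usEncoderCap be read either through the endian-mirrored bit-fields or as a raw USHORT; mask tests against the raw value avoid depending on compiler bit-field layout entirely. A minimal check using the ATOM_ENCODER_CAP_RECORD_* masks (illustrative helper; le16_to_cpu is the kernel byte-order accessor, since ATOM table data is little endian):

static int example_hbr2_usable(const ATOM_ENCODER_CAP_RECORD *rec)
{
        USHORT caps = le16_to_cpu(rec->usEncoderCap);

        return (caps & ATOM_ENCODER_CAP_RECORD_HBR2) &&
               (caps & ATOM_ENCODER_CAP_RECORD_HBR2_EN);
}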
+@@ -4157,6 +4413,7 @@ typedef struct _ATOM_VOLTAGE_CONTROL
+ #define VOLTAGE_CONTROL_ID_VT1556M 0x07
+ #define VOLTAGE_CONTROL_ID_CHL822x 0x08
+ #define VOLTAGE_CONTROL_ID_VT1586M 0x09
++#define VOLTAGE_CONTROL_ID_UP1637 0x0A
+
+ typedef struct _ATOM_VOLTAGE_OBJECT
+ {
+@@ -4193,6 +4450,69 @@ typedef struct _ATOM_LEAKID_VOLTAGE
+ USHORT usVoltage;
+ }ATOM_LEAKID_VOLTAGE;
+
++typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
++ UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
++ USHORT usSize; //Size of Object
++}ATOM_VOLTAGE_OBJECT_HEADER_V3;
++
++typedef struct _VOLTAGE_LUT_ENTRY_V2
++{
++ ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
++}VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
++{
++ USHORT usVoltageLevel; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageId;
++ USHORT usLeakageId; // The corresponding Voltage Value, in mV
++}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
++ UCHAR ucVoltageControlI2cLine;
++ UCHAR ucVoltageControlAddress;
++ UCHAR ucVoltageControlOffset;
++ ULONG ulReserved;
++ VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
++}ATOM_I2C_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageGpioCntlId; // default is 0, which indicates control through CG VID mode
++ UCHAR ucGpioEntryNum; // indicates the entry number of the Voltage/GPIO value look-up table
++ UCHAR ucPhaseDelay; // phase delay in unit of micro second
++ UCHAR ucReserved;
++ ULONG ulGpioMaskVal; // GPIO Mask value
++ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
++}ATOM_GPIO_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucLeakageCntlId; // default is 0
++ UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
++ UCHAR ucReserved[2];
++ ULONG ulMaxVoltageLevel;
++ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
++}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
++
++typedef union _ATOM_VOLTAGE_OBJECT_V3{
++ ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
++ ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
++ ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
++}ATOM_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
++}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
++
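Editor's note: every V3 voltage object starts with the common header, and usSize is the full byte length of that object including its variable-length LUT, so a parser locates the object for a given type/mode by walking the array in usSize steps. A hedged walker (in a real driver the end pointer would come from the enclosing info table's size field):

static ATOM_VOLTAGE_OBJECT_V3 *
example_lookup_voltage_object(UCHAR *start, UCHAR *end, UCHAR vtype, UCHAR vmode)
{
        while (start + sizeof(ATOM_VOLTAGE_OBJECT_HEADER_V3) <= end) {
                ATOM_VOLTAGE_OBJECT_HEADER_V3 *hdr = (void *)start;
                USHORT size = le16_to_cpu(hdr->usSize);

                if (!size)                      /* defend against a corrupt table */
                        break;
                if (hdr->ucVoltageType == vtype && hdr->ucVoltageMode == vmode)
                        return (ATOM_VOLTAGE_OBJECT_V3 *)hdr;
                start += size;
        }
        return NULL;
}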
+ typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+ {
+ UCHAR ucProfileId;
+@@ -4305,7 +4625,18 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+- ULONG ulReserved3[21];
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ ULONG ulReserved3[15];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+ }ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+@@ -4313,9 +4644,16 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+-// ulOtherDisplayMisc
+-#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++//ucLVDSMisc:
++#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
++#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
++#define SYS_INFO_LVDSMISC__888_BPC 0x04
++#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
++#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
+
++// not used any more
++#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
++#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
+
+ /**********************************************************************************************************************
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+@@ -4384,7 +4722,208 @@ ucUMAChannelNumber: System memory channel numbers.
+ ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+-sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
++sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
++ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
++ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
++ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
++ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
++ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
++usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
++usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for down spread (default); 1 for center spread.
++usLvdsSSPercentage: LVDS panel (not including eDP) Spread Spectrum Percentage in units of 0.01%, =0, use VBIOS default setting.
++usLvdsSSpreadRateIn10Hz: LVDS panel (not including eDP) Spread Spectrum frequency in units of 10Hz, =0, use VBIOS default setting.
++usHDMISSPercentage: HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%, =0, use VBIOS default setting.
++usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in units of 10Hz, =0, use VBIOS default setting.
++usDVISSPercentage: DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%, =0, use VBIOS default setting.
++usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock of the LVDS panel in single link mode; if =0, VBIOS uses the default threshold, currently 85MHz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link are not swapped, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bits per color =1: 888 bits per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
++**********************************************************************************************************************/
++
++// this table is used for Llano/Ontario APUs
++typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
++{
++ ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
++ ULONG ulPowerplayTable[128];
++}ATOM_FUSION_SYSTEM_INFO_V1;
++/**********************************************************************************************************************
++ ATOM_FUSION_SYSTEM_INFO_V1 Description
++sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
++ulPowerplayTable[128]: This 512-byte area is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
++**********************************************************************************************************************/
++
++// this IntegratedSystemInfoTable is used for the Trinity APU
++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ULONG ulBootUpEngineClock;
++ ULONG ulDentistVCOFreq;
++ ULONG ulBootUpUMAClock;
++ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
++ ULONG ulBootUpReqDisplayVector;
++ ULONG ulOtherDisplayMisc;
++ ULONG ulGPUCapInfo;
++ ULONG ulSB_MMIO_Base_Addr;
++ USHORT usRequestedPWMFreqInHz;
++ UCHAR ucHtcTmpLmt;
++ UCHAR ucHtcHystLmt;
++ ULONG ulMinEngineClock;
++ ULONG ulSystemConfig;
++ ULONG ulCPUCapInfo;
++ USHORT usNBP0Voltage;
++ USHORT usNBP1Voltage;
++ USHORT usBootUpNBVoltage;
++ USHORT usExtDispConnInfoOffset;
++ USHORT usPanelRefreshRateRange;
++ UCHAR ucMemoryType;
++ UCHAR ucUMAChannelNumber;
++ UCHAR strVBIOSMsg[40];
++ ULONG ulReserved[20];
++ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
++ ULONG ulGMCRestoreResetTime;
++ ULONG ulMinimumNClk;
++ ULONG ulIdleNClk;
++ ULONG ulDDR_DLL_PowerUpTime;
++ ULONG ulDDR_PLL_PowerUpTime;
++ USHORT usPCIEClkSSPercentage;
++ USHORT usPCIEClkSSType;
++ USHORT usLvdsSSPercentage;
++ USHORT usLvdsSSpreadRateIn10Hz;
++ USHORT usHDMISSPercentage;
++ USHORT usHDMISSpreadRateIn10Hz;
++ USHORT usDVISSPercentage;
++ USHORT usDVISSpreadRateIn10Hz;
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
++ UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
++ UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
++ UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
++ UCHAR ucLVDSOffToOnDelay_in4Ms;
++ UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
++ UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
++ UCHAR ucLVDSReserved1;
++ ULONG ulLCDBitDepthControlVal;
++ ULONG ulNbpStateMemclkFreq[4];
++ USHORT usNBP2Voltage;
++ USHORT usNBP3Voltage;
++ ULONG ulNbpStateNClkFreq[4];
++ UCHAR ucNBDPMEnable;
++ UCHAR ucReserved[3];
++ UCHAR ucDPMState0VclkFid;
++ UCHAR ucDPMState0DclkFid;
++ UCHAR ucDPMState1VclkFid;
++ UCHAR ucDPMState1DclkFid;
++ UCHAR ucDPMState2VclkFid;
++ UCHAR ucDPMState2DclkFid;
++ UCHAR ucDPMState3VclkFid;
++ UCHAR ucDPMState3DclkFid;
++ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
++}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
++
++// ulOtherDisplayMisc
++#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
++#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
++#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
++
++// ulGPUCapInfo
++#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
++#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
++#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
++
++/**********************************************************************************************************************
++ ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
++ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz units. If it equals 0, VBIOS uses a pre-defined bootup engine clock
++ulDentistVCOFreq: Dentist VCO clock in 10kHz units.
++ulBootUpUMAClock: System memory boot up clock frequency in 10kHz units.
++sDISPCLK_Voltage: Report Display clock voltage requirement.
++
++ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
++ ATOM_DEVICE_CRT1_SUPPORT 0x0001
++ ATOM_DEVICE_DFP1_SUPPORT 0x0008
++ ATOM_DEVICE_DFP6_SUPPORT 0x0040
++ ATOM_DEVICE_DFP2_SUPPORT 0x0080
++ ATOM_DEVICE_DFP3_SUPPORT 0x0200
++ ATOM_DEVICE_DFP4_SUPPORT 0x0400
++ ATOM_DEVICE_DFP5_SUPPORT 0x0800
++ ATOM_DEVICE_LCD1_SUPPORT 0x0002
++ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
++ =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
++ bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
++ =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
++ bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
++ =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
++ bit[3]=0: VBIOS fast boot is disabled
++ =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every mode set if the LCD panel is connected and the LID is open)
++ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
++ =1: TMDS/HDMI Coherent Mode uses single PLL mode.
++ bit[1]=0: DP mode uses cascade PLL mode ( new for Trinity )
++ =1: DP mode uses single PLL mode
++ bit[3]=0: Enable AUX HW mode detection logic
++ =1: Disable AUX HW mode detection logic
++
++ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
++
++usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
++ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
++
++ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
++ 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
++ VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
++ Changing BL using VBIOS function is functional in both driver and non-driver present environment;
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
++ that BL control from GPU is expected.
++ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
++ Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
++ it's per platform
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
++ Threshold on value to enter HTC_active state.
++ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
++ To calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
++ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
++ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
++ =1: PCIE Power Gating Enabled
++ Bit[1]=0: DDR-DLL shut-down feature disabled.
++ 1: DDR-DLL shut-down feature enabled.
++ Bit[2]=0: DDR-PLL Power down feature disabled.
++ 1: DDR-PLL Power down feature enabled.
++ulCPUCapInfo: TBD
++usNBP0Voltage: VID for voltage on NB P0 State
++usNBP1Voltage: VID for voltage on NB P1 State
++usNBP2Voltage: VID for voltage on NB P2 State
++usNBP3Voltage: VID for voltage on NB P3 State
++usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
++usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
++usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
++ to indicate a range.
++ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
++ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
++ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
++ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
++ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
++ucUMAChannelNumber: System memory channel numbers.
++ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
++ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
++ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
++sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
+ ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz.
+ ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+@@ -4398,6 +4937,41 @@ usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%;
+ usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+ usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
+ usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock of the LVDS panel in single link mode; if =0, VBIOS uses the default threshold, currently 85MHz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link are not swapped, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bits per color =1: 888 bits per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
++ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
++ =0 means use the VBIOS default, which is 8 (32ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++ucLVDSPwrOnDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
++ =0 means use the VBIOS default, which is 90 (360ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
++ =0 means use the VBIOS default delay, which is 8 (32ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
++ =0 means use the VBIOS default, which is 90 (360ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
++ =0 means to use VBIOS default delay which is 125 ( 500ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ulNbpStateMemclkFreq[4]: system memory clock frequency in units of 10kHz in different NB P-states.
++
+ **********************************************************************************************************************/
+
+ /**************************************************************************/
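Editor's note: two small conversions are implied by the description above: every ucLVDSPwrOn/Off*_in4Ms field is a count of 4ms ticks with 0 meaning "use the VBIOS default count", and the HTC exit threshold is the entry threshold minus the hysteresis. A worked sketch (the default tick counts follow the text, 8 ticks = 32ms and 90 ticks = 360ms; the HTC numbers are invented for the example):

#include <stdio.h>

/* 4ms-tick field -> milliseconds; 0 selects the documented default count */
static unsigned int lvds_delay_ms(unsigned char ticks, unsigned char def_ticks)
{
        return (ticks ? ticks : def_ticks) * 4u;
}

int main(void)
{
        printf("DIGON->DE:   %ums\n", lvds_delay_ms(0, 8));   /* 32ms default */
        printf("DE->VARY_BL: %ums\n", lvds_delay_ms(0, 90));  /* 360ms default */

        /* HTC: exit threshold = entry threshold - hysteresis (values invented) */
        unsigned int HtcTmpLmt = 70, HtcHystLmt = 2;
        printf("HTC off at %u\n", HtcTmpLmt - HtcHystLmt);
        return 0;
}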
+@@ -4459,6 +5033,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+ #define ASIC_INTERNAL_SS_ON_DP 7
+ #define ASIC_INTERNAL_SS_ON_DCPLL 8
+ #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
++#define ASIC_INTERNAL_VCE_SS 10
+
+ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+ {
+@@ -4520,7 +5095,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_DOS_MODE_INFO_DEF 7
+ #define ATOM_I2C_CHANNEL_STATUS_DEF 8
+ #define ATOM_I2C_CHANNEL_STATUS1_DEF 9
+-
++#define ATOM_INTERNAL_TIMER_DEF 10
+
+ // BIOS_0_SCRATCH Definition
+ #define ATOM_S0_CRT1_MONO 0x00000001L
+@@ -4648,6 +5223,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
++#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+ #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
+ #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
+
+@@ -5038,6 +5614,23 @@ typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+ }ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
++{
++ USHORT usHight; // Image Height
++ USHORT usWidth; // Image Width
++ USHORT usGraphPitch;
++ UCHAR ucColorDepth;
++ UCHAR ucPixelFormat;
++ UCHAR ucSurface; // Surface 1 or 2
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucModeType;
++ UCHAR ucReserved;
++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
++
++// ucEnable
++#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
++#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
++
+ typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+ {
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+@@ -5057,6 +5650,58 @@ typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+ USHORT usY_Size;
+ }GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
++typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
++{
++ union{
++ USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC
++ USHORT usSurface;
++ };
++ USHORT usY_Size;
++ USHORT usDispXStart;
++ USHORT usDispYStart;
++}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
++
++
++typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
++{
++ UCHAR ucLutId;
++ UCHAR ucAction;
++ USHORT usLutStartIndex;
++ USHORT usLutLength;
++ USHORT usLutOffsetInVram;
++}PALETTE_DATA_CONTROL_PARAMETERS_V3;
++
++// ucAction:
++#define PALETTE_DATA_AUTO_FILL 1
++#define PALETTE_DATA_READ 2
++#define PALETTE_DATA_WRITE 3
++
++
++typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
++{
++ UCHAR ucInterruptId;
++ UCHAR ucServiceId;
++ UCHAR ucStatus;
++ UCHAR ucReserved;
++}INTERRUPT_SERVICE_PARAMETER_V2;
++
++// ucInterruptId
++#define HDP1_INTERRUPT_ID 1
++#define HDP2_INTERRUPT_ID 2
++#define HDP3_INTERRUPT_ID 3
++#define HDP4_INTERRUPT_ID 4
++#define HDP5_INTERRUPT_ID 5
++#define HDP6_INTERRUPT_ID 6
++#define SW_INTERRUPT_ID 11
++
++// ucAction
++#define INTERRUPT_SERVICE_GEN_SW_INT 1
++#define INTERRUPT_SERVICE_GET_STATUS 2
++
++ // ucStatus
++#define INTERRUPT_STATUS__INT_TRIGGER 1
++#define INTERRUPT_STATUS__HPD_HIGH 2
++
+ typedef struct _INDIRECT_IO_ACCESS
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -5189,7 +5834,7 @@ typedef struct _ATOM_INIT_REG_BLOCK{
+
+ #define END_OF_REG_INDEX_BLOCK 0x0ffff
+ #define END_OF_REG_DATA_BLOCK 0x00000000
+-#define ATOM_INIT_REG_MASK_FLAG 0x80
++#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
+ #define CLOCK_RANGE_HIGHEST 0x00ffffff
+
+ #define VALUE_DWORD SIZEOF ULONG
+@@ -5229,6 +5874,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
+ #define _128Mx8 0x51
+ #define _128Mx16 0x52
+ #define _256Mx8 0x61
++#define _256Mx16 0x62
+
+ #define SAMSUNG 0x1
+ #define INFINEON 0x2
+@@ -5585,7 +6231,7 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+- USHORT usReserved;
++ USHORT usEnableChannels; // bit vector which indicate which channels are enabled
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+@@ -5597,7 +6243,8 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+- UCHAR ucReserved[3];
++ USHORT usSEQSettingOffset;
++ UCHAR ucReserved;
+ // Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+@@ -5633,10 +6280,10 @@ typedef struct _ATOM_VRAM_INFO_V3
+ typedef struct _ATOM_VRAM_INFO_V4
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usRerseved;
+- UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usRerseved;
++ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+@@ -5648,9 +6295,10 @@ typedef struct _ATOM_VRAM_INFO_V4
+ typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usReserved[4];
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
++ USHORT usReserved[3];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
+@@ -5935,6 +6583,52 @@ typedef struct _ATOM_DISP_OUT_INFO_V2
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+ }ATOM_DISP_OUT_INFO_V2;
+
++
++typedef struct _ATOM_DISP_CLOCK_ID {
++ UCHAR ucPpllId;
++ UCHAR ucPpllAttribute;
++}ATOM_DISP_CLOCK_ID;
++
++// ucPpllAttribute
++#define CLOCK_SOURCE_SHAREABLE 0x01
++#define CLOCK_SOURCE_DP_MODE 0x02
++#define CLOCK_SOURCE_NONE_DP_MODE 0x04
++
++//DispOutInfoTable
++typedef struct _ASIC_TRANSMITTER_INFO_V2
++{
++ USHORT usTransmitterObjId;
++ USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object
++ UCHAR ucTransmitterCmdTblId;
++ UCHAR ucConfig;
++ UCHAR ucEncoderID; // available 1st encoder ( default )
++ UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
++ UCHAR uc2ndEncoderID;
++ UCHAR ucReserved;
++}ASIC_TRANSMITTER_INFO_V2;
++
++typedef struct _ATOM_DISP_OUT_INFO_V3
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT ptrTransmitterInfo;
++ USHORT ptrEncoderInfo;
++ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
++ USHORT usReserved;
++ UCHAR ucDCERevision;
++ UCHAR ucMaxDispEngineNum;
++ UCHAR ucMaxActiveDispEngineNum;
++ UCHAR ucMaxPPLLNum;
++ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
++ UCHAR ucReserved[3];
++ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
++}ATOM_DISP_OUT_INFO_V3;
++
++typedef enum CORE_REF_CLK_SOURCE{
++ CLOCK_SRC_XTALIN=0,
++ CLOCK_SRC_XO_IN=1,
++ CLOCK_SRC_XO_IN2=2,
++}CORE_REF_CLK_SOURCE;
++
+ // DispDevicePriorityInfo
+ typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+ {
+@@ -6070,6 +6764,39 @@ typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+ #define HW_I2C_READ 0
+ #define I2C_2BYTE_ADDR 0x02
+
++/****************************************************************************/
++// Structures used by HW_Misc_OperationTable
++/****************************************************************************/
++typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucCmd; // Input: To tell which action to take
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
++
++typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
++{
++ UCHAR ucReturnCode; // Output: Return value base on action was taken
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
++
++// Actions code
++#define ATOM_GET_SDI_SUPPORT 0xF0
++
++// Return code
++#define ATOM_UNKNOWN_CMD 0
++#define ATOM_FEATURE_NOT_SUPPORTED 1
++#define ATOM_FEATURE_SUPPORTED 2
++
++typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
++{
++ ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
++ PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
++}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
++
++/****************************************************************************/
++
+ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ {
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+@@ -6090,6 +6817,52 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ #define SELECT_CRTC_PIXEL_RATE 7
+ #define SELECT_VGA_BLK 8
+
++// DIGTransmitterInfoTable structure used to program UNIPHY settings
++typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
++ USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
++ USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
++ USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
++ USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
++}DIG_TRANSMITTER_INFO_HEADER_V3_1;
++
++typedef struct _CLOCK_CONDITION_REGESTER_INFO{
++ USHORT usRegisterIndex;
++ UCHAR ucStartBit;
++ UCHAR ucEndBit;
++}CLOCK_CONDITION_REGESTER_INFO;
++
++typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
++ USHORT usMaxClockFreq;
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ ULONG ulAnalogSetting[1];
++}CLOCK_CONDITION_SETTING_ENTRY;
++
++typedef struct _CLOCK_CONDITION_SETTING_INFO{
++ USHORT usEntrySize;
++ CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
++}CLOCK_CONDITION_SETTING_INFO;
++
++typedef struct _PHY_CONDITION_REG_VAL{
++ ULONG ulCondition;
++ ULONG ulRegVal;
++}PHY_CONDITION_REG_VAL;
++
++typedef struct _PHY_CONDITION_REG_INFO{
++ USHORT usRegIndex;
++ USHORT usSize;
++ PHY_CONDITION_REG_VAL asRegVal[1];
++}PHY_CONDITION_REG_INFO;
++
++typedef struct _PHY_ANALOG_SETTING_INFO{
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ USHORT usSize;
++ PHY_CONDITION_REG_INFO asAnalogSetting[1];
++}PHY_ANALOG_SETTING_INFO;
++
+ /****************************************************************************/
+ //Portion VI: Definitions for vbios MC scratch registers used by the driver
+ /****************************************************************************/
+@@ -7020,4 +7793,68 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+
+ #pragma pack() // BIOS data must use byte alignment
+
++//
++// AMD ACPI Table
++//
++#pragma pack(1)
++
++typedef struct {
++ ULONG Signature;
++ ULONG TableLength; //Length
++ UCHAR Revision;
++ UCHAR Checksum;
++ UCHAR OemId[6];
++ UCHAR OemTableId[8]; //UINT64 OemTableId;
++ ULONG OemRevision;
++ ULONG CreatorId;
++ ULONG CreatorRevision;
++} AMD_ACPI_DESCRIPTION_HEADER;
++/*
++//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
++typedef struct {
++ UINT32 Signature; //0x0
++ UINT32 Length; //0x4
++ UINT8 Revision; //0x8
++ UINT8 Checksum; //0x9
++ UINT8 OemId[6]; //0xA
++ UINT64 OemTableId; //0x10
++ UINT32 OemRevision; //0x18
++ UINT32 CreatorId; //0x1C
++ UINT32 CreatorRevision; //0x20
++}EFI_ACPI_DESCRIPTION_HEADER;
++*/
++typedef struct {
++ AMD_ACPI_DESCRIPTION_HEADER SHeader;
++ UCHAR TableUUID[16]; //0x24
++  ULONG VBIOSImageOffset;      //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
++  ULONG Lib1ImageOffset;       //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
++ ULONG Reserved[4]; //0x3C
++}UEFI_ACPI_VFCT;
++
++typedef struct {
++ ULONG PCIBus; //0x4C
++ ULONG PCIDevice; //0x50
++ ULONG PCIFunction; //0x54
++ USHORT VendorID; //0x58
++ USHORT DeviceID; //0x5A
++ USHORT SSVID; //0x5C
++ USHORT SSID; //0x5E
++ ULONG Revision; //0x60
++ ULONG ImageLength; //0x64
++}VFCT_IMAGE_HEADER;
++
++
++typedef struct {
++ VFCT_IMAGE_HEADER VbiosHeader;
++ UCHAR VbiosContent[1];
++}GOP_VBIOS_CONTENT;
++
++typedef struct {
++ VFCT_IMAGE_HEADER Lib1Header;
++ UCHAR Lib1Content[1];
++}GOP_LIB1_CONTENT;
++
++#pragma pack()
++
++
+ #endif /* _ATOMBIOS_H */
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 8227e76..28e69e9 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -123,21 +123,6 @@ struct radeon_device;
+ /*
+ * BIOS.
+ */
+-#define ATRM_BIOS_PAGE 4096
+-
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_atrm_supported(struct pci_dev *pdev);
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+-#else
+-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- return false;
+-}
+-
+-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
+- return -EINVAL;
+-}
+-#endif
+ bool radeon_get_bios(struct radeon_device *rdev);
+
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 9d2c369..38585c5 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -446,7 +446,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+- if ((dev->pdev->device == 0x9802) &&
++ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 9d95792..2a2cf0b 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -30,56 +30,8 @@ static struct radeon_atpx_priv {
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+- acpi_handle atrm_handle;
+ } radeon_atpx_priv;
+
+-/* retrieve the ROM in 4k blocks */
+-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+- int offset, int len)
+-{
+- acpi_status status;
+- union acpi_object atrm_arg_elements[2], *obj;
+- struct acpi_object_list atrm_arg;
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+-
+- atrm_arg.count = 2;
+- atrm_arg.pointer = &atrm_arg_elements[0];
+-
+- atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[0].integer.value = offset;
+-
+- atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[1].integer.value = len;
+-
+- status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+- if (ACPI_FAILURE(status)) {
+- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+- return -ENODEV;
+- }
+-
+- obj = (union acpi_object *)buffer.pointer;
+- memcpy(bios+offset, obj->buffer.pointer, len);
+- kfree(buffer.pointer);
+- return len;
+-}
+-
+-bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- /* get the discrete ROM only via ATRM */
+- if (!radeon_atpx_priv.atpx_detected)
+- return false;
+-
+- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+- return false;
+- return true;
+-}
+-
+-
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+-{
+- return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+-}
+-
+ static int radeon_atpx_get_version(acpi_handle handle)
+ {
+ acpi_status status;
+@@ -197,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+
+ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ {
+- acpi_handle dhandle, atpx_handle, atrm_handle;
++ acpi_handle dhandle, atpx_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+@@ -208,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ if (ACPI_FAILURE(status))
+ return false;
+
+- status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+- if (ACPI_FAILURE(status))
+- return false;
+-
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+- radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 229a20f..d306cc8 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -32,6 +32,7 @@
+
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
++#include <linux/acpi.h>
+ /*
+ * BIOS.
+ */
+@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ return true;
+ }
+
++#ifdef CONFIG_ACPI
+ /* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
++/* retrieve the ROM in 4k blocks */
++#define ATRM_BIOS_PAGE 4096
++/**
++ * radeon_atrm_call - fetch a chunk of the vbios
++ *
++ * @atrm_handle: acpi ATRM handle
++ * @bios: vbios image pointer
++ * @offset: offset of vbios image data to fetch
++ * @len: length of vbios image data to fetch
++ *
++ * Executes ATRM to fetch a chunk of the discrete
++ * vbios image on PX systems (all asics).
++ * Returns the length of the buffer fetched.
++ */
++static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
++ int offset, int len)
++{
++ acpi_status status;
++ union acpi_object atrm_arg_elements[2], *obj;
++ struct acpi_object_list atrm_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
++
++ atrm_arg.count = 2;
++ atrm_arg.pointer = &atrm_arg_elements[0];
++
++ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[0].integer.value = offset;
++
++ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[1].integer.value = len;
++
++ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
++ if (ACPI_FAILURE(status)) {
++ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
++ return -ENODEV;
++ }
++
++ obj = (union acpi_object *)buffer.pointer;
++ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
++ len = obj->buffer.length;
++ kfree(buffer.pointer);
++ return len;
++}
++
+ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ {
+ int ret;
+ int size = 256 * 1024;
+ int i;
++ struct pci_dev *pdev = NULL;
++ acpi_handle dhandle, atrm_handle;
++ acpi_status status;
++ bool found = false;
++
++ /* ATRM is for the discrete card only */
++ if (rdev->flags & RADEON_IS_IGP)
++ return false;
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
+
+- if (!radeon_atrm_supported(rdev->pdev))
++ if (!found)
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+@@ -117,10 +183,11 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+- ret = radeon_atrm_get_bios_chunk(rdev->bios,
+- (i * ATRM_BIOS_PAGE),
+- ATRM_BIOS_PAGE);
+- if (ret <= 0)
++ ret = radeon_atrm_call(atrm_handle,
++ rdev->bios,
++ (i * ATRM_BIOS_PAGE),
++ ATRM_BIOS_PAGE);
++ if (ret < ATRM_BIOS_PAGE)
+ break;
+ }
+
+@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+ return true;
+ }
++#else
++static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ static bool ni_read_disabled_bios(struct radeon_device *rdev)
+ {
+@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+ return legacy_read_disabled_bios(rdev);
+ }
+
++#ifdef CONFIG_ACPI
++static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ bool ret = false;
++ struct acpi_table_header *hdr;
++ acpi_size tbl_size;
++ UEFI_ACPI_VFCT *vfct;
++ GOP_VBIOS_CONTENT *vbios;
++ VFCT_IMAGE_HEADER *vhdr;
++
++ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
++ return false;
++ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
++ goto out_unmap;
++ }
++
++ vfct = (UEFI_ACPI_VFCT *)hdr;
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
++ goto out_unmap;
++ }
++
++ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
++ vhdr = &vbios->VbiosHeader;
++ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
++ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
++ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
++
++ if (vhdr->PCIBus != rdev->pdev->bus->number ||
++ vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
++ vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
++ vhdr->VendorID != rdev->pdev->vendor ||
++ vhdr->DeviceID != rdev->pdev->device) {
++ DRM_INFO("ACPI VFCT table is not for this card\n");
++ goto out_unmap;
++	}
++
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
++ DRM_ERROR("ACPI VFCT image truncated\n");
++ goto out_unmap;
++ }
++
++ rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
++ ret = !!rdev->bios;
++
++out_unmap:
++ return ret;
++}
++#else
++static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ bool radeon_get_bios(struct radeon_device *rdev)
+ {
+@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
+
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
++ r = radeon_acpi_vfct_bios(rdev);
++ if (r == false)
+ r = igp_read_bios_from_vram(rdev);
+ if (r == false)
+ r = radeon_read_bios(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 39497c7..f3ae607 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -117,6 +117,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ return -ENOMEM;
+ }
+
++retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -129,8 +130,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+-
+-retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index b99af34..a2abb8e 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ static const struct hid_device_id ch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ch_devices);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 95430a0..5cc029f 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1398,12 +1398,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index 2f0be4c..9e43aac 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
++ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
+ .driver_data = CP_2WHEEL_MOUSE_HACK },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 7db934d..e4317a2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -196,6 +196,7 @@
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
++#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
+
+ #define USB_VENDOR_ID_CHUNGHWAT 0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+@@ -225,6 +226,7 @@
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
++#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
+ #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
+
+ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 0bfa545..c76b051 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ scmnd->sc_data_direction);
+ }
+
+-static void srp_remove_req(struct srp_target_port *target,
+- struct srp_request *req, s32 req_lim_delta)
++/**
++ * srp_claim_req - Take ownership of the scmnd associated with a request.
++ * @target: SRP target port.
++ * @req: SRP request.
++ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
++ * ownership of @req->scmnd if it equals @scmnd.
++ *
++ * Return value:
++ * Either NULL or a pointer to the SCSI command the caller became owner of.
++ */
++static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
++ struct srp_request *req,
++ struct scsi_cmnd *scmnd)
+ {
+ unsigned long flags;
+
+- srp_unmap_data(req->scmnd, target, req);
++ spin_lock_irqsave(&target->lock, flags);
++ if (!scmnd) {
++ scmnd = req->scmnd;
++ req->scmnd = NULL;
++ } else if (req->scmnd == scmnd) {
++ req->scmnd = NULL;
++ } else {
++ scmnd = NULL;
++ }
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return scmnd;
++}
++
++/**
++ * srp_free_req() - Unmap data and add request to the free request list.
++ */
++static void srp_free_req(struct srp_target_port *target,
++ struct srp_request *req, struct scsi_cmnd *scmnd,
++ s32 req_lim_delta)
++{
++ unsigned long flags;
++
++ srp_unmap_data(scmnd, target, req);
++
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += req_lim_delta;
+- req->scmnd = NULL;
+ list_add_tail(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
+ }
+
+ static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+ {
+- req->scmnd->result = DID_RESET << 16;
+- req->scmnd->scsi_done(req->scmnd);
+- srp_remove_req(target, req, 0);
++ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
++
++ if (scmnd) {
++ scmnd->result = DID_RESET << 16;
++ scmnd->scsi_done(scmnd);
++ srp_free_req(target, req, scmnd, 0);
++ }
+ }
+
+ static int srp_reconnect_target(struct srp_target_port *target)
+@@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ complete(&target->tsk_mgmt_done);
+ } else {
+ req = &target->req_ring[rsp->tag];
+- scmnd = req->scmnd;
+- if (!scmnd)
++ scmnd = srp_claim_req(target, req, NULL);
++ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %016llx\n",
+ (unsigned long long) rsp->tag);
++
++ spin_lock_irqsave(&target->lock, flags);
++ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return;
++ }
+ scmnd->result = rsp->status;
+
+ if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+@@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+
+- srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
++ srp_free_req(target, req, scmnd,
++ be32_to_cpu(rsp->req_lim_delta));
++
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done(scmnd);
+ }
+@@ -1613,25 +1660,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ {
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+- int ret = SUCCESS;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+- if (!req || target->qp_in_error)
+- return FAILED;
+- if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+- SRP_TSK_ABORT_TASK))
++ if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+ return FAILED;
++ srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
++ SRP_TSK_ABORT_TASK);
++ srp_free_req(target, req, scmnd, 0);
++ scmnd->result = DID_ABORT << 16;
+
+- if (req->scmnd) {
+- if (!target->tsk_mgmt_status) {
+- srp_remove_req(target, req, 0);
+- scmnd->result = DID_ABORT << 16;
+- } else
+- ret = FAILED;
+- }
+-
+- return ret;
++ return SUCCESS;
+ }
+
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d8646d7..2887f22 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1144,8 +1144,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ ret = 0;
+ }
+ rdev->sectors = rdev->sb_start;
+- /* Limit to 4TB as metadata cannot record more than that */
+- if (rdev->sectors >= (2ULL << 32))
++ /* Limit to 4TB as metadata cannot record more than that.
++ * (not needed for Linear and RAID0 as metadata doesn't
++ * record this size)
++ */
++ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
+ rdev->sectors = (2ULL << 32) - 2;
+
+ if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
+@@ -1427,7 +1430,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ /* Limit to 4TB as metadata cannot record more than that.
+ * 4TB == 2^32 KB, or 2*2^32 sectors.
+ */
+- if (num_sectors >= (2ULL << 32))
++ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
+ num_sectors = (2ULL << 32) - 2;
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index fb68805..027550d 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
+ return 0;
+ }
+
+-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
++static const struct usb_device_id smsusb_id_table[] = {
+ { USB_DEVICE(0x187f, 0x0010),
+ .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+ { USB_DEVICE(0x187f, 0x0100),
+diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
+index 89fec4c..731cd16 100644
+--- a/drivers/media/video/gspca/spca506.c
++++ b/drivers/media/video/gspca/spca506.c
+@@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = {
+ };
+
+ /* -- module initialisation -- */
+-static const struct usb_device_id device_table[] __devinitconst = {
++static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x06e1, 0xa190)},
+ /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
+ {USB_DEVICE(0x0733, 0x0430)}, */
+diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
+index 17bbacb..cc2ae7e 100644
+--- a/drivers/misc/sgi-xp/xpc_uv.c
++++ b/drivers/misc/sgi-xp/xpc_uv.c
+@@ -18,6 +18,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+ #include <asm/uv/uv_hub.h>
+@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
+ XPC_NOTIFY_MSG_SIZE_UV)
+ #define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+
++static int xpc_mq_node = -1;
++
+ static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+ static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+
+@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+ #if defined CONFIG_X86_64
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+ UV_AFFINITY_CPU);
+- if (mq->irq < 0) {
+- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+- -mq->irq);
++ if (mq->irq < 0)
+ return mq->irq;
+- }
+
+ mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
+
+@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
+ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+ nid = cpu_to_node(cpu);
+- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+- pg_order);
++ page = alloc_pages_exact_node(nid,
++ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
++ pg_order);
+ if (page == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+ "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
+@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+ };
+
++static int
++xpc_init_mq_node(int nid)
++{
++ int cpu;
++
++ get_online_cpus();
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_activate_mq_uv =
++ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
++ XPC_ACTIVATE_IRQ_NAME,
++ xpc_handle_activate_IRQ_uv);
++ if (!IS_ERR(xpc_activate_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_activate_mq_uv)) {
++ put_online_cpus();
++ return PTR_ERR(xpc_activate_mq_uv);
++ }
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_notify_mq_uv =
++ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
++ XPC_NOTIFY_IRQ_NAME,
++ xpc_handle_notify_IRQ_uv);
++ if (!IS_ERR(xpc_notify_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_notify_mq_uv)) {
++ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
++ put_online_cpus();
++ return PTR_ERR(xpc_notify_mq_uv);
++ }
++
++ put_online_cpus();
++ return 0;
++}
++
+ int
+ xpc_init_uv(void)
+ {
++ int nid;
++ int ret = 0;
++
+ xpc_arch_ops = xpc_arch_ops_uv;
+
+ if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
+@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
+ return -E2BIG;
+ }
+
+- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+- XPC_ACTIVATE_IRQ_NAME,
+- xpc_handle_activate_IRQ_uv);
+- if (IS_ERR(xpc_activate_mq_uv))
+- return PTR_ERR(xpc_activate_mq_uv);
++ if (xpc_mq_node < 0)
++ for_each_online_node(nid) {
++ ret = xpc_init_mq_node(nid);
+
+- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+- XPC_NOTIFY_IRQ_NAME,
+- xpc_handle_notify_IRQ_uv);
+- if (IS_ERR(xpc_notify_mq_uv)) {
+- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+- return PTR_ERR(xpc_notify_mq_uv);
+- }
++ if (!ret)
++ break;
++ }
++ else
++ ret = xpc_init_mq_node(xpc_mq_node);
+
+- return 0;
++ if (ret < 0)
++ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
++ -ret);
++
++ return ret;
+ }
+
+ void
+@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
+ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ }
++
++module_param(xpc_mq_node, int, 0);
++MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index e888202..01b104e 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -652,7 +652,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ flags);
+ dev_put(nt->np.dev);
+ nt->np.dev = NULL;
+- netconsole_target_put(nt);
+ }
+ nt->enabled = 0;
+ stopped = true;
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index e6d791c..b4cbc82 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -1782,7 +1782,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ int retval;
+- bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+@@ -1804,6 +1803,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ tsf_lower = tsf & 0xffffffff;
+
+ do {
++ bool decrypt_error = false;
+ /* If handling rx interrupt and flush is in progress => exit */
+ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
+ break;
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 9b60968..8a009bc 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
+ * whenever you add a new device.
+ */
+
+-static struct usb_device_id p54u_table[] __devinitdata = {
++static struct usb_device_id p54u_table[] = {
+ /* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+index 4a78f9e..4e98c39 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
+ MODULE_LICENSE("GPL");
+
+-static struct usb_device_id rtl8187_table[] __devinitdata = {
++static struct usb_device_id rtl8187_table[] = {
+ /* Asus */
+ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
+ /* Belkin */
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index d024f83..68af94c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -952,6 +952,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
+ if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
++ /*
++ * The reason for doing this here is the same as for the analogous code
++ * in pci_pm_suspend_noirq().
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index b0859d4..ec5b17f 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -86,6 +86,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x8A, { KEY_PROG1 } },
+ { KE_KEY, 0x95, { KEY_MEDIA } },
+ { KE_KEY, 0x99, { KEY_PHONE } },
++ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
++ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
++ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
++ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ { KE_KEY, 0xb5, { KEY_CALC } },
+ { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 30d2072..33471e1 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -439,6 +439,9 @@ static void tsi721_db_dpc(struct work_struct *work)
+ " info %4.4x\n", DBELL_SID(idb.bytes),
+ DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ }
++
++ wr_ptr = ioread32(priv->regs +
++ TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+ }
+
+ iowrite32(rd_ptr & (IDB_QSIZE - 1),
+@@ -449,6 +452,10 @@ static void tsi721_db_dpc(struct work_struct *work)
+ regval |= TSI721_SR_CHINT_IDBQRCV;
+ iowrite32(regval,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
++
++ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
++ if (wr_ptr != rd_ptr)
++ schedule_work(&priv->idb_work);
+ }
+
+ /**
+@@ -2155,7 +2162,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ struct tsi721_device *priv;
+- int i, cap;
++ int cap;
+ int err;
+ u32 regval;
+
+@@ -2175,12 +2182,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ priv->pdev = pdev;
+
+ #ifdef DEBUG
++ {
++ int i;
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
+ i, (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long)pci_resource_len(pdev, i),
+ pci_resource_flags(pdev, i));
+ }
++ }
+ #endif
+ /*
+ * Verify BAR configuration
+diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
+index 971bc8e..11bcb20 100644
+--- a/drivers/rtc/rtc-rs5c348.c
++++ b/drivers/rtc/rtc-rs5c348.c
+@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
+ tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
+ if (!pdata->rtc_24h) {
+- tm->tm_hour %= 12;
+- if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
++ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
++ tm->tm_hour -= 20;
++ tm->tm_hour %= 12;
+ tm->tm_hour += 12;
++ } else
++ tm->tm_hour %= 12;
+ }
+ tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
+ tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 8be5604..0d70f68 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc)
+
+ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
+ {
+- static u_char *goto_buf = "\0\0\0\0\0\0";
++ static u_char goto_buf[8];
+ static int num;
+ int maxlen, go_pos;
+ char *cp;
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 27521b6..ae62d57 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
+ // Static vars definitions
+ //
+
+-static struct usb_device_id vt6656_table[] __devinitdata = {
++static struct usb_device_id vt6656_table[] = {
+ {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
+ {}
+ };
+diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
+index f958eb4..3f0ce2b 100644
+--- a/drivers/staging/winbond/wbusb.c
++++ b/drivers/staging/winbond/wbusb.c
+@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("0.1");
+
+-static const struct usb_device_id wb35_table[] __devinitconst = {
++static const struct usb_device_id wb35_table[] = {
+ { USB_DEVICE(0x0416, 0x0035) },
+ { USB_DEVICE(0x18E8, 0x6201) },
+ { USB_DEVICE(0x18E8, 0x6206) },
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 94c03d2..597fb9b 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3509,9 +3509,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
+ return 0;
+
+ out:
+- while (i >= 0) {
+- __free_page(sg_page(&cmd->t_data_sg[i]));
++ while (i > 0) {
+ i--;
++ __free_page(sg_page(&cmd->t_data_sg[i]));
+ }
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 5acd24a..086f7fe 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1407,10 +1407,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
+ static int pmz_poll_get_char(struct uart_port *port)
+ {
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
++ int tries = 2;
+
+- while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
+- udelay(5);
+- return read_zsdata(uap);
++ while (tries) {
++ if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
++ return read_zsdata(uap);
++ if (tries--)
++ udelay(5);
++ }
++
++ return NO_POLL_CHAR;
+ }
+
+ static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 1094469..dbf7d20 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1043,7 +1043,8 @@ skip_normal_probe:
+ }
+
+
+- if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
++ if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
++ control_interface->cur_altsetting->desc.bNumEndpoints == 0)
+ return -EINVAL;
+
+ epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 4e1f0aa..9a2a1ae 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ struct gether *link = dev->port_usb;
++ const struct usb_endpoint_descriptor *in;
++ const struct usb_endpoint_descriptor *out;
+
+ if (link->close)
+ link->close(link);
+@@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)
+ * their own pace; the network stack can handle old packets.
+ * For the moment we leave this here, since it works.
+ */
++ in = link->in_ep->desc;
++ out = link->out_ep->desc;
+ usb_ep_disable(link->in_ep);
+ usb_ep_disable(link->out_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using in/out endpoints\n");
++ link->in_ep->desc = in;
++ link->out_ep->desc = out;
+ usb_ep_enable(link->in_ep);
+ usb_ep_enable(link->out_ep);
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index daf5754..07c72a4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -95,6 +95,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 05f82e9..f7c0a2a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -163,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+
+ ret = handshake(xhci, &xhci->op_regs->command,
+- CMD_RESET, 0, 250 * 1000);
++ CMD_RESET, 0, 10 * 1000 * 1000);
+ if (ret)
+ return ret;
+
+@@ -172,7 +172,8 @@ int xhci_reset(struct xhci_hcd *xhci)
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+- return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
++ return handshake(xhci, &xhci->op_regs->status,
++ STS_CNR, 0, 10 * 1000 * 1000);
+ }
+
+ #ifdef CONFIG_PCI
+diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
+index fc15ad4..723e833 100644
+--- a/drivers/usb/misc/emi62.c
++++ b/drivers/usb/misc/emi62.c
+@@ -259,7 +259,7 @@ wraperr:
+ return err;
+ }
+
+-static const struct usb_device_id id_table[] __devinitconst = {
++static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 4045e39..b3182bb 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+ { USB_DEVICE(PI_VID, PI_E861_PID) },
++ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index d27d7d7..54b4258 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -795,6 +795,13 @@
+ #define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
+
+ /*
++ * Kondo Kagaku Co.Ltd.
++ * http://www.kondo-robot.com/EN
++ */
++#define KONDO_VID 0x165c
++#define KONDO_USB_SERIAL_PID 0x0002
++
++/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5c7d654..b150ed9 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1191,9 +1191,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
+ }
+
+ spin_lock_irqsave(&mos7840_port->pool_lock, flags);
+- for (i = 0; i < NUM_URBS; ++i)
+- if (mos7840_port->busy[i])
+- chars += URB_TRANSFER_BUFFER_SIZE;
++ for (i = 0; i < NUM_URBS; ++i) {
++ if (mos7840_port->busy[i]) {
++ struct urb *urb = mos7840_port->write_urb_pool[i];
++ chars += urb->transfer_buffer_length;
++ }
++ }
+ spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
+ dbg("%s - returns %d", __func__, chars);
+ return chars;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d89aac1..113560d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -80,84 +80,9 @@ static void option_instat_callback(struct urb *urb);
+ #define OPTION_PRODUCT_GTM380_MODEM 0x7201
+
+ #define HUAWEI_VENDOR_ID 0x12D1
+-#define HUAWEI_PRODUCT_E600 0x1001
+-#define HUAWEI_PRODUCT_E220 0x1003
+-#define HUAWEI_PRODUCT_E220BIS 0x1004
+-#define HUAWEI_PRODUCT_E1401 0x1401
+-#define HUAWEI_PRODUCT_E1402 0x1402
+-#define HUAWEI_PRODUCT_E1403 0x1403
+-#define HUAWEI_PRODUCT_E1404 0x1404
+-#define HUAWEI_PRODUCT_E1405 0x1405
+-#define HUAWEI_PRODUCT_E1406 0x1406
+-#define HUAWEI_PRODUCT_E1407 0x1407
+-#define HUAWEI_PRODUCT_E1408 0x1408
+-#define HUAWEI_PRODUCT_E1409 0x1409
+-#define HUAWEI_PRODUCT_E140A 0x140A
+-#define HUAWEI_PRODUCT_E140B 0x140B
+-#define HUAWEI_PRODUCT_E140C 0x140C
+-#define HUAWEI_PRODUCT_E140D 0x140D
+-#define HUAWEI_PRODUCT_E140E 0x140E
+-#define HUAWEI_PRODUCT_E140F 0x140F
+-#define HUAWEI_PRODUCT_E1410 0x1410
+-#define HUAWEI_PRODUCT_E1411 0x1411
+-#define HUAWEI_PRODUCT_E1412 0x1412
+-#define HUAWEI_PRODUCT_E1413 0x1413
+-#define HUAWEI_PRODUCT_E1414 0x1414
+-#define HUAWEI_PRODUCT_E1415 0x1415
+-#define HUAWEI_PRODUCT_E1416 0x1416
+-#define HUAWEI_PRODUCT_E1417 0x1417
+-#define HUAWEI_PRODUCT_E1418 0x1418
+-#define HUAWEI_PRODUCT_E1419 0x1419
+-#define HUAWEI_PRODUCT_E141A 0x141A
+-#define HUAWEI_PRODUCT_E141B 0x141B
+-#define HUAWEI_PRODUCT_E141C 0x141C
+-#define HUAWEI_PRODUCT_E141D 0x141D
+-#define HUAWEI_PRODUCT_E141E 0x141E
+-#define HUAWEI_PRODUCT_E141F 0x141F
+-#define HUAWEI_PRODUCT_E1420 0x1420
+-#define HUAWEI_PRODUCT_E1421 0x1421
+-#define HUAWEI_PRODUCT_E1422 0x1422
+-#define HUAWEI_PRODUCT_E1423 0x1423
+-#define HUAWEI_PRODUCT_E1424 0x1424
+-#define HUAWEI_PRODUCT_E1425 0x1425
+-#define HUAWEI_PRODUCT_E1426 0x1426
+-#define HUAWEI_PRODUCT_E1427 0x1427
+-#define HUAWEI_PRODUCT_E1428 0x1428
+-#define HUAWEI_PRODUCT_E1429 0x1429
+-#define HUAWEI_PRODUCT_E142A 0x142A
+-#define HUAWEI_PRODUCT_E142B 0x142B
+-#define HUAWEI_PRODUCT_E142C 0x142C
+-#define HUAWEI_PRODUCT_E142D 0x142D
+-#define HUAWEI_PRODUCT_E142E 0x142E
+-#define HUAWEI_PRODUCT_E142F 0x142F
+-#define HUAWEI_PRODUCT_E1430 0x1430
+-#define HUAWEI_PRODUCT_E1431 0x1431
+-#define HUAWEI_PRODUCT_E1432 0x1432
+-#define HUAWEI_PRODUCT_E1433 0x1433
+-#define HUAWEI_PRODUCT_E1434 0x1434
+-#define HUAWEI_PRODUCT_E1435 0x1435
+-#define HUAWEI_PRODUCT_E1436 0x1436
+-#define HUAWEI_PRODUCT_E1437 0x1437
+-#define HUAWEI_PRODUCT_E1438 0x1438
+-#define HUAWEI_PRODUCT_E1439 0x1439
+-#define HUAWEI_PRODUCT_E143A 0x143A
+-#define HUAWEI_PRODUCT_E143B 0x143B
+-#define HUAWEI_PRODUCT_E143C 0x143C
+-#define HUAWEI_PRODUCT_E143D 0x143D
+-#define HUAWEI_PRODUCT_E143E 0x143E
+-#define HUAWEI_PRODUCT_E143F 0x143F
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+-#define HUAWEI_PRODUCT_E14AC 0x14AC
+-#define HUAWEI_PRODUCT_K3806 0x14AE
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+-#define HUAWEI_PRODUCT_K3770 0x14C9
+-#define HUAWEI_PRODUCT_K3771 0x14CA
+-#define HUAWEI_PRODUCT_K4510 0x14CB
+-#define HUAWEI_PRODUCT_K4511 0x14CC
+-#define HUAWEI_PRODUCT_ETS1220 0x1803
+-#define HUAWEI_PRODUCT_E353 0x1506
+-#define HUAWEI_PRODUCT_E173S 0x1C05
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -614,101 +539,123 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++
++
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 8745637..bf9a9b7 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work)
+ struct vc_data *vc = NULL;
+ int c;
+ int mode;
++ int ret;
++
++ /* FIXME: we should sort out the unbind locking instead */
++ /* instead we just fail to flash the cursor if we can't get
++ * the lock instead of blocking fbcon deinit */
++ ret = console_trylock();
++ if (ret == 0)
++ return;
+
+- console_lock();
+ if (ops && ops->currcon != -1)
+ vc = vc_cons[ops->currcon].d;
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 4115eca..19a4f0b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -964,7 +964,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
+ /*
+ * Initialise the state of a blockdev page's buffers.
+ */
+-static void
++static sector_t
+ init_page_buffers(struct page *page, struct block_device *bdev,
+ sector_t block, int size)
+ {
+@@ -986,33 +986,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+ block++;
+ bh = bh->b_this_page;
+ } while (bh != head);
++
++ /*
++ * Caller needs to validate requested block against end of device.
++ */
++ return end_block;
+ }
+
+ /*
+ * Create the page-cache page that contains the requested block.
+ *
+- * This is user purely for blockdev mappings.
++ * This is used purely for blockdev mappings.
+ */
+-static struct page *
++static int
+ grow_dev_page(struct block_device *bdev, sector_t block,
+- pgoff_t index, int size)
++ pgoff_t index, int size, int sizebits)
+ {
+ struct inode *inode = bdev->bd_inode;
+ struct page *page;
+ struct buffer_head *bh;
++ sector_t end_block;
++ int ret = 0; /* Will call free_more_memory() */
+
+ page = find_or_create_page(inode->i_mapping, index,
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+ if (!page)
+- return NULL;
++ return ret;
+
+ BUG_ON(!PageLocked(page));
+
+ if (page_has_buffers(page)) {
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+- init_page_buffers(page, bdev, block, size);
+- return page;
++ end_block = init_page_buffers(page, bdev,
++ index << sizebits, size);
++ goto done;
+ }
+ if (!try_to_free_buffers(page))
+ goto failed;
+@@ -1032,15 +1040,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- init_page_buffers(page, bdev, block, size);
++ end_block = init_page_buffers(page, bdev, index << sizebits, size);
+ spin_unlock(&inode->i_mapping->private_lock);
+- return page;
+-
++done:
++ ret = (block < end_block) ? 1 : -ENXIO;
+ failed:
+- BUG();
+ unlock_page(page);
+ page_cache_release(page);
+- return NULL;
++ return ret;
+ }
+
+ /*
+@@ -1050,7 +1057,6 @@ failed:
+ static int
+ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ {
+- struct page *page;
+ pgoff_t index;
+ int sizebits;
+
+@@ -1074,22 +1080,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ bdevname(bdev, b));
+ return -EIO;
+ }
+- block = index << sizebits;
++
+ /* Create a page with the proper size buffers.. */
+- page = grow_dev_page(bdev, block, index, size);
+- if (!page)
+- return 0;
+- unlock_page(page);
+- page_cache_release(page);
+- return 1;
++ return grow_dev_page(bdev, block, index, size, sizebits);
+ }
+
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
+- int ret;
+- struct buffer_head *bh;
+-
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1102,21 +1100,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+-retry:
+- bh = __find_get_block(bdev, block, size);
+- if (bh)
+- return bh;
++ for (;;) {
++ struct buffer_head *bh;
++ int ret;
+
+- ret = grow_buffers(bdev, block, size);
+- if (ret == 0) {
+- free_more_memory();
+- goto retry;
+- } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
++
++ ret = grow_buffers(bdev, block, size);
++ if (ret < 0)
++ return NULL;
++ if (ret == 0)
++ free_more_memory();
+ }
+- return NULL;
+ }
+
+ /*
+@@ -1372,10 +1369,6 @@ EXPORT_SYMBOL(__find_get_block);
+ * which corresponds to the passed block_device, block and size. The
+ * returned buffer has its reference count incremented.
+ *
+- * __getblk() cannot fail - it just keeps trying. If you pass it an
+- * illegal block number, __getblk() will happily return a buffer_head
+- * which represents the non-existent block. Very weird.
+- *
+ * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
+ * attempt is failing. FIXME, perhaps?
+ */
+diff --git a/fs/compat.c b/fs/compat.c
+index c987875..e07a3d3 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -1174,11 +1174,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_readv(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_readv(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+@@ -1233,11 +1236,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_writev(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_writev(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a071348..f8d5fce 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -904,6 +904,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_allocated_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
++ ei->i_da_metadata_calc_last_lblock = 0;
+ spin_lock_init(&(ei->i_block_reservation_lock));
+ #ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+@@ -3107,6 +3108,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ int s, j, count = 0;
+
++ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
++ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
++ sbi->s_itb_per_group + 2);
++
+ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+ (grp * EXT4_BLOCKS_PER_GROUP(sb));
+ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 0c84100..5242006 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1687,7 +1687,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+ size_t n;
+ u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+- for (n = 0; n < count; n++) {
++ for (n = 0; n < count; n++, iov++) {
+ if (iov->iov_len > (size_t) max)
+ return -ENOMEM;
+ max -= iov->iov_len;
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 3db6b82..d774309 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -38,6 +38,8 @@
+ #include <linux/buffer_head.h> /* various write calls */
+ #include <linux/prefetch.h>
+
++#include "../pnfs.h"
++#include "../internal.h"
+ #include "blocklayout.h"
+
+ #define NFSDBG_FACILITY NFSDBG_PNFS_LD
+@@ -814,7 +816,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s max_resp_sz %u max_pages %d\n",
+ __func__, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index c69682a..4e2ee99 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -153,7 +153,7 @@ static int _preload_range(struct pnfs_inval_markings *marks,
+ count = (int)(end - start) / (int)tree->mtt_step_size;
+
+ /* Pre-malloc what memory we might need */
+- storage = kmalloc(sizeof(*storage) * count, GFP_NOFS);
++ storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
+ if (!storage)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ac28990..756f4df 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1103,7 +1103,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_fattr *fattr = NULL;
+ int error;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ parent = dget_parent(dentry);
+@@ -1508,7 +1508,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_open_context *ctx;
+ int openflags, ret = 0;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index d4bc9ed9..5195fd6 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -68,7 +68,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("%s: reply fsinfo: %d\n", __func__, status);
+- if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
++ if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
+ msg.rpc_resp = info->fattr;
+ status = rpc_call_sync(client, &msg, 0);
+diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
+index ed388aa..bd5d9cf 100644
+--- a/fs/nfs/nfs4filelayoutdev.c
++++ b/fs/nfs/nfs4filelayoutdev.c
+@@ -721,7 +721,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 8000459..d20221d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5769,11 +5769,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+ dprintk("<-- %s\n", __func__);
+ }
+
++static size_t max_response_pages(struct nfs_server *server)
++{
++ u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
++ return nfs_page_array_len(0, max_resp_sz);
++}
++
++static void nfs4_free_pages(struct page **pages, size_t size)
++{
++ int i;
++
++ if (!pages)
++ return;
++
++ for (i = 0; i < size; i++) {
++ if (!pages[i])
++ break;
++ __free_page(pages[i]);
++ }
++ kfree(pages);
++}
++
++static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
++{
++ struct page **pages;
++ int i;
++
++ pages = kcalloc(size, sizeof(struct page *), gfp_flags);
++ if (!pages) {
++ dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
++ return NULL;
++ }
++
++ for (i = 0; i < size; i++) {
++ pages[i] = alloc_page(gfp_flags);
++ if (!pages[i]) {
++ dprintk("%s: failed to allocate page\n", __func__);
++ nfs4_free_pages(pages, size);
++ return NULL;
++ }
++ }
++
++ return pages;
++}
++
+ static void nfs4_layoutget_release(void *calldata)
+ {
+ struct nfs4_layoutget *lgp = calldata;
++ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+
+ dprintk("--> %s\n", __func__);
++ nfs4_free_pages(lgp->args.layout.pages, max_pages);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+@@ -5785,9 +5832,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_release = nfs4_layoutget_release,
+ };
+
+-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
++int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ {
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+@@ -5805,6 +5853,13 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+
+ dprintk("--> %s\n", __func__);
+
++ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
++ if (!lgp->args.layout.pages) {
++ nfs4_layoutget_release(lgp);
++ return -ENOMEM;
++ }
++ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
++
+ lgp->res.layoutp = &lgp->args.layout;
+ lgp->res.seq_res.sr_slot = NULL;
+ task = rpc_run_task(&task_setup_data);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index f881a63..3ad6595 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -575,9 +575,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+- struct page **pages = NULL;
+- int i;
+- u32 max_resp_sz, max_pages;
+
+ dprintk("--> %s\n", __func__);
+
+@@ -586,20 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ if (lgp == NULL)
+ return NULL;
+
+- /* allocate pages for xdr post processing */
+- max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
+-
+- pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
+- if (!pages)
+- goto out_err_free;
+-
+- for (i = 0; i < max_pages; i++) {
+- pages[i] = alloc_page(gfp_flags);
+- if (!pages[i])
+- goto out_err_free;
+- }
+-
+ lgp->args.minlength = PAGE_CACHE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
+@@ -608,39 +591,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+- lgp->args.layout.pages = pages;
+- lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->lsegpp = &lseg;
+ lgp->gfp_flags = gfp_flags;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+- nfs4_proc_layoutget(lgp);
++ nfs4_proc_layoutget(lgp, gfp_flags);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
+ }
+
+- /* free xdr pages */
+- for (i = 0; i < max_pages; i++)
+- __free_page(pages[i]);
+- kfree(pages);
+-
+ return lseg;
+-
+-out_err_free:
+- /* free any allocated xdr pages, lgp as it's not used */
+- if (pages) {
+- for (i = 0; i < max_pages; i++) {
+- if (!pages[i])
+- break;
+- __free_page(pages[i]);
+- }
+- kfree(pages);
+- }
+- kfree(lgp);
+- return NULL;
+ }
+
+ /* Initiates a LAYOUTRETURN(FILE) */
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 53d593a..c946b1b 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -162,7 +162,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
+ struct pnfs_devicelist *devlist);
+ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
++extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+
+ /* pnfs.c */
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 376cd65..6e85ec6 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -3087,4 +3087,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ return res;
+ }
+
++MODULE_ALIAS("nfs4");
++
+ #endif /* CONFIG_NFS_V4 */
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index c6e523a..301391a 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1742,12 +1742,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
+ if (nfs_wdata_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_cache;
+
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
+ if (nfs_commit_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_mempool;
+
+ /*
+ * NFS congestion size, scale with available memory.
+@@ -1770,6 +1770,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_congestion_kb = 256*1024;
+
+ return 0;
++
++out_destroy_write_mempool:
++ mempool_destroy(nfs_wdata_mempool);
++out_destroy_write_cache:
++ kmem_cache_destroy(nfs_wdata_cachep);
++ return -ENOMEM;
+ }
+
+ void nfs_destroy_writepagecache(void)
+diff --git a/fs/open.c b/fs/open.c
+index e2b5d51..b8485d3 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -882,9 +882,10 @@ static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+ int lookup_flags = 0;
+ int acc_mode;
+
+- if (!(flags & O_CREAT))
+- mode = 0;
+- op->mode = mode;
++ if (flags & O_CREAT)
++ op->mode = (mode & S_IALLUGO) | S_IFREG;
++ else
++ op->mode = 0;
+
+ /* Must never be set by userspace */
+ flags &= ~FMODE_NONOTIFY;
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 2da1715..4619247 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -290,7 +290,7 @@ handle_fragments:
+
+ check_directory_table:
+ /* Sanity check directory_table */
+- if (msblk->directory_table >= next_table) {
++ if (msblk->directory_table > next_table) {
+ err = -EINVAL;
+ goto failed_mount;
+ }
+diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
+index 580a6d3..c04e0db 100644
+--- a/include/asm-generic/mutex-xchg.h
++++ b/include/asm-generic/mutex-xchg.h
+@@ -26,7 +26,13 @@ static inline void
+ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- fail_fn(count);
++ /*
++ * We failed to acquire the lock, so mark it contended
++ * to ensure that any waiting tasks are woken up by the
++ * unlock slow path.
++ */
++ if (likely(atomic_xchg(count, -1) != 1))
++ fail_fn(count);
+ }
+
+ /**
+@@ -43,7 +49,8 @@ static inline int
+ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- return fail_fn(count);
++ if (likely(atomic_xchg(count, -1) != 1))
++ return fail_fn(count);
+ return 0;
+ }
+
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 4269c3f..93629fc 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -775,6 +775,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
++/**
++ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
++ * @vend: the 16 bit USB Vendor ID
++ * @cl: bInterfaceClass value
++ * @sc: bInterfaceSubClass value
++ * @pr: bInterfaceProtocol value
++ *
++ * This macro is used to create a struct usb_device_id that matches a
++ * specific vendor with a specific class of interfaces.
++ *
++ * This is especially useful when explicitly matching devices that have
++ * vendor specific bDeviceClass values, but standards-compliant interfaces.
++ */
++#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (vend), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr)
++
+ /* ----------------------------------------------------------------------- */
+
+ /* Stuff for dynamic usb ids */
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 5bf0790..31fdc48 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+ }
+
+@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
+
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+- free_chunk(new);
++ fsnotify_put_mark(&new->mark);
+ goto Fallback;
+ }
+
+@@ -293,7 +292,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+
+ Fallback:
+@@ -322,7 +320,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
+- free_chunk(chunk);
++ fsnotify_put_mark(entry);
+ return -ENOSPC;
+ }
+
+@@ -332,6 +330,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&hash_lock);
+ chunk->dead = 1;
+ spin_unlock(&entry->lock);
++ fsnotify_get_mark(entry);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
+ return 0;
+@@ -396,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ spin_unlock(&old_entry->lock);
+- free_chunk(chunk);
++ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
+ return -ENOSPC;
+ }
+@@ -412,6 +411,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
++ fsnotify_get_mark(chunk_entry);
+ fsnotify_destroy_mark(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+@@ -445,7 +445,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+- fsnotify_put_mark(old_entry); /* and kill it */
+ return 0;
+ }
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index e0431c4..910db7d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4355,6 +4355,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+ #endif
+
++static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
++{
++ u64 temp = (__force u64) rtime;
++
++ temp *= (__force u64) utime;
++
++ if (sizeof(cputime_t) == 4)
++ temp = div_u64(temp, (__force u32) total);
++ else
++ temp = div64_u64(temp, (__force u64) total);
++
++ return (__force cputime_t) temp;
++}
++
+ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ {
+ cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+@@ -4364,13 +4378,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ */
+ rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(utime, rtime, total);
++ else
+ utime = rtime;
+
+ /*
+@@ -4397,13 +4407,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ total = cputime_add(cputime.utime, cputime.stime);
+ rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= cputime.utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(cputime.utime, rtime, total);
++ else
+ utime = rtime;
+
+ sig->prev_utime = max(sig->prev_utime, utime);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 48febd7..86eb848 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1977,10 +1977,10 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
+ * proportional to the fraction of recently scanned pages on
+ * each list that were recently referenced and in active use.
+ */
+- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
++ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
+ ap /= reclaim_stat->recent_rotated[0] + 1;
+
+- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
++ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
+ fp /= reclaim_stat->recent_rotated[1] + 1;
+ spin_unlock_irq(&zone->lru_lock);
+
+@@ -1993,7 +1993,7 @@ out:
+ unsigned long scan;
+
+ scan = zone_nr_lru_pages(zone, sc, l);
+- if (priority || noswap) {
++ if (priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= priority;
+ if (!scan && force_scan)
+ scan = SWAP_CLUSTER_MAX;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 643a41b..6033f02 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1411,7 +1411,13 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
+ if (conn->type == ACL_LINK) {
+ conn->state = BT_CONFIG;
+ hci_conn_hold(conn);
+- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
++
++ if (!conn->out &&
++ !(conn->ssp_mode && conn->hdev->ssp_mode) &&
++ !hci_find_link_key(hdev, &ev->bdaddr))
++ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
++ else
++ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ } else
+ conn->state = BT_CONNECTED;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 17b5b1c..dd76177 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -862,6 +862,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+ write_lock_bh(&conn->chan_lock);
+
+ hci_conn_hold(conn->hcon);
++ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
+@@ -2263,12 +2264,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+- switch (type) {
+- case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
+- goto done;
+- }
++ if (type != L2CAP_CONF_RFC)
++ continue;
++
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
++ goto done;
+ }
+
+ /* Use sane default values in case a misbehaving remote device
+diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
+index 75c3582..fb85d37 100644
+--- a/net/dccp/ccid.h
++++ b/net/dccp/ccid.h
+@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 9ed2cd0..3282453 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -315,7 +315,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+ */
+ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ {
+- struct svc_serv *serv = xprt->xpt_server;
+ struct svc_pool *pool;
+ struct svc_rqst *rqstp;
+ int cpu;
+@@ -361,8 +360,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ rqstp, rqstp->rq_xprt);
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ pool->sp_stats.threads_woken++;
+ wake_up(&rqstp->rq_wait);
+ } else {
+@@ -642,8 +639,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ if (xprt) {
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+ /* As there is a shortage of threads and this request
+ * had to be queued, don't allow the thread to wait so
+@@ -740,6 +735,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ else
+ len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ dprintk("svc: got len=%d\n", len);
++ rqstp->rq_reserved = serv->sv_max_mesg;
++ atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ }
+ svc_xprt_received(xprt);
+
+@@ -796,7 +793,8 @@ int svc_send(struct svc_rqst *rqstp)
+
+ /* Grab mutex to serialize outgoing data. */
+ mutex_lock(&xprt->xpt_mutex);
+- if (test_bit(XPT_DEAD, &xprt->xpt_flags))
++ if (test_bit(XPT_DEAD, &xprt->xpt_flags)
++ || test_bit(XPT_CLOSE, &xprt->xpt_flags))
+ len = -ENOTCONN;
+ else
+ len = xprt->xpt_ops->xpo_sendto(rqstp);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 71bed1c..296192c 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1136,9 +1136,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ if (len >= 0)
+ svsk->sk_tcplen += len;
+ if (len != want) {
++ svc_tcp_save_pages(svsk, rqstp);
+ if (len < 0 && len != -EAGAIN)
+ goto err_other;
+- svc_tcp_save_pages(svsk, rqstp);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_noclose;
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 254ab52..2210b83 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
+ if (digi1 & AC_DIG1_EMPHASIS)
+ snd_iprintf(buffer, " Preemphasis");
+ if (digi1 & AC_DIG1_COPYRIGHT)
+- snd_iprintf(buffer, " Copyright");
++ snd_iprintf(buffer, " Non-Copyright");
+ if (digi1 & AC_DIG1_NONAUDIO)
+ snd_iprintf(buffer, " Non-Audio");
+ if (digi1 & AC_DIG1_PROFESSIONAL)
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 35abe3c..b22989e 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -276,6 +276,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
++ snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+@@ -287,6 +291,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
++ snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 90117f8..90e5005 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -270,7 +270,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
+
+ /* Mic select */
+ static const struct snd_kcontrol_new wm9712_mic_src_controls =
+-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
++SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
+
+ /* diff select */
+ static const struct snd_kcontrol_new wm9712_diff_sel_controls =
+@@ -289,7 +289,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectl_controls),
+ SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectr_controls),
+-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
++SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
++ &wm9712_mic_src_controls),
++SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_mic_src_controls),
+ SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_diff_sel_controls),
+@@ -317,6 +319,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
++SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
+ SND_SOC_DAPM_OUTPUT("MONOOUT"),
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+@@ -377,6 +380,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
+ {"Mic PGA", NULL, "MIC1"},
+ {"Mic PGA", NULL, "MIC2"},
+
++ /* microphones */
++ {"Differential Mic", NULL, "MIC1"},
++ {"Differential Mic", NULL, "MIC2"},
++ {"Left Mic Select Source", "Mic 1", "MIC1"},
++ {"Left Mic Select Source", "Mic 2", "MIC2"},
++ {"Left Mic Select Source", "Stereo", "MIC1"},
++ {"Left Mic Select Source", "Differential", "Differential Mic"},
++ {"Right Mic Select Source", "Mic 1", "MIC1"},
++ {"Right Mic Select Source", "Mic 2", "MIC2"},
++ {"Right Mic Select Source", "Stereo", "MIC2"},
++ {"Right Mic Select Source", "Differential", "Differential Mic"},
++
+ /* left capture selector */
+ {"Left Capture Select", "Mic", "MIC1"},
+ {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
diff --git a/3.2.54/1029_linux-3.2.30.patch b/3.2.54/1029_linux-3.2.30.patch
new file mode 100644
index 0000000..86aea4b
--- /dev/null
+++ b/3.2.54/1029_linux-3.2.30.patch
@@ -0,0 +1,5552 @@
+diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+index ab22fe6..e39a0c0 100644
+--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
++++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+@@ -10,8 +10,8 @@ Required properties:
+
+ Optional properties:
+ - fsl,card-wired : Indicate the card is wired to host permanently
+-- fsl,cd-internal : Indicate to use controller internal card detection
+-- fsl,wp-internal : Indicate to use controller internal write protection
++- fsl,cd-controller : Indicate to use controller internal card detection
++- fsl,wp-controller : Indicate to use controller internal write protection
+ - cd-gpios : Specify GPIOs for card detection
+ - wp-gpios : Specify GPIOs for write protection
+
+@@ -21,8 +21,8 @@ esdhc@70004000 {
+ compatible = "fsl,imx51-esdhc";
+ reg = <0x70004000 0x4000>;
+ interrupts = <1>;
+- fsl,cd-internal;
+- fsl,wp-internal;
++ fsl,cd-controller;
++ fsl,wp-controller;
+ };
+
+ esdhc@70008000 {
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index 2871fd5..99d4e44 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -20,6 +20,8 @@ Supported adapters:
+ * Intel Patsburg (PCH)
+ * Intel DH89xxCC (PCH)
+ * Intel Panther Point (PCH)
++ * Intel Lynx Point (PCH)
++ * Intel Lynx Point-LP (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index d96fc2a..9fd7e60 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 987c72d..9fdc151 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -2065,6 +2065,7 @@ source "drivers/cpufreq/Kconfig"
+ config CPU_FREQ_IMX
+ tristate "CPUfreq driver for i.MX CPUs"
+ depends on ARCH_MXC && CPU_FREQ
++ select CPU_FREQ_TABLE
+ help
+ This enables the CPUfreq driver for i.MX CPUs.
+
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index dfcf3b0..362c7ca 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
+ zinstall uinstall install: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
+
+-%.dtb:
++%.dtb: scripts
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
+-dtbs:
++dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
+ # We use MRPROPER_FILES and CLEAN_FILES now
+diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
+index f8766af..4790df2 100644
+--- a/arch/arm/boot/dts/imx51-babbage.dts
++++ b/arch/arm/boot/dts/imx51-babbage.dts
+@@ -29,8 +29,8 @@
+ aips@70000000 { /* aips-1 */
+ spba@70000000 {
+ esdhc@70004000 { /* ESDHC1 */
+- fsl,cd-internal;
+- fsl,wp-internal;
++ fsl,cd-controller;
++ fsl,wp-controller;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 8512475..9b419ab 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -232,6 +232,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+ #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+
++#define pte_none(pte) (!pte_val(pte))
++#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
++#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
++#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
++#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
++#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
++#define pte_special(pte) (0)
++
++#define pte_present_user(pte) \
++ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
++ (L_PTE_PRESENT | L_PTE_USER))
++
+ #if __LINUX_ARM_ARCH__ < 6
+ static inline void __sync_icache_dcache(pte_t pteval)
+ {
+@@ -243,25 +255,15 @@ extern void __sync_icache_dcache(pte_t pteval);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+ {
+- if (addr >= TASK_SIZE)
+- set_pte_ext(ptep, pteval, 0);
+- else {
++ unsigned long ext = 0;
++
++ if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ __sync_icache_dcache(pteval);
+- set_pte_ext(ptep, pteval, PTE_EXT_NG);
++ ext |= PTE_EXT_NG;
+ }
+-}
+
+-#define pte_none(pte) (!pte_val(pte))
+-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+-#define pte_special(pte) (0)
+-
+-#define pte_present_user(pte) \
+- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+- (L_PTE_PRESENT | L_PTE_USER))
++ set_pte_ext(ptep, pteval, ext);
++}
+
+ #define PTE_BIT_FUNC(fn,op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 814a52a9..2bc1a8e 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -160,6 +160,12 @@ static int debug_arch_supported(void)
+ arch >= ARM_DEBUG_ARCH_V7_1;
+ }
+
++/* Can we determine the watchpoint access type from the fsr? */
++static int debug_exception_updates_fsr(void)
++{
++ return 0;
++}
++
+ /* Determine number of WRP registers available. */
+ static int get_num_wrp_resources(void)
+ {
+@@ -620,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
+ info->address &= ~alignment_mask;
+ info->ctrl.len <<= offset;
+
+- /*
+- * Currently we rely on an overflow handler to take
+- * care of single-stepping the breakpoint when it fires.
+- * In the case of userspace breakpoints on a core with V7 debug,
+- * we can use the mismatch feature as a poor-man's hardware
+- * single-step, but this only works for per-task breakpoints.
+- */
+- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+- pr_warning("overflow handler required but none found\n");
+- ret = -EINVAL;
++ if (!bp->overflow_handler) {
++ /*
++ * Mismatch breakpoints are required for single-stepping
++ * breakpoints.
++ */
++ if (!core_has_mismatch_brps())
++ return -EINVAL;
++
++ /* We don't allow mismatch breakpoints in kernel space. */
++ if (arch_check_bp_in_kernelspace(bp))
++ return -EPERM;
++
++ /*
++ * Per-cpu breakpoints are not supported by our stepping
++ * mechanism.
++ */
++ if (!bp->hw.bp_target)
++ return -EINVAL;
++
++ /*
++ * We only support specific access types if the fsr
++ * reports them.
++ */
++ if (!debug_exception_updates_fsr() &&
++ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
++ info->ctrl.type == ARM_BREAKPOINT_STORE))
++ return -EINVAL;
+ }
++
+ out:
+ return ret;
+ }
+@@ -707,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ goto unlock;
+
+ /* Check that the access type matches. */
+- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+- HW_BREAKPOINT_R;
+- if (!(access & hw_breakpoint_type(wp)))
+- goto unlock;
++ if (debug_exception_updates_fsr()) {
++ access = (fsr & ARM_FSR_ACCESS_MASK) ?
++ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
++ if (!(access & hw_breakpoint_type(wp)))
++ goto unlock;
++ }
+
+ /* We have a winner. */
+ info->trigger = addr;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 8380bd1..7ac5dfd 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -380,20 +380,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ #endif
+ instr = *(u32 *) pc;
+ } else if (thumb_mode(regs)) {
+- get_user(instr, (u16 __user *)pc);
++ if (get_user(instr, (u16 __user *)pc))
++ goto die_sig;
+ if (is_wide_instruction(instr)) {
+ unsigned int instr2;
+- get_user(instr2, (u16 __user *)pc+1);
++ if (get_user(instr2, (u16 __user *)pc+1))
++ goto die_sig;
+ instr <<= 16;
+ instr |= instr2;
+ }
+- } else {
+- get_user(instr, (u32 __user *)pc);
++ } else if (get_user(instr, (u32 __user *)pc)) {
++ goto die_sig;
+ }
+
+ if (call_undef_hook(regs, instr) == 0)
+ return;
+
++die_sig:
+ #ifdef CONFIG_DEBUG_USER
+ if (user_debug & UDBG_UNDEFINED) {
+ printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
+diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
+index 1620b15..cb105bf8 100644
+--- a/arch/arm/mach-dove/common.c
++++ b/arch/arm/mach-dove/common.c
+@@ -92,7 +92,7 @@ void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &dove_mbus_dram_info,
+ DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
+- 0, get_tclk());
++ 0, get_tclk(), 1600);
+ }
+
+ /*****************************************************************************
+diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
+index 20ed2d5..f8f7437 100644
+--- a/arch/arm/mach-imx/hotplug.c
++++ b/arch/arm/mach-imx/hotplug.c
+@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
+ : "cc");
+ }
+
+-static inline void cpu_leave_lowpower(void)
+-{
+- unsigned int v;
+-
+- asm volatile(
+- "mrc p15, 0, %0, c1, c0, 0\n"
+- " orr %0, %0, %1\n"
+- " mcr p15, 0, %0, c1, c0, 0\n"
+- " mrc p15, 0, %0, c1, c0, 1\n"
+- " orr %0, %0, %2\n"
+- " mcr p15, 0, %0, c1, c0, 1\n"
+- : "=&r" (v)
+- : "Ir" (CR_C), "Ir" (0x40)
+- : "cc");
+-}
+-
+ /*
+ * platform-specific code to shutdown a CPU
+ *
+@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
+ {
+ cpu_enter_lowpower();
+ imx_enable_cpu(cpu, false);
+- cpu_do_idle();
+- cpu_leave_lowpower();
+
+- /* We should never return from idle */
+- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
++ /* spin here until hardware takes it down */
++ while (1)
++ ;
+ }
+
+ int platform_cpu_disable(unsigned int cpu)
+diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
+index c5dbbb3..06faa97 100644
+--- a/arch/arm/mach-kirkwood/common.c
++++ b/arch/arm/mach-kirkwood/common.c
+@@ -88,7 +88,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+
+ orion_ge00_init(eth_data, &kirkwood_mbus_dram_info,
+ GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
+- IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
++ IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk, 1600);
+ }
+
+
+@@ -102,7 +102,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
+
+ orion_ge01_init(eth_data, &kirkwood_mbus_dram_info,
+ GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
+- IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
++ IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk, 1600);
+ }
+
+
+diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
+index d90e244..570ee4d 100644
+--- a/arch/arm/mach-mv78xx0/common.c
++++ b/arch/arm/mach-mv78xx0/common.c
+@@ -202,7 +202,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &mv78xx0_mbus_dram_info,
+ GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
+- IRQ_MV78XX0_GE_ERR, get_tclk());
++ IRQ_MV78XX0_GE_ERR, get_tclk(),
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+@@ -213,7 +214,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge01_init(eth_data, &mv78xx0_mbus_dram_info,
+ GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
+- NO_IRQ, get_tclk());
++ NO_IRQ, get_tclk(),
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
+index 53b68b8..20260db 100644
+--- a/arch/arm/mach-orion5x/common.c
++++ b/arch/arm/mach-orion5x/common.c
+@@ -95,7 +95,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &orion5x_mbus_dram_info,
+ ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
+- IRQ_ORION5X_ETH_ERR, orion5x_tclk);
++ IRQ_ORION5X_ETH_ERR, orion5x_tclk,
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index 1a8d4aa..8fda9f7 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -236,8 +236,6 @@ void __sync_icache_dcache(pte_t pteval)
+ struct page *page;
+ struct address_space *mapping;
+
+- if (!pte_present_user(pteval))
+- return;
+ if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+ /* only flush non-aliasing VIPT caches for exec mappings */
+ return;
+diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
+index af3b92b..f9adbbb 100644
+--- a/arch/arm/plat-omap/dmtimer.c
++++ b/arch/arm/plat-omap/dmtimer.c
+@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
+
+ void omap_dm_timer_disable(struct omap_dm_timer *timer)
+ {
+- pm_runtime_put(&timer->pdev->dev);
++ pm_runtime_put_sync(&timer->pdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
+
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 11dce87..8a6886a 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -263,10 +263,12 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk)
++ int tclk,
++ unsigned int tx_csum_limit)
+ {
+ fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
+ mapbase + 0x2000, SZ_16K - 1, irq_err);
++ orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
+ ge_complete(&orion_ge00_shared_data, mbus_dram_info, tclk,
+ orion_ge00_resources, irq, &orion_ge00_shared,
+ eth_data, &orion_ge00);
+@@ -317,10 +319,12 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk)
++ int tclk,
++ unsigned int tx_csum_limit)
+ {
+ fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
+ mapbase + 0x2000, SZ_16K - 1, irq_err);
++ orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
+ ge_complete(&orion_ge01_shared_data, mbus_dram_info, tclk,
+ orion_ge01_resources, irq, &orion_ge01_shared,
+ eth_data, &orion_ge01);
+diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
+index a2c0e31..b637dae 100644
+--- a/arch/arm/plat-orion/include/plat/common.h
++++ b/arch/arm/plat-orion/include/plat/common.h
+@@ -41,14 +41,16 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk);
++ int tclk,
++ unsigned int tx_csum_limit);
+
+ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
+ struct mbus_dram_target_info *mbus_dram_info,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk);
++ int tclk,
++ unsigned int tx_csum_limit);
+
+ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
+ struct mbus_dram_target_info *mbus_dram_info,
+diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
+index 8a90b6a..1eedf8d 100644
+--- a/arch/arm/plat-s3c24xx/dma.c
++++ b/arch/arm/plat-s3c24xx/dma.c
+@@ -431,7 +431,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
+ * when necessary.
+ */
+
+-int s3c2410_dma_enqueue(unsigned int channel, void *id,
++int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
+ dma_addr_t data, int size)
+ {
+ struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 4054b31..c4b779b 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -247,7 +247,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
+
+-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
++#define ATOMIC_INIT(i) { (i) }
+
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+@@ -256,7 +256,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #ifdef CONFIG_64BIT
+
+-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
++#define ATOMIC64_INIT(i) { (i) }
+
+ static __inline__ s64
+ __atomic64_add_return(s64 i, atomic64_t *v)
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 7c5324f..cc20b0a 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -79,6 +79,7 @@ int main(void)
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(NMI_MASK, NMI_MASK);
+ DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
++ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
+ #else
+ DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+ #endif /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
+index 2cc451a..6856062 100644
+--- a/arch/powerpc/kernel/dbell.c
++++ b/arch/powerpc/kernel/dbell.c
+@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
+
+ void doorbell_cause_ipi(int cpu, unsigned long data)
+ {
++ /* Order previous accesses vs. msgsnd, which is treated as a store */
++ mb();
+ ppc_msgsnd(PPC_DBELL, 0, data);
+ }
+
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index d834425..654fc53 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -380,6 +380,12 @@ _GLOBAL(ret_from_fork)
+ li r3,0
+ b syscall_exit
+
++ .section ".toc","aw"
++DSCR_DEFAULT:
++ .tc dscr_default[TC],dscr_default
++
++ .section ".text"
++
+ /*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+@@ -519,9 +525,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+ mr r1,r8 /* start using new stack pointer */
+ std r7,PACAKSAVE(r13)
+
+- ld r6,_CCR(r1)
+- mtcrf 0xFF,r6
+-
+ #ifdef CONFIG_ALTIVEC
+ BEGIN_FTR_SECTION
+ ld r0,THREAD_VRSAVE(r4)
+@@ -530,14 +533,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif /* CONFIG_ALTIVEC */
+ #ifdef CONFIG_PPC64
+ BEGIN_FTR_SECTION
++ lwz r6,THREAD_DSCR_INHERIT(r4)
++ ld r7,DSCR_DEFAULT@toc(2)
+ ld r0,THREAD_DSCR(r4)
+- cmpd r0,r25
+- beq 1f
++ cmpwi r6,0
++ bne 1f
++ ld r0,0(r7)
++1: cmpd r0,r25
++ beq 2f
+ mtspr SPRN_DSCR,r0
+-1:
++2:
+ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+ #endif
+
++ ld r6,_CCR(r1)
++ mtcrf 0xFF,r6
++
+ /* r3-r13 are destroyed -- Cort */
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 6457574..d687e3f 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -778,16 +778,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
+ #endif /* CONFIG_PPC_STD_MMU_64 */
+ #ifdef CONFIG_PPC64
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+- if (current->thread.dscr_inherit) {
+- p->thread.dscr_inherit = 1;
+- p->thread.dscr = current->thread.dscr;
+- } else if (0 != dscr_default) {
+- p->thread.dscr_inherit = 1;
+- p->thread.dscr = dscr_default;
+- } else {
+- p->thread.dscr_inherit = 0;
+- p->thread.dscr = 0;
+- }
++ p->thread.dscr_inherit = current->thread.dscr_inherit;
++ p->thread.dscr = current->thread.dscr;
+ }
+ #endif
+
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 6df7090..fe04b4a 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -214,8 +214,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+ char *message = (char *)&info->messages;
+
++ /*
++ * Order previous accesses before accesses in the IPI handler.
++ */
++ smp_mb();
+ message[msg] = 1;
+- mb();
++ /*
++ * cause_ipi functions are required to include a full barrier
++ * before doing whatever causes the IPI.
++ */
+ smp_ops->cause_ipi(cpu, info->data);
+ }
+
+@@ -227,7 +234,7 @@ irqreturn_t smp_ipi_demux(void)
+ mb(); /* order any irq clear */
+
+ do {
+- all = xchg_local(&info->messages, 0);
++ all = xchg(&info->messages, 0);
+
+ #ifdef __BIG_ENDIAN
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
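
Two things change in this smp.c hunk: smp_muxed_ipi_message_pass gets an explicit full barrier before cause_ipi, and smp_ipi_demux switches from xchg_local to a full-barrier xchg so that clearing the message word is ordered against other CPUs' stores. A hedged userspace analogue of the demux loop, consuming all pending message bits in one atomic exchange (the message numbering here is invented for the example):

#include <stdatomic.h>
#include <stdio.h>

enum { MSG_CALL_FUNC = 0, MSG_RESCHED = 1, MSG_TIMER = 2 };

static atomic_uint messages;          /* one bit per message type */

/* Sender: set the bit, then (in the kernel) cause the IPI. The
 * sequentially consistent RMW already carries a full barrier. */
static void post_message(int msg)
{
    atomic_fetch_or(&messages, 1u << msg);
}

/* Receiver: grab-and-clear every pending bit in one atomic step. A
 * bit set concurrently is either handled in this pass or left for
 * the next exchange, never lost -- the point of using xchg here. */
static void demux(void)
{
    unsigned int all;

    while ((all = atomic_exchange(&messages, 0)) != 0) {
        if (all & (1u << MSG_CALL_FUNC))
            printf("call-function IPI\n");
        if (all & (1u << MSG_RESCHED))
            printf("reschedule IPI\n");
        if (all & (1u << MSG_TIMER))
            printf("timer broadcast IPI\n");
    }
}

int main(void)
{
    post_message(MSG_RESCHED);
    post_message(MSG_TIMER);
    demux();
    return 0;
}
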
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index ce035c1..55be64d 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class,
+ return sprintf(buf, "%lx\n", dscr_default);
+ }
+
++static void update_dscr(void *dummy)
++{
++ if (!current->thread.dscr_inherit) {
++ current->thread.dscr = dscr_default;
++ mtspr(SPRN_DSCR, dscr_default);
++ }
++}
++
+ static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, const char *buf,
+ size_t count)
+@@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ return -EINVAL;
+ dscr_default = val;
+
++ on_each_cpu(update_dscr, NULL, 1);
++
+ return count;
+ }
+
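
Taken together with the copy_thread simplification above (the child now copies the parent's dscr and dscr_inherit verbatim), store_dscr_default pushes a new default onto every CPU via on_each_cpu, but update_dscr only touches threads that never set dscr_inherit: a thread that wrote the SPR itself keeps its own value. Stripped of the SPR access, the policy is roughly this (the per-thread state and names below are illustrative):

#include <stdio.h>

#define NR_THREADS 3

struct thread_state {
    int dscr_inherit;   /* 1 once the thread chose its own value */
    unsigned long dscr;
};

static unsigned long dscr_default;
static struct thread_state threads[NR_THREADS];

/* Analogue of update_dscr() run on each CPU by on_each_cpu(): only
 * threads still tracking the default are updated. */
static void apply_new_default(unsigned long val)
{
    dscr_default = val;
    for (int i = 0; i < NR_THREADS; i++)
        if (!threads[i].dscr_inherit)
            threads[i].dscr = val;
}

int main(void)
{
    threads[1].dscr_inherit = 1;    /* thread 1 set its own DSCR */
    threads[1].dscr = 7;

    apply_new_default(16);
    for (int i = 0; i < NR_THREADS; i++)
        printf("thread %d: dscr=%lu inherit=%d\n",
               i, threads[i].dscr, threads[i].dscr_inherit);
    return 0;
}
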
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 5459d14..82dcd4d 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -942,8 +942,9 @@ static int emulate_instruction(struct pt_regs *regs)
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mtdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+- mtspr(SPRN_DSCR, regs->gpr[rd]);
++ current->thread.dscr = regs->gpr[rd];
+ current->thread.dscr_inherit = 1;
++ mtspr(SPRN_DSCR, current->thread.dscr);
+ return 0;
+ }
+ #endif
+diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
+index 9518d36..5c76bf7 100644
+--- a/arch/powerpc/sysdev/xics/icp-hv.c
++++ b/arch/powerpc/sysdev/xics/icp-hv.c
+@@ -27,33 +27,53 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
+ {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
++ unsigned int ret = XICS_IRQ_SPURIOUS;
+
+ rc = plpar_hcall(H_XIRR, retbuf, cppr);
+- if (rc != H_SUCCESS)
+- panic(" bad return code xirr - rc = %lx\n", rc);
+- return (unsigned int)retbuf[0];
++ if (rc == H_SUCCESS) {
++ ret = (unsigned int)retbuf[0];
++ } else {
++ pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
++ __func__, cppr, rc);
++ WARN_ON_ONCE(1);
++ }
++
++ return ret;
+ }
+
+ static inline void icp_hv_set_xirr(unsigned int value)
+ {
+ long rc = plpar_hcall_norets(H_EOI, value);
+- if (rc != H_SUCCESS)
+- panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
++ __func__, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static inline void icp_hv_set_cppr(u8 value)
+ {
+ long rc = plpar_hcall_norets(H_CPPR, value);
+- if (rc != H_SUCCESS)
+- panic("bad return code cppr - rc = %lx\n", rc);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
++ __func__, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static inline void icp_hv_set_qirr(int n_cpu , u8 value)
+ {
+- long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
+- value);
+- if (rc != H_SUCCESS)
+- panic("bad return code qirr - rc = %lx\n", rc);
++ int hw_cpu = get_hard_smp_processor_id(n_cpu);
++ long rc;
++
++ /* Make sure all previous accesses are ordered before IPI sending */
++ mb();
++ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
++ "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static void icp_hv_eoi(struct irq_data *d)
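
The icp-hv conversion demotes hypervisor-call failures from panic() to pr_err() plus WARN_ON_ONCE(), and icp_hv_get_xirr now falls back to XICS_IRQ_SPURIOUS so the interrupt path can limp on instead of taking the machine down. A minimal sketch of the warn-once-and-degrade pattern in plain C (warn_once and hcall_xirr are stand-ins, not kernel APIs):

#include <stdio.h>

#define H_SUCCESS 0
#define IRQ_SPURIOUS 0xff   /* stand-in for XICS_IRQ_SPURIOUS */

/* Fire the diagnostic the first time only, like WARN_ON_ONCE(). */
#define warn_once(fmt, ...)                                 \
    do {                                                    \
        static int warned;                                  \
        if (!warned) {                                      \
            warned = 1;                                     \
            fprintf(stderr, fmt, __VA_ARGS__);              \
        }                                                   \
    } while (0)

/* Hypothetical hypervisor call; returns nonzero on failure. */
static long hcall_xirr(unsigned int *xirr)
{
    *xirr = 42;
    return H_SUCCESS;
}

static unsigned int get_xirr(void)
{
    unsigned int ret = IRQ_SPURIOUS;   /* safe default on failure */
    unsigned int xirr;
    long rc = hcall_xirr(&xirr);

    if (rc == H_SUCCESS)
        ret = xirr;
    else
        warn_once("%s: bad return code xirr, rc=%ld\n", __func__, rc);

    return ret;
}

int main(void)
{
    printf("xirr = %u\n", get_xirr());
    return 0;
}
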
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index b2c7179..bb104b4 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
+ memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
++ for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
++ unsigned long mfn = pfn_to_mfn(pfn);
++
++ if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
++ continue;
++ WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
++ pfn, mfn);
+
+- for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ }
+ }
+
+ static unsigned long __init xen_release_chunk(unsigned long start,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index fb65915..608257a 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -386,6 +386,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE(0x1b4b, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(0x1b4b, 0x9192),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 8323fc3..3f1799b 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -1625,10 +1625,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- if (!req->flags) {
+- DRM_ERROR("no operation set\n");
++ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+- }
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+@@ -1641,7 +1639,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set) {
+- DRM_ERROR("crtc does not support cursor\n");
+ ret = -ENXIO;
+ goto out;
+ }
+@@ -1654,7 +1651,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+- DRM_ERROR("crtc does not support cursor\n");
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1692,14 +1688,11 @@ int drm_mode_addfb(struct drm_device *dev,
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- if ((config->min_width > r->width) || (r->width > config->max_width)) {
+- DRM_ERROR("mode new framebuffer width not within limits\n");
++ if ((config->min_width > r->width) || (r->width > config->max_width))
+ return -EINVAL;
+- }
+- if ((config->min_height > r->height) || (r->height > config->max_height)) {
+- DRM_ERROR("mode new framebuffer height not within limits\n");
++
++ if ((config->min_height > r->height) || (r->height > config->max_height))
+ return -EINVAL;
+- }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+@@ -1756,7 +1749,6 @@ int drm_mode_rmfb(struct drm_device *dev,
+ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+ /* TODO check that we really get a framebuffer back. */
+ if (!obj) {
+- DRM_ERROR("mode invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1767,7 +1759,6 @@ int drm_mode_rmfb(struct drm_device *dev,
+ found = 1;
+
+ if (!found) {
+- DRM_ERROR("tried to remove a fb that we didn't own\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1814,7 +1805,6 @@ int drm_mode_getfb(struct drm_device *dev,
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+- DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1850,7 +1840,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+- DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out_err1;
+ }
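
Besides dropping the user-triggerable DRM_ERROR spam, the cursor ioctl now rejects requests carrying bits outside DRM_MODE_CURSOR_FLAGS: the test !req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags) fails both the empty request and any unknown flag. The mask check generalizes to any flags-style ABI; a small stand-alone version (the flag values are invented for the example):

#include <stdio.h>
#include <errno.h>

#define CURSOR_BO    (1u << 0)
#define CURSOR_MOVE  (1u << 1)
#define CURSOR_FLAGS (CURSOR_BO | CURSOR_MOVE)   /* all known bits */

static int check_cursor_flags(unsigned int flags)
{
    /* Reject "do nothing" and reject any bit we do not understand,
     * so new flags can be added later without old code silently
     * ignoring them. */
    if (!flags || (~CURSOR_FLAGS & flags))
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%d\n", check_cursor_flags(CURSOR_BO));             /* 0 */
    printf("%d\n", check_cursor_flags(0));                     /* -EINVAL */
    printf("%d\n", check_cursor_flags(CURSOR_BO | (1u << 5))); /* -EINVAL */
    return 0;
}
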
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index a1ee634..0c1a99b 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -66,6 +66,8 @@
+ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+ /* use +hsync +vsync for detailed mode */
+ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
++/* Force reduced-blanking timings for detailed modes */
++#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+
+ struct detailed_mode_closure {
+ struct drm_connector *connector;
+@@ -85,6 +87,9 @@ static struct edid_quirk {
+ int product_id;
+ u32 quirks;
+ } edid_quirk_list[] = {
++ /* ASUS VW222S */
++ { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
++
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+@@ -120,6 +125,9 @@ static struct edid_quirk {
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
++
++ /* ViewSonic VA2026w */
++ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+ };
+
+ /*** DDC fetch and block validation ***/
+@@ -863,12 +871,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
++
++ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
++ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
++ if (!mode)
++ return NULL;
++
++ goto set_size;
++ }
++
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+- mode->type = DRM_MODE_TYPE_DRIVER;
+-
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = cpu_to_le16(1088);
+
+@@ -892,8 +907,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+- drm_mode_set_name(mode);
+-
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+@@ -903,6 +916,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
++set_size:
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+@@ -916,6 +930,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ mode->height_mm = edid->height_cm * 10;
+ }
+
++ mode->type = DRM_MODE_TYPE_DRIVER;
++ drm_mode_set_name(mode);
++
+ return mode;
+ }
+
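
The EDID change introduces EDID_QUIRK_FORCE_REDUCED_BLANKING and routes matching monitors through drm_cvt_mode with reduced blanking instead of trusting their detailed timing descriptor. Quirks are found by a linear scan over (vendor, product) pairs whose flag bits are OR-ed together; a compact model of that lookup (the two entries are the ones the patch adds):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define QUIRK_FORCE_REDUCED_BLANKING (1u << 7)

struct edid_quirk {
    char vendor[4];     /* 3-letter PNP ID */
    int product_id;
    uint32_t quirks;
};

static const struct edid_quirk quirk_list[] = {
    { "ACI", 0x22a2, QUIRK_FORCE_REDUCED_BLANKING },  /* ASUS VW222S */
    { "VSC", 5020,   QUIRK_FORCE_REDUCED_BLANKING },  /* ViewSonic VA2026w */
};

static uint32_t edid_get_quirks(const char *vendor, int product)
{
    for (size_t i = 0; i < sizeof(quirk_list) / sizeof(quirk_list[0]); i++)
        if (!strcmp(quirk_list[i].vendor, vendor) &&
            quirk_list[i].product_id == product)
            return quirk_list[i].quirks;
    return 0;
}

int main(void)
{
    uint32_t q = edid_get_quirks("VSC", 5020);

    if (q & QUIRK_FORCE_REDUCED_BLANKING)
        printf("use CVT reduced-blanking timings\n");
    return 0;
}
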
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 578ddfc..c8b5bc1 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2006,10 +2006,22 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ if (IS_G4X(dev)) {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ } else if (IS_GEN4(dev)) {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ } else {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index fd53122..4a5e662 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1419,14 +1419,20 @@
+ #define DPC_HOTPLUG_INT_STATUS (1 << 28)
+ #define HDMID_HOTPLUG_INT_STATUS (1 << 27)
+ #define DPD_HOTPLUG_INT_STATUS (1 << 27)
++/* CRT/TV common between gen3+ */
+ #define CRT_HOTPLUG_INT_STATUS (1 << 11)
+ #define TV_HOTPLUG_INT_STATUS (1 << 10)
+ #define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+ #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+ #define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+ #define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++/* SDVO is different across gen3/4 */
++#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
++#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
++#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
++#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
++#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
++#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+
+ /* SDVO port control */
+ #define SDVOB 0x61140
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 3eed270..6c3fb44 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1072,8 +1072,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+ {
+ u32 val = I915_READ(reg);
+- WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
+- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
++ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
++ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+ }
+
+@@ -1089,13 +1089,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+- WARN(adpa_pipe_enabled(dev_priv, val, pipe),
++ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+- WARN(lvds_pipe_enabled(dev_priv, val, pipe),
++ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+@@ -1437,7 +1437,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+ {
+ u32 val = I915_READ(reg);
+- if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
++ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+@@ -1459,12 +1459,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+- if (adpa_pipe_enabled(dev_priv, val, pipe))
++ if (adpa_pipe_enabled(dev_priv, pipe, val))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+- if (lvds_pipe_enabled(dev_priv, val, pipe)) {
++ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+@@ -2852,16 +2852,14 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
+
+ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+ {
+- struct drm_i915_gem_object *obj;
+- struct drm_i915_private *dev_priv;
++ struct drm_device *dev = crtc->dev;
+
+ if (crtc->fb == NULL)
+ return;
+
+- obj = to_intel_framebuffer(crtc->fb)->obj;
+- dev_priv = crtc->dev->dev_private;
+- wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&obj->pending_flip) == 0);
++ mutex_lock(&dev->struct_mutex);
++ intel_finish_fb(crtc->fb);
++ mutex_unlock(&dev->struct_mutex);
+ }
+
+ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+@@ -3322,23 +3320,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
+- /* Flush any pending WAITs before we disable the pipe. Note that
+- * we need to drop the struct_mutex in order to acquire it again
+- * during the lowlevel dpms routines around a couple of the
+- * operations. It does not look trivial nor desirable to move
+- * that locking higher. So instead we leave a window for the
+- * submission of further commands on the fb before we can actually
+- * disable it. This race with userspace exists anyway, and we can
+- * only rely on the pipe being disabled by userspace after it
+- * receives the hotplug notification and has flushed any pending
+- * batches.
+- */
+- if (crtc->fb) {
+- mutex_lock(&dev->struct_mutex);
+- intel_finish_fb(crtc->fb);
+- mutex_unlock(&dev->struct_mutex);
+- }
+-
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index ceec71b..f07bde2 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -752,7 +752,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+@@ -760,7 +760,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index a8d8ee5..bbf247c 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -2514,6 +2514,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
++ u32 hotplug_mask;
+ int i;
+
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+@@ -2544,10 +2545,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ }
+ }
+
+- if (IS_SDVOB(sdvo_reg))
+- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+- else
+- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
++ hotplug_mask = 0;
++ if (IS_G4X(dev)) {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
++ } else if (IS_GEN4(dev)) {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
++ } else {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
++ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+
+@@ -2555,14 +2563,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+- /* Set up hotplug command - note paranoia about contents of reply.
+- * We assume that the hardware is in a sane state, and only touch
+- * the bits we think we understand.
+- */
+- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+- &intel_sdvo->hotplug_active, 2);
+- intel_sdvo->hotplug_active[0] &= ~0x3;
+-
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+@@ -2570,6 +2570,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ goto err;
+ }
+
++ /* Only enable the hotplug irq if we need it, to work around noisy
++ * hotplug lines.
++ */
++ if (intel_sdvo->hotplug_active[0])
++ dev_priv->hotplug_supported_mask |= hotplug_mask;
++
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index b12fd2c..6adef06 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -381,7 +381,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+- ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
++ ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 757c549..ceffd20 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1446,14 +1446,98 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+ }
+ }
+
++/**
++ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
++ */
++static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_crtc *test_crtc;
++ struct radeon_crtc *radeon_test_crtc;
++ u32 pll_in_use = 0;
++
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc == test_crtc)
++ continue;
++
++ radeon_test_crtc = to_radeon_crtc(test_crtc);
++ if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
++ pll_in_use |= (1 << radeon_test_crtc->pll_id);
++ }
++ return pll_in_use;
++}
++
++/**
++ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
++ * also in DP mode. For DP, a single PPLL can be used for all DP
++ * crtcs/encoders.
++ */
++static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_encoder *test_encoder;
++ struct radeon_crtc *radeon_test_crtc;
++
++ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
++ if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
++ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
++ /* for DP use the same PLL for all */
++ radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
++ if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
++ return radeon_test_crtc->pll_id;
++ }
++ }
++ }
++ return ATOM_PPLL_INVALID;
++}
++
++/**
++ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
++ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
++ * monitors a dedicated PPLL must be used. If a particular board has
++ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
++ * as there is no need to program the PLL itself. If we are not able to
++ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
++ * avoid messing up an existing monitor.
++ *
++ * Asic specific PLL information
++ *
++ * DCE 6.1
++ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
++ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
++ *
++ * DCE 6.0
++ * - PPLL0 is available to all UNIPHY (DP only)
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ * DCE 5.0
++ * - DCPLL is available to all UNIPHY (DP only)
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ * DCE 3.0/4.0/4.1
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ */
+ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+- struct drm_crtc *test_crtc;
+- uint32_t pll_in_use = 0;
++ u32 pll_in_use;
++ int pll;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+@@ -1461,35 +1545,39 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+- * DCE5: DCPLL or ext clock
++ * DCE5: PPLL, DCPLL, or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+ * crtc virtual pixel clock.
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+- if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
++ if (rdev->clock.dp_extclk)
++ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
++ else if (ASIC_IS_DCE5(rdev))
++ /* use DCPLL for all DP */
++ return ATOM_DCPLL;
++ else {
++ /* use the same PPLL for all DP monitors */
++ pll = radeon_get_shared_dp_ppll(crtc);
++ if (pll != ATOM_PPLL_INVALID)
++ return pll;
++ }
+ }
++ break;
+ }
+ }
+-
+- /* otherwise, pick one of the plls */
+- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+- struct radeon_crtc *radeon_test_crtc;
+-
+- if (crtc == test_crtc)
+- continue;
+-
+- radeon_test_crtc = to_radeon_crtc(test_crtc);
+- if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+- (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+- pll_in_use |= (1 << radeon_test_crtc->pll_id);
+- }
+- if (!(pll_in_use & 1))
++ /* all other cases */
++ pll_in_use = radeon_get_pll_use_mask(crtc);
++ if (!(pll_in_use & (1 << ATOM_PPLL2)))
++ return ATOM_PPLL2;
++ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+- return ATOM_PPLL2;
++ DRM_ERROR("unable to allocate a PPLL\n");
++ return ATOM_PPLL_INVALID;
+ } else
++ /* use PPLL1 or PPLL2 */
+ return radeon_crtc->crtc_id;
+
+ }
+@@ -1578,10 +1666,25 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
+ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_atom_ss ss;
++ int i;
+
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
++ for (i = 0; i < rdev->num_crtc; i++) {
++ if (rdev->mode_info.crtcs[i] &&
++ rdev->mode_info.crtcs[i]->enabled &&
++ i != radeon_crtc->crtc_id &&
++ radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
++ /* one other crtc is using this pll don't turn
++ * off the pll
++ */
++ goto done;
++ }
++ }
++
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+@@ -1592,7 +1695,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ default:
+ break;
+ }
+- radeon_crtc->pll_id = -1;
++done:
++ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ }
+
+ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+@@ -1641,6 +1745,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+- radeon_crtc->pll_id = -1;
++ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+ }
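
radeon_atom_pick_pll now builds a bitmask of PPLLs already claimed by other CRTCs and hands out the first free one, letting DP CRTCs share a single PLL and returning ATOM_PPLL_INVALID rather than retuning a PLL a live monitor still depends on. The allocation core is a first-free-bit scan over a use mask; sketched in isolation (the ids and helpers are illustrative):

#include <stdio.h>

enum { PPLL1 = 0, PPLL2 = 1, NR_PPLL = 2, PPLL_INVALID = -1 };

/* Analogue of radeon_get_pll_use_mask(): one bit per PLL in use by
 * some other CRTC. */
static unsigned int pll_use_mask(const int *crtc_pll, int ncrtc, int self)
{
    unsigned int mask = 0;

    for (int i = 0; i < ncrtc; i++)
        if (i != self && crtc_pll[i] != PPLL_INVALID)
            mask |= 1u << crtc_pll[i];
    return mask;
}

static int pick_pll(const int *crtc_pll, int ncrtc, int self)
{
    unsigned int in_use = pll_use_mask(crtc_pll, ncrtc, self);

    for (int pll = 0; pll < NR_PPLL; pll++)
        if (!(in_use & (1u << pll)))
            return pll;
    /* Better to skip programming than to retune a PLL another
     * active CRTC is still using. */
    return PPLL_INVALID;
}

int main(void)
{
    int crtc_pll[3] = { PPLL2, PPLL_INVALID, PPLL_INVALID };

    printf("crtc1 gets PPLL%d\n", pick_pll(crtc_pll, 3, 1) + 1);
    return 0;
}
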
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5351ee1..382e141 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1344,6 +1344,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = NULL;
+ struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+@@ -1355,12 +1357,38 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+- /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
+- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ if (!connector)
++ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ else
++ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
++
++ /* setup and enable the encoder */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ atombios_dig_encoder_setup(encoder,
++ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
++ dig->panel_mode);
++ if (ext_encoder) {
++ if (ASIC_IS_DCE41(rdev))
++ atombios_external_encoder_setup(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
++ }
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
++ } else if (ASIC_IS_DCE4(rdev)) {
++ /* setup and enable the encoder */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++ } else {
++ /* setup and enable the encoder and transmitter */
++ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
++ /* some early dce3.2 boards have a bug in their transmitter control table */
++ if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+@@ -1377,10 +1405,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- else
++ } else if (ASIC_IS_DCE4(rdev)) {
++ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ } else {
++ /* disable the encoder and transmitter */
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
++ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+@@ -1805,10 +1842,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
++ /* need to call this here rather than in prepare() since we need some crtc info */
++ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
++
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+@@ -1827,38 +1866,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-
+- if (!connector)
+- dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+- else
+- dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+-
+- /* setup and enable the encoder */
+- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+- atombios_dig_encoder_setup(encoder,
+- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+- dig->panel_mode);
+- } else if (ASIC_IS_DCE4(rdev)) {
+- /* disable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- /* setup and enable the encoder */
+- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+-
+- /* enable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- } else {
+- /* disable the encoder and transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+-
+- /* setup and enable the encoder and transmitter */
+- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- }
++ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+@@ -1879,14 +1887,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ break;
+ }
+
+- if (ext_encoder) {
+- if (ASIC_IS_DCE41(rdev))
+- atombios_external_encoder_setup(encoder, ext_encoder,
+- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+- else
+- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+- }
+-
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+@@ -2059,7 +2059,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+ }
+
+ radeon_atom_output_lock(encoder, true);
+- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+@@ -2080,6 +2079,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+
+ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+ {
++ /* need to call this here as we need the crtc set up */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+ }
+@@ -2120,14 +2120,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+- if (ASIC_IS_DCE4(rdev))
+- /* disable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- else {
+- /* disable the encoder and transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+- }
++ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 9231564..c5762e3 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -761,7 +761,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ if (rdev->flags & RADEON_IS_AGP)
+ rdev->need_dma32 = true;
+ if ((rdev->flags & RADEON_IS_PCI) &&
+- (rdev->family < CHIP_RS400))
++ (rdev->family <= CHIP_RS740))
+ rdev->need_dma32 = true;
+
+ dma_bits = rdev->need_dma32 ? 32 : 40;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index dff8fc7..033fc96 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -178,6 +178,7 @@ static struct pci_device_id vmw_pci_id_list[] = {
+ {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+ {0, 0, 0}
+ };
++MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+
+ static int enable_fbdev;
+
+@@ -1088,6 +1089,11 @@ static struct drm_driver driver = {
+ .master_drop = vmw_master_drop,
+ .open = vmw_driver_open,
+ .postclose = vmw_postclose,
++
++ .dumb_create = vmw_dumb_create,
++ .dumb_map_offset = vmw_dumb_map_offset,
++ .dumb_destroy = vmw_dumb_destroy,
++
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index dc27970..0e3fa7d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -641,6 +641,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
+ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args);
++
++int vmw_dumb_map_offset(struct drm_file *file_priv,
++ struct drm_device *dev, uint32_t handle,
++ uint64_t *offset);
++int vmw_dumb_destroy(struct drm_file *file_priv,
++ struct drm_device *dev,
++ uint32_t handle);
+ /**
+ * Overlay control - vmwgfx_overlay.c
+ */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 1c7f09e..0795d17 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -1950,3 +1950,76 @@ err_ref:
+ vmw_resource_unreference(&res);
+ return ret;
+ }
++
++
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args)
++{
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ struct vmw_master *vmaster = vmw_master(file_priv->master);
++ struct vmw_user_dma_buffer *vmw_user_bo;
++ struct ttm_buffer_object *tmp;
++ int ret;
++
++ args->pitch = args->width * ((args->bpp + 7) / 8);
++ args->size = args->pitch * args->height;
++
++ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
++ if (vmw_user_bo == NULL)
++ return -ENOMEM;
++
++ ret = ttm_read_lock(&vmaster->lock, true);
++ if (ret != 0) {
++ kfree(vmw_user_bo);
++ return ret;
++ }
++
++ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
++ &vmw_vram_sys_placement, true,
++ &vmw_user_dmabuf_destroy);
++ if (ret != 0)
++ goto out_no_dmabuf;
++
++ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
++ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
++ &vmw_user_bo->base,
++ false,
++ ttm_buffer_type,
++ &vmw_user_dmabuf_release, NULL);
++ if (unlikely(ret != 0))
++ goto out_no_base_object;
++
++ args->handle = vmw_user_bo->base.hash.key;
++
++out_no_base_object:
++ ttm_bo_unref(&tmp);
++out_no_dmabuf:
++ ttm_read_unlock(&vmaster->lock);
++ return ret;
++}
++
++int vmw_dumb_map_offset(struct drm_file *file_priv,
++ struct drm_device *dev, uint32_t handle,
++ uint64_t *offset)
++{
++ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ struct vmw_dma_buffer *out_buf;
++ int ret;
++
++ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
++ if (ret != 0)
++ return -EINVAL;
++
++ *offset = out_buf->base.addr_space_offset;
++ vmw_dmabuf_unreference(&out_buf);
++ return 0;
++}
++
++int vmw_dumb_destroy(struct drm_file *file_priv,
++ struct drm_device *dev,
++ uint32_t handle)
++{
++ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
++ handle, TTM_REF_USAGE);
++}
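
vmw_dumb_create sizes its buffer with the usual dumb-buffer arithmetic: bytes per pixel rounded up from bits, pitch = width * cpp, size = pitch * height. nouveau_display_dumb_create, earlier in this same patch, additionally rounds the size up to a page. Both steps extracted (the PAGE_SIZE value is an assumption for the example, not taken from either driver):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u   /* assumed value for the example */

static uint32_t round_up(uint32_t x, uint32_t to)
{
    return ((x + to - 1) / to) * to;
}

int main(void)
{
    uint32_t width = 1024, height = 768, bpp = 32;

    /* Common to both drivers: bits -> whole bytes, then row pitch. */
    uint32_t pitch = width * ((bpp + 7) / 8);
    uint32_t size = pitch * height;        /* vmw_dumb_create stops here */

    /* nouveau_display_dumb_create additionally page-aligns the size. */
    uint32_t nv_size = round_up(size, PAGE_SIZE);

    printf("pitch=%u size=%u page-aligned=%u\n", pitch, size, nv_size);
    return 0;
}
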
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index d21f6d0..b5cc078 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -350,6 +350,7 @@ config HID_MULTITOUCH
+ - Lumio CrystalTouch panels
+ - MosArt dual-touch panels
+ - PenMount dual touch panels
++ - PixArt optical touch screen
+ - Pixcir dual touch panels
+ - eGalax dual-touch panels, including the Joojoo and Wetab tablets
+ - Stantum multitouch panels
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5cc029f..0c8bea9 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1507,6 +1507,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e4317a2..ab75a4e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -593,6 +593,11 @@
+ #define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
+ #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+
++#define USB_VENDOR_ID_PIXART 0x093a
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
++
+ #define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
+ #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 995fc4c..13af0f1 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -93,6 +93,7 @@ struct mt_class {
+ #define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
+ #define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
+ #define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
++#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
+
+ /* vendor specific classes */
+ #define MT_CLS_3M 0x0101
+@@ -155,6 +156,9 @@ struct mt_class mt_classes[] = {
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
++ { .name = MT_CLS_INRANGE_CONTACTNUMBER,
++ .quirks = MT_QUIRK_VALID_IS_INRANGE |
++ MT_QUIRK_SLOT_IS_CONTACTNUMBER },
+
+ /*
+ * vendor specific classes
+@@ -744,6 +748,17 @@ static const struct hid_device_id mt_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
+ USB_DEVICE_ID_PENMOUNT_PCI) },
+
++ /* PixArt optical touch screen */
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
++
+ /* PixCir-based panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 1fe6b80..afb73af 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,10 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index 00e9851..83d2fbd6 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+ }
++ }, {
++ /* Old interface reads the same sensor for fan0 and fan1 */
++ .ident = "Asus M5A78L",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
++ }
+ },
+ { }
+ };
+diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
+index 0018c7d..1a174f0 100644
+--- a/drivers/hwmon/twl4030-madc-hwmon.c
++++ b/drivers/hwmon/twl4030-madc-hwmon.c
+@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+- struct twl4030_madc_request req;
++ struct twl4030_madc_request req = {
++ .channels = 1 << attr->index,
++ .method = TWL4030_MADC_SW2,
++ .type = TWL4030_MADC_WAIT,
++ };
+ long val;
+
+- req.channels = (1 << attr->index);
+- req.method = TWL4030_MADC_SW2;
+- req.func_cb = NULL;
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
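
The twl4030 fix is subtle: the old code filled in channels, method and func_cb by hand, leaving req.type (and any other member) as stack garbage. Converting to a designated initializer fixes that class of bug, because C zero-initializes every member not named in the list. A self-contained demonstration (the request struct is a cut-down stand-in for the real one):

#include <stdio.h>

struct madc_request {
    unsigned long channels;
    int method;
    int type;               /* left uninitialized before the fix */
    void (*func_cb)(void);
};

enum { MADC_SW2 = 2, MADC_WAIT = 0 };

int main(void)
{
    /* func_cb, and anything else not named here, is set to 0/NULL
     * automatically, so nothing is forgotten when the struct grows. */
    struct madc_request req = {
        .channels = 1ul << 3,
        .method = MADC_SW2,
        .type = MADC_WAIT,
    };

    printf("channels=%#lx method=%d type=%d cb=%s\n",
           req.channels, req.method, req.type,
           req.func_cb ? "set" : "NULL");
    return 0;
}
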
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index a3afac4..60f593c 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -103,6 +103,8 @@ config I2C_I801
+ Patsburg (PCH)
+ DH89xxCC (PCH)
+ Panther Point (PCH)
++ Lynx Point (PCH)
++ Lynx Point-LP (PCH)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+@@ -349,9 +351,13 @@ config I2C_DAVINCI
+ devices such as DaVinci NIC.
+ For details please see http://www.ti.com/davinci
+
++config I2C_DESIGNWARE_CORE
++ tristate
++
+ config I2C_DESIGNWARE_PLATFORM
+ tristate "Synopsys DesignWare Platfrom"
+ depends on HAVE_CLK
++ select I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+@@ -362,6 +368,7 @@ config I2C_DESIGNWARE_PLATFORM
+ config I2C_DESIGNWARE_PCI
+ tristate "Synopsys DesignWare PCI"
+ depends on PCI
++ select I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index fba6da6..d6b8779 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
+ obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+ obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
+ obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
++obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
+ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
+-i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
++i2c-designware-platform-objs := i2c-designware-platdrv.o
+ obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
+-i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
++i2c-designware-pci-objs := i2c-designware-pcidrv.o
+ obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
+ obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
+ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index df87992..6193349 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -25,6 +25,7 @@
+ * ----------------------------------------------------------------------------
+ *
+ */
++#include <linux/export.h>
+ #include <linux/clk.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+@@ -305,6 +306,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
+ dw_writel(dev, dev->master_cfg , DW_IC_CON);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_init);
+
+ /*
+ * Waiting for bus not busy
+@@ -557,12 +559,14 @@ done:
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_xfer);
+
+ u32 i2c_dw_func(struct i2c_adapter *adap)
+ {
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ return dev->functionality;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_func);
+
+ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
+ {
+@@ -667,17 +671,20 @@ tx_aborted:
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_isr);
+
+ void i2c_dw_enable(struct dw_i2c_dev *dev)
+ {
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_enable);
+
+ u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+ {
+ return dw_readl(dev, DW_IC_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
+
+ void i2c_dw_disable(struct dw_i2c_dev *dev)
+ {
+@@ -688,18 +695,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
+ dw_readl(dev, DW_IC_CLR_INTR);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_disable);
+
+ void i2c_dw_clear_int(struct dw_i2c_dev *dev)
+ {
+ dw_readl(dev, DW_IC_CLR_INTR);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
+
+ void i2c_dw_disable_int(struct dw_i2c_dev *dev)
+ {
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
+
+ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
+ {
+ return dw_readl(dev, DW_IC_COMP_PARAM_1);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index ab26840d..817d025 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -51,6 +51,8 @@
+ Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
+ DH89xxCC (PCH) 0x2330 32 hard yes yes yes
+ Panther Point (PCH) 0x1e22 32 hard yes yes yes
++ Lynx Point (PCH) 0x8c22 32 hard yes yes yes
++ Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+
+ Features supported by this driver:
+ Software PEC no
+@@ -145,6 +147,8 @@
+ #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+ #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
+ #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+
+ struct i801_priv {
+ struct i2c_adapter adapter;
+@@ -633,6 +637,8 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
+ { 0, }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index b4cfc6c..d4ec371 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ },
+ },
+ {
++ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
++ },
++ },
++ {
++ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
++ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index d497db0..509135f 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -16,7 +16,6 @@
+ #include <linux/sched.h>
+ #include "isdnloop.h"
+
+-static char *revision = "$Revision: 1.11.6.7 $";
+ static char *isdnloop_id = "loop0";
+
+ MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
+@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
+ static int __init
+ isdnloop_init(void)
+ {
+- char *p;
+- char rev[10];
+-
+- if ((p = strchr(revision, ':'))) {
+- strcpy(rev, p + 1);
+- p = strchr(rev, '$');
+- *p = 0;
+- } else
+- strcpy(rev, " ??? ");
+- printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
+-
+ if (isdnloop_id)
+ return (isdnloop_addcard(isdnloop_id));
+
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 34416d4..74793af 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1339,7 +1339,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+- if (req->cmd_flags & REQ_SECURE)
++ if (req->cmd_flags & REQ_SECURE &&
++ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_issue_discard_rq(mq, req);
+@@ -1614,6 +1615,8 @@ static int mmc_add_disk(struct mmc_blk_data *md)
+ return ret;
+ }
+
++#define CID_MANFID_SAMSUNG 0x15
++
+ static const struct mmc_fixup blk_fixups[] =
+ {
+ MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+@@ -1644,6 +1647,28 @@ static const struct mmc_fixup blk_fixups[] =
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
++ /*
++ * On these Samsung MoviNAND parts, performing secure erase or
++ * secure trim can result in unrecoverable corruption due to a
++ * firmware bug.
++ */
++ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++
+ END_FIXUP
+ };
+
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 99b449d..f201bed 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -279,11 +279,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+ writel(stat & MXS_MMC_IRQ_BITS,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+
++ spin_unlock(&host->lock);
++
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+- spin_unlock(&host->lock);
+-
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+@@ -628,10 +628,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+-
+- if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+- mmc_signal_sdio_irq(host->mmc);
+-
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+@@ -640,6 +636,10 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
++
++ if (enable && readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
++ mmc_signal_sdio_irq(host->mmc);
++
+ }
+
+ static const struct mmc_host_ops mxs_mmc_ops = {
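+The reordering above releases host->lock before calling
+mmc_signal_sdio_irq(), so the callback can never re-enter a path that takes
+the same lock. A minimal pthread analogue of the pattern, hypothetical names:
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static int pending;
+
+static void signal_consumer(void) { puts("sdio irq signalled"); }
+
+static void irq_handler(void)
+{
+	int stat;
+
+	pthread_mutex_lock(&lock);
+	stat = pending;		/* read and clear device state under the lock */
+	pending = 0;
+	pthread_mutex_unlock(&lock);
+
+	if (stat)		/* invoke the callback only after unlocking */
+		signal_consumer();
+}
+
+int main(void) { pending = 1; irq_handler(); return 0; }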
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index c3b08f1..62ca03a 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+ int div = 1;
+ u32 temp;
+
++ if (clock == 0)
++ goto out;
++
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+- if (clock == 0)
+- goto out;
+-
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 890754c..95b29f5 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -346,7 +346,7 @@ retry:
+ */
+ err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
+ vid_hdr, 0);
+- kfree(new_seb);
++ kmem_cache_free(si->scan_leb_slab, new_seb);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+@@ -359,7 +359,7 @@ write_error:
+ list_add(&new_seb->u.list, &si->erase);
+ goto retry;
+ }
+- kfree(new_seb);
++ kmem_cache_free(si->scan_leb_slab, new_seb);
+ out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
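+The fix above matters because new_seb comes from the scan_leb_slab cache, so
+it must be returned with kmem_cache_free() rather than kfree(). A userspace
+analogue of the allocator-pairing rule, with a toy free-list pool standing in
+for the slab cache:
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct seb { int pnum; struct seb *next; };
+
+static struct seb *pool_head;	/* trivial pool standing in for kmem_cache */
+
+static struct seb *pool_alloc(void)
+{
+	if (pool_head) {
+		struct seb *s = pool_head;
+		pool_head = s->next;
+		return s;
+	}
+	return malloc(sizeof(struct seb));
+}
+
+static void pool_free(struct seb *s)
+{
+	s->next = pool_head;	/* objects return to the pool, never free() */
+	pool_head = s;
+}
+
+int main(void)
+{
+	struct seb *s = pool_alloc();
+
+	s->pnum = 42;
+	pool_free(s);	/* pairs with pool_alloc(); free(s) would be the bug */
+	return 0;
+}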
+diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
+index 330140e..9bcc39a 100644
+--- a/drivers/net/can/mcp251x.c
++++ b/drivers/net/can/mcp251x.c
+@@ -83,6 +83,11 @@
+ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
+ #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
+ #define INSTRUCTION_RESET 0xC0
++#define RTS_TXB0 0x01
++#define RTS_TXB1 0x02
++#define RTS_TXB2 0x04
++#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
++
+
+ /* MPC251x registers */
+ #define CANSTAT 0x0e
+@@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+ int tx_buf_idx)
+ {
++ struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+ u32 sid, eid, exide, rtr;
+ u8 buf[SPI_TRANSFER_BUF_LEN];
+
+@@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+ buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
+ memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
+ mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+- mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
++
++	/* use INSTRUCTION_RTS to avoid the "repeated frame problem" */
++ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
++ mcp251x_spi_trans(priv->spi, 1);
+ }
+
+ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
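+INSTRUCTION_RTS() above ORs a TX-buffer bitmap into opcode 0x80; a quick
+standalone check of the encodings it produces for the three TX buffers:
+
+#include <stdio.h>
+
+#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
+
+int main(void)
+{
+	int tx_buf_idx;
+
+	for (tx_buf_idx = 0; tx_buf_idx < 3; tx_buf_idx++)
+		printf("TXB%d -> RTS opcode 0x%02x\n",
+		       tx_buf_idx, INSTRUCTION_RTS(1 << tx_buf_idx));
+	/* prints 0x81, 0x82, 0x84, matching RTS_TXB0/1/2 above */
+	return 0;
+}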
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 83199fd..d0722a7 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
++ dev->features |= NETIF_F_HW_VLAN_RX;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index b1cd41b..021463b 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+ }
+
+ if (adapter->rx_queue.queue_addr != NULL) {
+- if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+- dma_unmap_single(dev,
+- adapter->rx_queue.queue_dma,
+- adapter->rx_queue.queue_len,
+- DMA_BIDIRECTIONAL);
+- adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+- }
+- kfree(adapter->rx_queue.queue_addr);
++ dma_free_coherent(dev, adapter->rx_queue.queue_len,
++ adapter->rx_queue.queue_addr,
++ adapter->rx_queue.queue_dma);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+@@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
+ goto err_out;
+ }
+
++ dev = &adapter->vdev->dev;
++
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+- adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+- GFP_KERNEL);
++ adapter->rx_queue.queue_addr =
++ dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
++ &adapter->rx_queue.queue_dma, GFP_KERNEL);
+
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
+@@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
+ goto err_out;
+ }
+
+- dev = &adapter->vdev->dev;
+-
+ adapter->buffer_list_dma = dma_map_single(dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+- adapter->rx_queue.queue_dma = dma_map_single(dev,
+- adapter->rx_queue.queue_addr,
+- adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+- (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+- (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
++ (dma_mapping_error(dev, adapter->filter_list_dma))) {
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index f478a22..8e362bb 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -302,6 +302,7 @@ struct e1000_adapter {
+ */
+ struct e1000_ring *tx_ring /* One per active queue */
+ ____cacheline_aligned_in_smp;
++ u32 tx_fifo_limit;
+
+ struct napi_struct napi;
+
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 64d3f98..0182649 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -3386,6 +3386,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
+ }
+
+ /*
++ * Alignment of Tx data is on an arbitrary byte boundary with the
++ * maximum size per Tx descriptor limited only to the transmit
++ * allocation of the packet buffer minus 96 bytes with an upper
++ * limit of 24KB due to receive synchronization limitations.
++ */
++ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
++ 24 << 10);
++
++ /*
+ * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer and early-receive not supported.
+ */
+@@ -4647,13 +4656,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+ return 1;
+ }
+
+-#define E1000_MAX_PER_TXD 8192
+-#define E1000_MAX_TXD_PWR 12
+-
+ static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct sk_buff *skb, unsigned int first,
+- unsigned int max_per_txd, unsigned int nr_frags,
+- unsigned int mss)
++ unsigned int max_per_txd, unsigned int nr_frags)
+ {
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+@@ -4882,20 +4887,19 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
++ BUG_ON(size > adapter->tx_ring->count);
++
+ if (e1000_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+ }
+
+-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int first;
+- unsigned int max_per_txd = E1000_MAX_PER_TXD;
+- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+@@ -4915,18 +4919,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+- /*
+- * The controller does a simple calculation to
+- * make sure there is enough room in the FIFO before
+- * initiating the DMA for each buffer. The calc is:
+- * 4 = ceil(buffer len/mss). To make sure we don't
+- * overrun the FIFO, adjust the max buffer len if mss
+- * drops.
+- */
+ if (mss) {
+ u8 hdr_len;
+- max_per_txd = min(mss << 2, max_per_txd);
+- max_txd_pwr = fls(max_per_txd) - 1;
+
+ /*
+ * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+@@ -4956,12 +4950,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ count++;
+ count++;
+
+- count += TXD_USE_COUNT(len, max_txd_pwr);
++ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+- max_txd_pwr);
++ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
++ adapter->tx_fifo_limit);
+
+ if (adapter->hw.mac.tx_pkt_filtering)
+ e1000_transfer_dhcp_info(adapter, skb);
+@@ -5000,12 +4994,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ /* if count is 0 then mapping error has occurred */
+- count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
++ count = e1000_tx_map(adapter, skb, first, adapter->tx_fifo_limit,
++ nr_frags);
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+-
++ e1000_maybe_stop_tx(netdev,
++ (MAX_SKB_FRAGS *
++ DIV_ROUND_UP(PAGE_SIZE,
++ adapter->tx_fifo_limit) + 2));
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+@@ -6150,8 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
+ adapter->hw.phy.autoneg_advertised = 0x2f;
+
+ /* ring size defaults */
+- adapter->rx_ring->count = 256;
+- adapter->tx_ring->count = 256;
++ adapter->rx_ring->count = E1000_DEFAULT_RXD;
++ adapter->tx_ring->count = E1000_DEFAULT_TXD;
+
+ /*
+ * Initial Wake on LAN setting - If APM wake is enabled in
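+The tx_fifo_limit computed above is ((PBA >> 16) << 10) - 96, capped at 24KB.
+Worked through with a hypothetical PBA register value, together with the
+DIV_ROUND_UP() descriptor count that replaces TXD_USE_COUNT():
+
+#include <stdio.h>
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+int main(void)
+{
+	unsigned pba = 0x0014000c;	/* hypothetical: 20KB tx allocation */
+	unsigned limit = ((pba >> 16) << 10) - 96;	/* 20384 bytes */
+
+	if (limit > 24u << 10)
+		limit = 24u << 10;
+	printf("tx_fifo_limit = %u\n", limit);
+
+	/* a 64KB linear TSO payload then needs this many descriptors */
+	printf("descriptors for 64KB: %u\n", DIV_ROUND_UP(64u << 10, limit));
+	return 0;
+}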
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index d5731f1..a6611f1 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1383,6 +1383,11 @@ static int efx_probe_all(struct efx_nic *efx)
+ goto fail2;
+ }
+
++ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
++ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
++ rc = -EINVAL;
++ goto fail3;
++ }
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+@@ -1973,6 +1978,7 @@ static int efx_register_netdev(struct efx_nic *efx)
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
++ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+
+ /* Clear MAC statistics */
+ efx->mac_op->update_stats(efx);
+diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
+index 4764793..1355245 100644
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -34,6 +34,7 @@ extern netdev_tx_t
+ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+ extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
++extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+ /* RX */
+ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+@@ -56,10 +57,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+ #define EFX_MAX_EVQ_SIZE 16384UL
+ #define EFX_MIN_EVQ_SIZE 512UL
+
+-/* The smallest [rt]xq_entries that the driver supports. Callers of
+- * efx_wake_queue() assume that they can subsequently send at least one
+- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
++/* Maximum number of TCP segments we support for soft-TSO */
++#define EFX_TSO_MAX_SEGS 100
++
++/* The smallest [rt]xq_entries that the driver supports. RX minimum
++ * is a bit arbitrary. For TX, we must have space for at least 2
++ * TSO skbs.
++ */
++#define EFX_RXQ_MIN_ENT 128U
++#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+
+ /* Filters */
+ extern int efx_probe_filters(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
+index f3cd96d..90158c9 100644
+--- a/drivers/net/ethernet/sfc/ethtool.c
++++ b/drivers/net/ethernet/sfc/ethtool.c
+@@ -690,21 +690,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+ {
+ struct efx_nic *efx = netdev_priv(net_dev);
++ u32 txq_entries;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+- ring->tx_pending < EFX_MIN_RING_SIZE) {
++ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+ netif_err(efx, drv, efx->net_dev,
+- "TX and RX queues cannot be smaller than %ld\n",
+- EFX_MIN_RING_SIZE);
++ "RX queues cannot be smaller than %u\n",
++ EFX_RXQ_MIN_ENT);
+ return -EINVAL;
+ }
+
+- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
++ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
++ if (txq_entries != ring->tx_pending)
++ netif_warn(efx, drv, efx->net_dev,
++ "increasing TX queue size to minimum of %u\n",
++ txq_entries);
++
++ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+ }
+
+ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
+index 5fb24d3..66ece48 100644
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -65,6 +65,9 @@ enum {
+ #define FALCON_GMAC_LOOPBACKS \
+ (1 << LOOPBACK_GMAC)
+
++/* Alignment of PCIe DMA boundaries (4KB) */
++#define EFX_PAGE_SIZE 4096
++
+ /**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index df88c543..807d515 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -115,6 +115,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+ return len;
+ }
+
++unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
++{
++ /* Header and payload descriptor for each output segment, plus
++ * one for every input fragment boundary within a segment
++ */
++ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
++
++ /* Possibly one more per segment for the alignment workaround */
++ if (EFX_WORKAROUND_5391(efx))
++ max_descs += EFX_TSO_MAX_SEGS;
++
++ /* Possibly more for PCIe page boundaries within input fragments */
++ if (PAGE_SIZE > EFX_PAGE_SIZE)
++ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
++ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
++
++ return max_descs;
++}
++
+ /*
+ * Add a socket buffer to a TX queue
+ *
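+efx_tx_max_skb_descs() above bounds the worst-case descriptor count per skb.
+Evaluated standalone with this patch's constants and assumed typical values
+for MAX_SKB_FRAGS, GSO_MAX_SIZE and PAGE_SIZE:
+
+#include <stdio.h>
+
+#define EFX_TSO_MAX_SEGS 100
+#define EFX_PAGE_SIZE 4096
+#define MAX_SKB_FRAGS 17		/* assumed typical value */
+#define GSO_MAX_SIZE 65536		/* assumed typical value */
+#define PAGE_SIZE 4096			/* assumed 4KB host pages */
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+int main(void)
+{
+	unsigned max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; /* 217 */
+
+	/* the workaround-5391 case would add another EFX_TSO_MAX_SEGS */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += MAX(MAX_SKB_FRAGS,
+				 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	/* EFX_TXQ_MIN_ENT is then 2 * max_descs */
+	printf("max_descs = %u, txq minimum = %u\n", max_descs, 2 * max_descs);
+	return 0;
+}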
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index f8a6853..ad6a9d9 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+ goto tx_error;
+
+- rt = ip_route_output_ports(&init_net, &fl4, NULL,
++ rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0, IPPROTO_GRE,
+@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ po->chan.private = sk;
+ po->chan.ops = &pptp_chan_ops;
+
+- rt = ip_route_output_ports(&init_net, &fl4, sk,
++ rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+index a1670e3..93e6179 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+@@ -232,6 +232,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
+ struct iwl_priv *priv = file->private_data;
+ size_t bufsz;
+
++ if (!iwl_is_ready_rf(priv->shrd))
++ return -EAGAIN;
++
+ /* default is to dump the entire data segment */
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+index 5c29281..8533ba2 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+@@ -303,7 +303,7 @@ int iwl_queue_space(const struct iwl_queue *q);
+ ******************************************************/
+ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display);
+-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
++int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+ void iwl_dump_csr(struct iwl_trans *trans);
+
+ /*****************************************************
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 1daf01e..17fb25d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -678,7 +678,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
+
+ iwl_dump_nic_error_log(trans);
+ iwl_dump_csr(trans);
+- iwl_dump_fh(trans, NULL, false);
++ iwl_dump_fh(trans, NULL);
+ iwl_dump_nic_event_log(trans, false, NULL, false);
+ #ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 4661a64..75da4bc 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -1541,13 +1541,9 @@ static const char *get_fh_string(int cmd)
+ }
+ }
+
+-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
++int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+ {
+ int i;
+-#ifdef CONFIG_IWLWIFI_DEBUG
+- int pos = 0;
+- size_t bufsz = 0;
+-#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+@@ -1559,29 +1555,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+-#ifdef CONFIG_IWLWIFI_DEBUG
+- if (display) {
+- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
++
++#ifdef CONFIG_IWLWIFI_DEBUGFS
++ if (buf) {
++ int pos = 0;
++ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
++
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
++
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
++
++ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+- }
++
+ return pos;
+ }
+ #endif
++
+ IWL_ERR(trans, "FH register values:\n");
+- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
++ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+- }
++
+ return 0;
+ }
+
+@@ -1929,11 +1931,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ size_t count, loff_t *ppos)
+ {
+ struct iwl_trans *trans = file->private_data;
+- char *buf;
++ char *buf = NULL;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+- ret = pos = iwl_dump_fh(trans, &buf, true);
++ ret = pos = iwl_dump_fh(trans, &buf);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
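+The iwl_dump_fh() rework above drops the display flag and keys the output
+path off buf itself. A self-contained sketch of that single-parameter
+pattern, with snprintf standing in for scnprintf and hypothetical names:
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* returns bytes written into *buf when buf is non-NULL, else logs */
+static int dump_regs(char **buf)
+{
+	static const unsigned regs[] = { 0x10, 0x20, 0x30 };
+	size_t i, n = sizeof(regs) / sizeof(regs[0]);
+
+	if (buf) {
+		size_t bufsz = n * 32 + 32;
+		int pos = 0;
+
+		*buf = malloc(bufsz);
+		if (!*buf)
+			return -1;
+		for (i = 0; i < n; i++)
+			pos += snprintf(*buf + pos, bufsz - pos,
+					"reg[%zu] = 0x%08x\n", i, regs[i]);
+		return pos;
+	}
+
+	for (i = 0; i < n; i++)
+		fprintf(stderr, "reg[%zu] = 0x%08x\n", i, regs[i]);
+	return 0;
+}
+
+int main(void)
+{
+	char *buf = NULL;
+	int pos = dump_regs(&buf);	/* debugfs-style caller */
+
+	if (pos > 0)
+		fwrite(buf, 1, (size_t)pos, stdout);
+	free(buf);
+	dump_regs(NULL);		/* error-path caller */
+	return 0;
+}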
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
+index 3a6b402..0ea85f4 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1624,6 +1625,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
++ rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2400pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
+index d3a4a68..7564ae9 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.h
++++ b/drivers/net/wireless/rt2x00/rt2400pci.h
+@@ -670,6 +670,7 @@
+ #define GPIOCSR_BIT5 FIELD32(0x00000020)
+ #define GPIOCSR_BIT6 FIELD32(0x00000040)
+ #define GPIOCSR_BIT7 FIELD32(0x00000080)
++#define GPIOCSR_BIT8 FIELD32(0x00000100)
+
+ /*
+ * BBPPCSR: BBP Pin control register.
+diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
+index dcc0e1f..aa10c48 100644
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1942,6 +1943,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
++ rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2500pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
+index 53c5f87..22ed6df 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
+@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+ u16 reg;
+
+ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+- return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
++ return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
+ }
+
+ #ifdef CONFIG_RT2X00_LIB_LEDS
+@@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u16 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1781,6 +1782,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
++ rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
++ rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2500usb_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
+index b493306..196bd51 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.h
++++ b/drivers/net/wireless/rt2x00/rt2500usb.h
+@@ -189,14 +189,15 @@
+ * MAC_CSR19: GPIO control register.
+ */
+ #define MAC_CSR19 0x0426
+-#define MAC_CSR19_BIT0 FIELD32(0x0001)
+-#define MAC_CSR19_BIT1 FIELD32(0x0002)
+-#define MAC_CSR19_BIT2 FIELD32(0x0004)
+-#define MAC_CSR19_BIT3 FIELD32(0x0008)
+-#define MAC_CSR19_BIT4 FIELD32(0x0010)
+-#define MAC_CSR19_BIT5 FIELD32(0x0020)
+-#define MAC_CSR19_BIT6 FIELD32(0x0040)
+-#define MAC_CSR19_BIT7 FIELD32(0x0080)
++#define MAC_CSR19_BIT0 FIELD16(0x0001)
++#define MAC_CSR19_BIT1 FIELD16(0x0002)
++#define MAC_CSR19_BIT2 FIELD16(0x0004)
++#define MAC_CSR19_BIT3 FIELD16(0x0008)
++#define MAC_CSR19_BIT4 FIELD16(0x0010)
++#define MAC_CSR19_BIT5 FIELD16(0x0020)
++#define MAC_CSR19_BIT6 FIELD16(0x0040)
++#define MAC_CSR19_BIT7 FIELD16(0x0080)
++#define MAC_CSR19_BIT8 FIELD16(0x0100)
+
+ /*
+ * MAC_CSR20: LED control register.
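+The FIELD32 to FIELD16 switch above matches the helpers to MAC_CSR19's
+16-bit width. A minimal model of the get/set field helpers, hypothetical and
+only loosely mirroring rt2x00_get_field16()/rt2x00_set_field16():
+
+#include <stdint.h>
+#include <stdio.h>
+
+struct field16 { uint16_t mask; };
+
+static int lowest_bit(uint16_t m)
+{
+	int shift = 0;
+
+	while (!(m & 1)) {	/* caller guarantees a non-zero mask */
+		m >>= 1;
+		shift++;
+	}
+	return shift;
+}
+
+static uint16_t get_field16(uint16_t reg, struct field16 f)
+{
+	return (uint16_t)((reg & f.mask) >> lowest_bit(f.mask));
+}
+
+static void set_field16(uint16_t *reg, struct field16 f, uint16_t val)
+{
+	*reg = (uint16_t)((*reg & ~f.mask) |
+			  ((val << lowest_bit(f.mask)) & f.mask));
+}
+
+int main(void)
+{
+	struct field16 bit8 = { 0x0100 };	/* MAC_CSR19_BIT8 */
+	uint16_t reg = 0x0180;
+
+	set_field16(&reg, bit8, 0);	/* configure the GPIO pin as input */
+	printf("reg = 0x%04x, bit8 = %u\n", reg, get_field16(reg, bit8));
+	return 0;
+}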
+diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
+index 837b460..518157d 100644
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -935,6 +935,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -948,6 +949,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
++ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index ae7528b..b66a61b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -621,8 +621,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
+ skb_pull(entry->skb, RXINFO_DESC_SIZE);
+
+ /*
+- * FIXME: we need to check for rx_pkt_len validity
++ * Check for rx_pkt_len validity. Return if invalid, leaving
++ * rxdesc->size zeroed out by the upper level.
+ */
++ if (unlikely(rx_pkt_len == 0 ||
++ rx_pkt_len > entry->queue->data_size)) {
++ ERROR(entry->queue->rt2x00dev,
++ "Bad frame size %d, forcing to 0\n", rx_pkt_len);
++ return;
++ }
++
+ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
+
+ /*
+@@ -690,6 +698,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -703,6 +712,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
++ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
++ rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800_probe_hw_mode(rt2x00dev);
+@@ -1111,6 +1128,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1690, 0x0744) },
+ { USB_DEVICE(0x1690, 0x0761) },
+ { USB_DEVICE(0x1690, 0x0764) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x179d) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001) },
+ /* EnGenius */
+@@ -1163,7 +1182,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1760) },
+ { USB_DEVICE(0x0b05, 0x1761) },
+ { USB_DEVICE(0x0b05, 0x1790) },
+- { USB_DEVICE(0x0b05, 0x179d) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3262) },
+ { USB_DEVICE(0x13d3, 0x3284) },
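+The rx_pkt_len check added above is the usual guard for length fields read
+back from hardware: validate against the queue's data size before using them
+for pointer arithmetic. A standalone sketch with hypothetical names:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define QUEUE_DATA_SIZE 2048
+
+/* returns the descriptor trailing the payload, or NULL when the
+ * hardware-supplied length cannot be trusted */
+static const uint8_t *rx_desc(const uint8_t *data, uint32_t rx_pkt_len)
+{
+	if (rx_pkt_len == 0 || rx_pkt_len > QUEUE_DATA_SIZE) {
+		fprintf(stderr, "Bad frame size %u, dropping\n", rx_pkt_len);
+		return NULL;
+	}
+	return data + rx_pkt_len;	/* now provably inside the buffer */
+}
+
+int main(void)
+{
+	static uint8_t skb_data[QUEUE_DATA_SIZE + 4];
+
+	printf("ok:  %p\n", (const void *)rx_desc(skb_data, 1500));
+	printf("bad: %p\n", (const void *)rx_desc(skb_data, 0xffffu));
+	return 0;
+}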
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index 21b529b..f099b30 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -624,7 +624,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
+ */
+ if (unlikely(rxdesc.size == 0 ||
+ rxdesc.size > entry->queue->data_size)) {
+- WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
++ ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
+ rxdesc.size, entry->queue->data_size);
+ dev_kfree_skb(entry->skb);
+ goto renew_skb;
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
+index d69f88c..3e058e5 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2832,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Disable power saving.
+@@ -2850,6 +2851,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
++ rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
++ rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt61pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
+index e3cd6db..8f3da5a 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.h
++++ b/drivers/net/wireless/rt2x00/rt61pci.h
+@@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
+ #define MAC_CSR13_BIT10 FIELD32(0x00000400)
+ #define MAC_CSR13_BIT11 FIELD32(0x00000800)
+ #define MAC_CSR13_BIT12 FIELD32(0x00001000)
++#define MAC_CSR13_BIT13 FIELD32(0x00002000)
+
+ /*
+ * MAC_CSR14: LED control register.
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
+index cfb19db..2ad468d 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -2190,6 +2191,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
++ rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
++ rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt73usb_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
+index 9f6b470..df1cc11 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.h
++++ b/drivers/net/wireless/rt2x00/rt73usb.h
+@@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
+ #define MAC_CSR13_BIT10 FIELD32(0x00000400)
+ #define MAC_CSR13_BIT11 FIELD32(0x00000800)
+ #define MAC_CSR13_BIT12 FIELD32(0x00001000)
++#define MAC_CSR13_BIT13 FIELD32(0x00002000)
++#define MAC_CSR13_BIT14 FIELD32(0x00004000)
++#define MAC_CSR13_BIT15 FIELD32(0x00008000)
+
+ /*
+ * MAC_CSR14: LED control register.
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 29a994f..7c471eb 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4125,7 +4125,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+- spin_lock_init(&poll_aen_lock);
+
+ mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+@@ -5520,6 +5519,8 @@ static int __init megasas_init(void)
+ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+ MEGASAS_EXT_VERSION);
+
++ spin_lock_init(&poll_aen_lock);
++
+ support_poll_for_event = 2;
+ support_device_change = 1;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index e903077..98cb5e6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -2353,10 +2353,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ }
+
+ /* command line tunables for max controller queue depth */
+- if (max_queue_depth != -1)
+- max_request_credit = (max_queue_depth < facts->RequestCredit)
+- ? max_queue_depth : facts->RequestCredit;
+- else
++ if (max_queue_depth != -1 && max_queue_depth != 0) {
++ max_request_credit = min_t(u16, max_queue_depth +
++ ioc->hi_priority_depth + ioc->internal_depth,
++ facts->RequestCredit);
++ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
++ max_request_credit = MAX_HBA_QUEUE_DEPTH;
++ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+@@ -2431,7 +2434,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+- ioc->shost->can_queue = ioc->scsiio_depth - (2);
++ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
+ "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
+
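+The clamp above folds the HBA's internal depths back into the user's
+max_queue_depth before bounding by RequestCredit and MAX_HBA_QUEUE_DEPTH.
+The same arithmetic with assumed values (MAX_HBA_QUEUE_DEPTH taken as 30000):
+
+#include <stdio.h>
+
+#define MAX_HBA_QUEUE_DEPTH 30000	/* assumed to match mpt2sas_base.h */
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+int main(void)
+{
+	int max_queue_depth = 1000;	/* module parameter, assumed */
+	unsigned hi_priority_depth = 8, internal_depth = 10;
+	unsigned request_credit = 4000;	/* from IOCFacts, assumed */
+	unsigned max_request_credit;
+
+	if (max_queue_depth != -1 && max_queue_depth != 0) {
+		max_request_credit = MIN((unsigned)max_queue_depth +
+					 hi_priority_depth + internal_depth,
+					 request_credit);
+		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+			max_request_credit = MAX_HBA_QUEUE_DEPTH;
+	} else {
+		max_request_credit = MIN(request_credit,
+					 (unsigned)MAX_HBA_QUEUE_DEPTH);
+	}
+	printf("max_request_credit = %u\n", max_request_credit); /* 1018 */
+	return 0;
+}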
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 456b131..c83571e 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -41,6 +41,8 @@
+
+ #include <trace/events/scsi.h>
+
++static void scsi_eh_done(struct scsi_cmnd *scmd);
++
+ #define SENSE_TIMEOUT (10*HZ)
+
+ /*
+@@ -240,6 +242,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
+ if (! scsi_command_normalize_sense(scmd, &sshdr))
+ return FAILED; /* no valid sense data */
+
++ if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
++ /*
++ * nasty: for mid-layer issued TURs, we need to return the
++ * actual sense data without any recovery attempt. For eh
++ * issued ones, we need to try to recover and interpret
++ */
++ return SUCCESS;
++
+ if (scsi_sense_is_deferred(&sshdr))
+ return NEEDS_RETRY;
+
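+The scmd->scsi_done != scsi_eh_done test above tells mid-layer TURs apart
+from error-handler ones by comparing the completion function pointer; the
+same trick in miniature:
+
+#include <stdio.h>
+
+struct cmd { void (*done)(struct cmd *); };
+
+static void normal_done(struct cmd *c) { (void)c; }
+static void eh_done(struct cmd *c)     { (void)c; }
+
+static const char *classify(const struct cmd *c)
+{
+	/* a mid-layer command, unless completion routes back to the
+	 * error handler itself */
+	return c->done != eh_done ? "mid-layer issued" : "eh issued";
+}
+
+int main(void)
+{
+	struct cmd a = { normal_done }, b = { eh_done };
+
+	printf("a: %s\nb: %s\n", classify(&a), classify(&b));
+	return 0;
+}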
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index a48b59c..c6c80c9 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -776,6 +776,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ sdev->model = (char *) (sdev->inquiry + 16);
+ sdev->rev = (char *) (sdev->inquiry + 32);
+
+	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
++ /*
++ * sata emulation layer device. This is a hack to work around
++ * the SATL power management specifications which state that
++ * when the SATL detects the device has gone into standby
++ * mode, it shall respond with NOT READY.
++ */
++ sdev->allow_restart = 1;
++ }
++
+ if (*bflags & BLIST_ISROM) {
+ sdev->type = TYPE_ROM;
+ sdev->removable = 1;
+diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
+index 3141dc8..a48fe88 100644
+--- a/drivers/staging/comedi/drivers/das08.c
++++ b/drivers/staging/comedi/drivers/das08.c
+@@ -385,7 +385,7 @@ static const struct das08_board_struct das08_boards[] = {
+ .ai = das08_ai_rinsn,
+ .ai_nbits = 16,
+ .ai_pg = das08_pg_none,
+- .ai_encoding = das08_encode12,
++ .ai_encoding = das08_encode16,
+ .ao = das08jr_ao_winsn,
+ .ao_nbits = 16,
+ .di = das08jr_di_rbits,
+@@ -655,7 +655,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev,
+ int chan;
+
+ lsb = data[0] & 0xff;
+- msb = (data[0] >> 8) & 0xf;
++ msb = (data[0] >> 8) & 0xff;
+
+ chan = CR_CHAN(insn->chanspec);
+
+diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
+index 0e26d5f..495ee12 100644
+--- a/drivers/staging/rtl8712/recv_linux.c
++++ b/drivers/staging/rtl8712/recv_linux.c
+@@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
+ if (skb == NULL)
+ goto _recv_indicatepkt_drop;
+ skb->data = precv_frame->u.hdr.rx_data;
+-#ifdef NET_SKBUFF_DATA_USES_OFFSET
+- skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
+- precv_frame->u.hdr.rx_head);
+-#else
+- skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
+-#endif
+ skb->len = precv_frame->u.hdr.len;
++ skb_set_tail_pointer(skb, skb->len);
+ if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index c0edf97..08021f4 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
+ } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ cbHeaderSize += 6;
+ pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+- if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
++ if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
+ (*pwType == cpu_to_le16(0xF380))) {
+ cbHeaderSize -= 8;
+ pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index 9b64b10..fe21868 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -1701,7 +1701,7 @@ s_bPacketToWirelessUsb(
+ // 802.1H
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
+ if (pDevice->dwDiagRefCount == 0) {
+- if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
++ if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
+ (psEthHeader->wType == cpu_to_le16(0xF380))) {
+ memcpy((PBYTE) (pbyPayloadHead),
+ abySNAP_Bridgetunnel, 6);
+@@ -2840,10 +2840,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ Packet_Type = skb->data[ETH_HLEN+1];
+ Descriptor_type = skb->data[ETH_HLEN+1+1+2];
+ Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
+- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
+- /* 802.1x OR eapol-key challenge frame transfer */
+- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
+- (Packet_Type == 3)) {
++ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
++ /* 802.1x OR eapol-key challenge frame transfer */
++ if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
++ (Packet_Type == 3)) {
+ bTxeapol_key = TRUE;
+ if(!(Key_info & BIT3) && //WPA or RSN group-key challenge
+ (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
+@@ -2989,19 +2989,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ }
+ }
+
+- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
+- if (pDevice->byBBType != BB_TYPE_11A) {
+- pDevice->wCurrentRate = RATE_1M;
+- pDevice->byACKRate = RATE_1M;
+- pDevice->byTopCCKBasicRate = RATE_1M;
+- pDevice->byTopOFDMBasicRate = RATE_6M;
+- } else {
+- pDevice->wCurrentRate = RATE_6M;
+- pDevice->byACKRate = RATE_6M;
+- pDevice->byTopCCKBasicRate = RATE_1M;
+- pDevice->byTopOFDMBasicRate = RATE_6M;
+- }
+- }
++ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
++ if (pDevice->byBBType != BB_TYPE_11A) {
++ pDevice->wCurrentRate = RATE_1M;
++ pDevice->byACKRate = RATE_1M;
++ pDevice->byTopCCKBasicRate = RATE_1M;
++ pDevice->byTopOFDMBasicRate = RATE_6M;
++ } else {
++ pDevice->wCurrentRate = RATE_6M;
++ pDevice->byACKRate = RATE_6M;
++ pDevice->byTopCCKBasicRate = RATE_1M;
++ pDevice->byTopOFDMBasicRate = RATE_6M;
++ }
++ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
+@@ -3017,7 +3017,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+
+ if (bNeedEncryption == TRUE) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
+- if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
++ if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
+ bNeedEncryption = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
+index 16ad9fe..4306475 100644
+--- a/drivers/staging/zcache/zcache-main.c
++++ b/drivers/staging/zcache/zcache-main.c
+@@ -1223,13 +1223,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+ {
+- int ret = 0;
+-
+ BUG_ON(!is_ephemeral(pool));
+- zbud_decompress((struct page *)(data), pampd);
++ if (zbud_decompress((struct page *)(data), pampd) < 0)
++ return -EINVAL;
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ atomic_dec(&zcache_curr_eph_pampd_count);
+- return ret;
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 163fc90..8e68f79 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -130,6 +130,7 @@
+ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
++#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
+ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+ #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
+@@ -635,22 +636,11 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
+ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
+ {
+ unsigned int val;
+- unsigned int ufcr_rfdiv;
+-
+- /* set receiver / transmitter trigger level.
+- * RFDIV is set such way to satisfy requested uartclk value
+- */
+- val = TXTL << 10 | RXTL;
+- ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+- / sport->port.uartclk;
+-
+- if(!ufcr_rfdiv)
+- ufcr_rfdiv = 1;
+-
+- val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
++ /* set receiver / transmitter trigger level */
++ val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
++ val |= TXTL << UFCR_TXTL_SHF | RXTL;
+ writel(val, sport->port.membase + UFCR);
+-
+ return 0;
+ }
+
+@@ -725,6 +715,7 @@ static int imx_startup(struct uart_port *port)
+ }
+ }
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ /*
+ * Finally, clear and enable interrupts
+ */
+@@ -778,7 +769,6 @@ static int imx_startup(struct uart_port *port)
+ /*
+ * Enable modem status interrupts
+ */
+- spin_lock_irqsave(&sport->port.lock,flags);
+ imx_enable_ms(&sport->port);
+ spin_unlock_irqrestore(&sport->port.lock,flags);
+
+@@ -808,10 +798,13 @@ static void imx_shutdown(struct uart_port *port)
+ {
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
++ unsigned long flags;
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+@@ -840,12 +833,14 @@ static void imx_shutdown(struct uart_port *port)
+ * Disable all interrupts, port and break condition.
+ */
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+ if (USE_IRDA(sport))
+ temp &= ~(UCR1_IREN);
+
+ writel(temp, sport->port.membase + UCR1);
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ static void
+@@ -1119,6 +1114,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct imx_port *sport = imx_ports[co->index];
+ unsigned int old_ucr1, old_ucr2, ucr1;
++ unsigned long flags;
++
++ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /*
+ * First, save UCR1/2 and then disable interrupts
+@@ -1145,6 +1143,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+
+ writel(old_ucr1, sport->port.membase + UCR1);
+ writel(old_ucr2, sport->port.membase + UCR2);
++
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ /*
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 32d3adc..8b2a9d8 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* Microchip Joss Optical infrared touchboard device */
++ { USB_DEVICE(0x04d8, 0x000c), .driver_info =
++ USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index fef1db3..2023733 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
+ else {
+ qtd = list_entry (qh->qtd_list.next,
+ struct ehci_qtd, qtd_list);
+- /* first qtd may already be partially processed */
+- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
++ /*
++ * first qtd may already be partially processed.
++ * If we come here during unlink, the QH overlay region
++ * might have reference to the just unlinked qtd. The
++ * qtd is updated in qh_completions(). Update the QH
++ * overlay here.
++ */
++ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
++ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
++ }
+ }
+
+ if (qtd)
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 833b3c6..d0ec2f0 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -75,7 +75,9 @@
+ #define NB_PIF0_PWRDOWN_1 0x01100013
+
+ #define USB_INTEL_XUSB2PR 0xD0
++#define USB_INTEL_USB2PRM 0xD4
+ #define USB_INTEL_USB3_PSSEN 0xD8
++#define USB_INTEL_USB3PRM 0xDC
+
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+@@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ return;
+ }
+
+- ports_available = 0xffffffff;
++ /* Read USB3PRM, the USB 3.0 Port Routing Mask Register
++	 * which indicates the ports that can be changed by the OS.
++ */
++ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
++ &ports_available);
++
++ dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
++ ports_available);
++
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+- * Register, to turn on SuperSpeed terminations for all
+- * available ports.
++ * Register, to turn on SuperSpeed terminations for the
++ * switchable ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+@@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+- ports_available = 0xffffffff;
++ /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
++	 * which indicates the USB 2.0 ports to be controlled by the xHCI host.
++ */
++
++ pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
++ &ports_available);
++
++	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
++ ports_available);
++
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+@@ -800,6 +819,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ }
+ EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
++void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
++{
++ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
++ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
++}
++EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
++
+ /**
+ * PCI Quirks for xHCI.
+ *
+@@ -815,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ void __iomem *op_reg_base;
+ u32 val;
+ int timeout;
++ int len = pci_resource_len(pdev, 0);
+
+ if (!mmio_resource_enabled(pdev, 0))
+ return;
+
+- base = ioremap_nocache(pci_resource_start(pdev, 0),
+- pci_resource_len(pdev, 0));
++ base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+ if (base == NULL)
+ return;
+
+@@ -830,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ */
+ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+ do {
++ if ((ext_cap_offset + sizeof(val)) > len) {
++ /* We're reading garbage from the controller */
++ dev_warn(&pdev->dev,
++ "xHCI controller failing to respond");
++ return;
++ }
++
+ if (!ext_cap_offset)
+ /* We've reached the end of the extended capabilities */
+ goto hc_init;
++
+ val = readl(base + ext_cap_offset);
+ if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+ break;
+@@ -863,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ /* Disable any BIOS SMIs and clear all SMI events*/
+ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
++hc_init:
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+-hc_init:
++
+ op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+ /* Wait for the host controller to be ready before writing any
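+The hunks above replace the blanket 0xffffffff writes with the masks read
+back from USB3PRM/XUSB2PRM, so only BIOS-advertised switchable ports move to
+xHCI. A sketch of the read-mask-then-write flow over a fake config space,
+reusing only the register offsets defined above:
+
+#include <stdint.h>
+#include <stdio.h>
+
+#define USB_INTEL_XUSB2PR	0xD0
+#define USB_INTEL_USB2PRM	0xD4
+#define USB_INTEL_USB3_PSSEN	0xD8
+#define USB_INTEL_USB3PRM	0xDC
+
+static uint32_t cfg[64];	/* stand-in for PCI config space */
+
+static uint32_t cfg_read(unsigned off)          { return cfg[off / 4]; }
+static void cfg_write(unsigned off, uint32_t v) { cfg[off / 4] = v; }
+
+int main(void)
+{
+	cfg_write(USB_INTEL_USB3PRM, 0x3);	/* BIOS: 2 switchable SS ports */
+	cfg_write(USB_INTEL_USB2PRM, 0xf);	/* BIOS: 4 switchable HS ports */
+
+	/* enable exactly what the routing masks advertise, nothing more */
+	cfg_write(USB_INTEL_USB3_PSSEN, cfg_read(USB_INTEL_USB3PRM));
+	cfg_write(USB_INTEL_XUSB2PR, cfg_read(USB_INTEL_USB2PRM));
+
+	printf("PSSEN = 0x%x, XUSB2PR = 0x%x\n",
+	       cfg_read(USB_INTEL_USB3_PSSEN), cfg_read(USB_INTEL_XUSB2PR));
+	return 0;
+}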
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
+index b1002a8..7f69a39 100644
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -10,10 +10,12 @@ void usb_amd_quirk_pll_disable(void);
+ void usb_amd_quirk_pll_enable(void);
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
++void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+ #else
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_amd_dev_put(void) {}
++static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+ #endif /* CONFIG_PCI */
+
+ #endif /* __LINUX_USB_PCI_QUIRKS_H */
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index fd8a2c2..978860b 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -469,11 +469,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
++ } else {
++ /*
++ * If CAS bit isn't set but the Port is already at
++ * Compliance Mode, fake a connection so the USB core
++ * notices the Compliance state and resets the port.
++ * This resolves an issue generated by the SN65LVPE502CP
++	 * in which the port sometimes enters compliance mode
++	 * due to a delay in the host-device negotiation.
++ */
++ if (pls == USB_SS_PORT_LS_COMP_MOD)
++ pls |= USB_PORT_STAT_CONNECTION;
+ }
++
+ /* update status field */
+ *status |= pls;
+ }
+
++/*
++ * Function for Compliance Mode Quirk.
++ *
++ * This function verifies whether all xHC USB3 ports have entered U0; if so,
++ * the compliance mode timer is deleted. A port won't enter
++ * compliance mode if it has previously entered U0.
++ */
++void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
++{
++ u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
++ bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
++
++ if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
++ return;
++
++ if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
++ xhci->port_status_u0 |= 1 << wIndex;
++ if (xhci->port_status_u0 == all_ports_seen_u0) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
++ }
++ }
++}
++
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+ {
+@@ -618,6 +655,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ /* Update Port Link State for super speed ports*/
+ if (hcd->speed == HCD_USB3) {
+ xhci_hub_report_link_state(&status, temp);
++ /*
++ * Verify if all USB3 Ports Have entered U0 already.
++ * Delete Compliance Mode Timer if so.
++ */
++ xhci_del_comp_mod_timer(xhci, temp, wIndex);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
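+xhci_del_comp_mod_timer() above records each port's U0 entry in a bitmask
+and compares it to (1 << num_usb3_ports) - 1; the bit arithmetic in
+isolation:
+
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned num_usb3_ports = 4;	/* example port count */
+	unsigned all_ports_seen_u0 = (1u << num_usb3_ports) - 1; /* 0xf */
+	unsigned port_status_u0 = 0;
+	unsigned wIndex;
+
+	for (wIndex = 0; wIndex < num_usb3_ports; wIndex++) {
+		port_status_u0 |= 1u << wIndex;	/* this port reached U0 */
+		if (port_status_u0 == all_ports_seen_u0)
+			printf("all ports in U0 after port %u; "
+			       "recovery timer can be deleted\n", wIndex + 1);
+	}
+	return 0;
+}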
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 07c72a4..bddcbfc 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -90,6 +90,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
++ /*
++ * PPT desktop boards DH77EB and DH77DF will power back on after
++ * a few seconds of being shutdown. The fix for this is to
++ * switch the ports from xHCI to EHCI on shutdown. We can't use
++ * DMI information to find those particular boards (since each
++ * vendor will change the board name), so we have to key off all
++ * PPT chipsets.
++ */
++ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index fb0981e..c7c530c 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -145,25 +145,34 @@ static void next_trb(struct xhci_hcd *xhci,
+ */
+ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+ {
+- union xhci_trb *next = ++(ring->dequeue);
+ unsigned long long addr;
+
+ ring->deq_updates++;
+- /* Update the dequeue pointer further if that was a link TRB or we're at
+- * the end of an event ring segment (which doesn't have link TRBS)
+- */
+- while (last_trb(xhci, ring, ring->deq_seg, next)) {
+- if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+- if (!in_interrupt())
+- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+- ring,
+- (unsigned int) ring->cycle_state);
++
++ do {
++ /*
++ * Update the dequeue pointer further if that was a link TRB or
++ * we're at the end of an event ring segment (which doesn't have
++ * link TRBS)
++ */
++ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
++ if (consumer && last_trb_on_last_seg(xhci, ring,
++ ring->deq_seg, ring->dequeue)) {
++ if (!in_interrupt())
++ xhci_dbg(xhci, "Toggle cycle state "
++ "for ring %p = %i\n",
++ ring,
++ (unsigned int)
++ ring->cycle_state);
++ ring->cycle_state = (ring->cycle_state ? 0 : 1);
++ }
++ ring->deq_seg = ring->deq_seg->next;
++ ring->dequeue = ring->deq_seg->trbs;
++ } else {
++ ring->dequeue++;
+ }
+- ring->deq_seg = ring->deq_seg->next;
+- ring->dequeue = ring->deq_seg->trbs;
+- next = ring->dequeue;
+- }
++ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
++
+ addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f7c0a2a..09872ee 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -26,6 +26,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
++#include <linux/dmi.h>
+
+ #include "xhci.h"
+
+@@ -387,6 +388,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+
+ #endif
+
++static void compliance_mode_recovery(unsigned long arg)
++{
++ struct xhci_hcd *xhci;
++ struct usb_hcd *hcd;
++ u32 temp;
++ int i;
++
++ xhci = (struct xhci_hcd *)arg;
++
++ for (i = 0; i < xhci->num_usb3_ports; i++) {
++ temp = xhci_readl(xhci, xhci->usb3_ports[i]);
++ if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
++ /*
++ * Compliance Mode Detected. Letting USB Core
++ * handle the Warm Reset
++ */
++ xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
++ i + 1);
++ xhci_dbg(xhci, "Attempting Recovery routine!\n");
++ hcd = xhci->shared_hcd;
++
++ if (hcd->state == HC_STATE_SUSPENDED)
++ usb_hcd_resume_root_hub(hcd);
++
++ usb_hcd_poll_rh_status(hcd);
++ }
++ }
++
++ if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
++ mod_timer(&xhci->comp_mode_recovery_timer,
++ jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
++}
++
++/*
++ * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
++ * that causes ports behind that hardware to enter compliance mode sometimes.
++ * The quirk creates a timer that polls the link state of each host
++ * controller's port every 2 seconds and recovers it by issuing a Warm reset
++ * if Compliance mode is detected; otherwise the port becomes "dead" (no
++ * device connections or disconnections will be detected anymore). Because no
++ * status event is generated when entering compliance mode (per xhci spec),
++ * this quirk is needed on systems that have the failing hardware installed.
++ */
++static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
++{
++ xhci->port_status_u0 = 0;
++ init_timer(&xhci->comp_mode_recovery_timer);
++
++ xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
++ xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
++ xhci->comp_mode_recovery_timer.expires = jiffies +
++ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
++
++ set_timer_slack(&xhci->comp_mode_recovery_timer,
++ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
++ add_timer(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
++}
++
++/*
++ * This function identifies the systems that have installed the SN65LVPE502CP
++ * USB3.0 re-driver and that need the Compliance Mode Quirk.
++ * Systems:
++ * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
++ */
++static bool compliance_mode_recovery_timer_quirk_check(void)
++{
++ const char *dmi_product_name, *dmi_sys_vendor;
++
++ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
++ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
++
++ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
++ return false;
++
++ if (strstr(dmi_product_name, "Z420") ||
++ strstr(dmi_product_name, "Z620") ||
++ strstr(dmi_product_name, "Z820"))
++ return true;
++
++ return false;
++}
++
++static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
++{
++ return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
++}
++
++
+ /*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+@@ -410,6 +500,12 @@ int xhci_init(struct usb_hcd *hcd)
+ retval = xhci_mem_init(xhci, GFP_KERNEL);
+ xhci_dbg(xhci, "Finished xhci_init\n");
+
++ /* Initializing Compliance Mode Recovery Data If Needed */
++ if (compliance_mode_recovery_timer_quirk_check()) {
++ xhci->quirks |= XHCI_COMP_MODE_QUIRK;
++ compliance_mode_recovery_timer_init(xhci);
++ }
++
+ return retval;
+ }
+
+@@ -618,6 +714,11 @@ void xhci_stop(struct usb_hcd *hcd)
+ del_timer_sync(&xhci->event_ring_timer);
+ #endif
+
++ /* Deleting Compliance Mode Recovery Timer */
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ (!(xhci_all_ports_seen_u0(xhci))))
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_dev_put();
+
+@@ -648,6 +749,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
++ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
++ usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
++
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ spin_unlock_irq(&xhci->lock);
+@@ -791,6 +895,16 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irq(&xhci->lock);
+
++ /*
++ * Deleting Compliance Mode Recovery Timer because the xHCI Host
++ * is about to be suspended.
++ */
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ (!(xhci_all_ports_seen_u0(xhci)))) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
++ }
++
+ /* step 5: remove core well power */
+ /* synchronize irq when using MSI-X */
+ xhci_msix_sync_irqs(xhci);
+@@ -923,6 +1037,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
++
++ /*
++	 * If the system is subject to the quirk, the Compliance Mode Timer
++	 * always needs to be re-initialized after a system resume, since the
++	 * ports can suffer the Compliance Mode issue again. It doesn't matter
++	 * whether the ports entered U0 before the system was suspended.
++ */
++ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
++ compliance_mode_recovery_timer_init(xhci);
++
+ return retval;
+ }
+ #endif /* CONFIG_PM */
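
One caveat in the quirk check above: dmi_get_system_info() returns NULL
when the firmware leaves a DMI field unpopulated, so the bare strstr()
calls can oops on such systems. A hedged sketch of the same check with
NULL guards added (the guards are an editor's assumption, not part of
this patch):

#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/string.h>

static bool compliance_mode_quirk_check_safe(void)
{
	const char *product = dmi_get_system_info(DMI_PRODUCT_NAME);
	const char *vendor  = dmi_get_system_info(DMI_SYS_VENDOR);

	/* Bail out early if the firmware did not fill in these fields */
	if (!product || !vendor)
		return false;

	if (!strstr(vendor, "Hewlett-Packard"))
		return false;

	return strstr(product, "Z420") != NULL ||
	       strstr(product, "Z620") != NULL ||
	       strstr(product, "Z820") != NULL;
}
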
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 7a56805..44d518a 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1471,6 +1471,8 @@ struct xhci_hcd {
+ #define XHCI_SW_BW_CHECKING (1 << 8)
+ #define XHCI_AMD_0x96_HOST (1 << 9)
+ #define XHCI_TRUST_TX_LENGTH (1 << 10)
++#define XHCI_SPURIOUS_REBOOT (1 << 13)
++#define XHCI_COMP_MODE_QUIRK (1 << 14)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+@@ -1487,6 +1489,11 @@ struct xhci_hcd {
+ unsigned sw_lpm_support:1;
+ /* support xHCI 1.0 spec USB2 hardware LPM */
+ unsigned hw_lpm_support:1;
++ /* Compliance Mode Recovery Data */
++ struct timer_list comp_mode_recovery_timer;
++ u32 port_status_u0;
++/* Compliance Mode Timer Triggered every 2 seconds */
++#define COMP_MODE_RCVRY_MSECS 2000
+ };
+
+ /* convert between an HCD pointer and the corresponding EHCI_HCD */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b3182bb..7324bea 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+@@ -804,13 +805,32 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
++ USB_CLASS_VENDOR_SPEC,
++ USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
++ { USB_DEVICE(FTDI_VID, PI_C865_PID) },
++ { USB_DEVICE(FTDI_VID, PI_C857_PID) },
++ { USB_DEVICE(PI_VID, PI_C866_PID) },
++ { USB_DEVICE(PI_VID, PI_C663_PID) },
++ { USB_DEVICE(PI_VID, PI_C725_PID) },
++ { USB_DEVICE(PI_VID, PI_E517_PID) },
++ { USB_DEVICE(PI_VID, PI_C863_PID) },
+ { USB_DEVICE(PI_VID, PI_E861_PID) },
++ { USB_DEVICE(PI_VID, PI_C867_PID) },
++ { USB_DEVICE(PI_VID, PI_E609_PID) },
++ { USB_DEVICE(PI_VID, PI_E709_PID) },
++ { USB_DEVICE(PI_VID, PI_100F_PID) },
++ { USB_DEVICE(PI_VID, PI_1011_PID) },
++ { USB_DEVICE(PI_VID, PI_1012_PID) },
++ { USB_DEVICE(PI_VID, PI_1013_PID) },
++ { USB_DEVICE(PI_VID, PI_1014_PID) },
++ { USB_DEVICE(PI_VID, PI_1015_PID) },
++ { USB_DEVICE(PI_VID, PI_1016_PID) },
+ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
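
The Microchip entry above is narrowed from USB_DEVICE() to
USB_DEVICE_AND_INTERFACE_INFO() so the FTDI driver only binds interfaces
whose class/subclass/protocol are vendor-specific, leaving genuine CDC ACM
firmware on the shared 0x04D8:0x000A ID to the ACM driver. A minimal
sketch of such an id table (hypothetical driver, not this patch):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_ids[] = {
	/* matches every interface of the device */
	{ USB_DEVICE(0x0403, 0x6001) },
	/* additionally requires a vendor-specific interface */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x04d8, 0x000a,
			USB_CLASS_VENDOR_SPEC,
			USB_SUBCLASS_VENDOR_SPEC, 0x00) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_ids);
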
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 54b4258..06f6fd2 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -75,6 +75,9 @@
+ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+ #define FTDI_OPENDCC_GBM_PID 0xBFDC
+
++/* NZR SEM 16+ USB (http://www.nzr.de) */
++#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
++
+ /*
+ * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
+ */
+@@ -539,7 +542,10 @@
+ /*
+ * Microchip Technology, Inc.
+ *
+- * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
++ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
++ * used by single-function CDC ACM class based firmware demo
++ * applications. The VID/PID has also been used in firmware
++ * emulating FTDI serial chips by:
+ * Hornby Elite - Digital Command Control Console
+ * http://www.hornby.com/hornby-dcc/controllers/
+ */
+@@ -791,8 +797,27 @@
+ * Physik Instrumente
+ * http://www.physikinstrumente.com/en/products/
+ */
++/* These two devices use the VID of FTDI */
++#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */
++#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */
++
+ #define PI_VID 0x1a72 /* Vendor ID */
+-#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
++#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */
++#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */
++#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */
++#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */
++#define PI_C863_PID 0x1007 /* PI C-863 */
++#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */
++#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */
++#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */
++#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */
++#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */
++#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */
++#define PI_1012_PID 0x1012 /* PI Motion Controller */
++#define PI_1013_PID 0x1013 /* PI Motion Controller */
++#define PI_1014_PID 0x1014 /* PI Device */
++#define PI_1015_PID 0x1015 /* PI Device */
++#define PI_1016_PID 0x1016 /* PI Digital Servo Module */
+
+ /*
+ * Kondo Kagaku Co.Ltd.
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 113560d..c068b4d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1090,6 +1090,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
++
+ { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
+ { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
+ { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
+diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
+index 70aa47d..f7c1753 100644
+--- a/drivers/video/omap2/omapfb/omapfb-main.c
++++ b/drivers/video/omap2/omapfb/omapfb-main.c
+@@ -1183,7 +1183,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
+ break;
+
+ if (regno < 16) {
+- u16 pal;
++ u32 pal;
+ pal = ((red >> (16 - var->red.length)) <<
+ var->red.offset) |
+ ((green >> (16 - var->green.length)) <<
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 284798a..89588e7 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -231,7 +231,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ return ret;
+
+ if (hwdev && hwdev->coherent_dma_mask)
+- dma_mask = hwdev->coherent_dma_mask;
++ dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+
+ phys = virt_to_phys(ret);
+ dev_addr = xen_phys_to_bus(phys);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0bb785f..51574d4 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -882,7 +882,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+ if (!buf) {
+ mutex_unlock(&cinode->lock_mutex);
+ FreeXid(xid);
+- return rc;
++ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; i++) {
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index af11098..7c7556b 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -640,6 +640,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct dentry *lower_old_dir_dentry;
+ struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
++ struct inode *target_inode;
+
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
+@@ -647,6 +648,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ dget(lower_new_dentry);
+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
++ target_inode = new_dentry->d_inode;
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+@@ -662,6 +664,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ lower_new_dir_dentry->d_inode, lower_new_dentry);
+ if (rc)
+ goto out_lock;
++ if (target_inode)
++ fsstack_copy_attr_all(target_inode,
++ ecryptfs_inode_to_lower(target_inode));
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
+ if (new_dir != old_dir)
+ fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
+diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
+index 5b3f907..71b263f 100644
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle,
+ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
++ int need_datasync = 0;
++ __le32 disksize;
+
+ again:
+ /* we can't allow multiple procs in here at once, its a bit racey */
+@@ -3109,7 +3111,11 @@ again:
+ raw_inode->i_gid_high = 0;
+ }
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
++ disksize = cpu_to_le32(ei->i_disksize);
++ if (disksize != raw_inode->i_size) {
++ need_datasync = 1;
++ raw_inode->i_size = disksize;
++ }
+ raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+ raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+ raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+@@ -3125,8 +3131,11 @@ again:
+ if (!S_ISREG(inode->i_mode)) {
+ raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+ } else {
+- raw_inode->i_size_high =
+- cpu_to_le32(ei->i_disksize >> 32);
++ disksize = cpu_to_le32(ei->i_disksize >> 32);
++ if (disksize != raw_inode->i_size_high) {
++ raw_inode->i_size_high = disksize;
++ need_datasync = 1;
++ }
+ if (ei->i_disksize > 0x7fffffffULL) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
+@@ -3179,6 +3188,8 @@ again:
+ ext3_clear_inode_state(inode, EXT3_STATE_NEW);
+
+ atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
++ if (need_datasync)
++ atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
+ out_brelse:
+ brelse (bh);
+ ext3_std_error(inode->i_sb, err);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 2aaf3ea..5c029fb 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1524,6 +1524,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+ req->pages[req->num_pages] = page;
+ req->num_pages++;
+
++ offset = 0;
+ num -= this_num;
+ total_len += this_num;
+ index++;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 50a15fa..b78b5b6 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = jiffies;
+
+- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
++ memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ else
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 5195fd6..dba87e6 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -633,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+ {
+ struct inode *dir = dentry->d_inode;
+- __be32 *verf = NFS_COOKIEVERF(dir);
++ __be32 *verf = NFS_I(dir)->cookieverf;
+ struct nfs3_readdirargs arg = {
+ .fh = NFS_FH(dir),
+ .cookie = cookie,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d20221d..61796a40 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3025,11 +3025,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name,
+ (unsigned long long)cookie);
+- nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
++ nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
+ res.pgbase = args.pgbase;
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ if (status >= 0) {
+- memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
++ memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
+ status += args.pgbase;
+ }
+
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index bdd5bdc..00818c8 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -6113,7 +6113,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ status = decode_open(xdr, res);
+ if (status)
+ goto out;
+- if (decode_getfh(xdr, &res->fh) != 0)
++ status = decode_getfh(xdr, &res->fh);
++ if (status)
+ goto out;
+ if (decode_getfattr(xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 6e85ec6..e42d6f6 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1820,6 +1820,7 @@ static int nfs_validate_mount_data(void *options,
+
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ args->nfs_server.addrlen = sizeof(data->addr);
++ args->nfs_server.port = ntohs(data->addr.sin_port);
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+@@ -2538,6 +2539,7 @@ static int nfs4_validate_mount_data(void *options,
+ return -EFAULT;
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
++ args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+
+ if (data->auth_flavourlen) {
+ if (data->auth_flavourlen > 1)
+diff --git a/fs/stat.c b/fs/stat.c
+index 8806b89..7b21801 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -57,12 +57,13 @@ EXPORT_SYMBOL(vfs_getattr);
+
+ int vfs_fstat(unsigned int fd, struct kstat *stat)
+ {
+- struct file *f = fget(fd);
++ int fput_needed;
++ struct file *f = fget_raw_light(fd, &fput_needed);
+ int error = -EBADF;
+
+ if (f) {
+ error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
+- fput(f);
++ fput_light(f, fput_needed);
+ }
+ return error;
+ }
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index d567b84..874c9e3 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -39,20 +39,24 @@
+ #include "udf_i.h"
+ #include "udf_sb.h"
+
+-static int udf_adinicb_readpage(struct file *file, struct page *page)
++static void __udf_adinicb_readpage(struct page *page)
+ {
+ struct inode *inode = page->mapping->host;
+ char *kaddr;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+- BUG_ON(!PageLocked(page));
+-
+ kaddr = kmap(page);
+- memset(kaddr, 0, PAGE_CACHE_SIZE);
+ memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
++ memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ kunmap(page);
++}
++
++static int udf_adinicb_readpage(struct file *file, struct page *page)
++{
++ BUG_ON(!PageLocked(page));
++ __udf_adinicb_readpage(page);
+ unlock_page(page);
+
+ return 0;
+@@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page,
+ return 0;
+ }
+
++static int udf_adinicb_write_begin(struct file *file,
++ struct address_space *mapping, loff_t pos,
++ unsigned len, unsigned flags, struct page **pagep,
++ void **fsdata)
++{
++ struct page *page;
++
++ if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
++ return -EIO;
++ page = grab_cache_page_write_begin(mapping, 0, flags);
++ if (!page)
++ return -ENOMEM;
++ *pagep = page;
++
++ if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
++ __udf_adinicb_readpage(page);
++ return 0;
++}
++
+ static int udf_adinicb_write_end(struct file *file,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+@@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file,
+ const struct address_space_operations udf_adinicb_aops = {
+ .readpage = udf_adinicb_readpage,
+ .writepage = udf_adinicb_writepage,
+- .write_begin = simple_write_begin,
+- .write_end = udf_adinicb_write_end,
++ .write_begin = udf_adinicb_write_begin,
++ .write_end = udf_adinicb_write_end,
+ };
+
+ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index ddd46db..7639f18 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -277,8 +277,9 @@ struct drm_mode_mode_cmd {
+ struct drm_mode_modeinfo mode;
+ };
+
+-#define DRM_MODE_CURSOR_BO (1<<0)
+-#define DRM_MODE_CURSOR_MOVE (1<<1)
++#define DRM_MODE_CURSOR_BO 0x01
++#define DRM_MODE_CURSOR_MOVE 0x02
++#define DRM_MODE_CURSOR_FLAGS 0x03
+
+ /*
+ * depending on the value in flags different members are used.
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index ad81e1c..445f978 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -226,7 +226,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
+
+ static inline __printf(2, 3)
+ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+-{ return 0; }
++{ return -ENOMEM; }
+
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
+diff --git a/include/linux/ktime.h b/include/linux/ktime.h
+index 603bec2..06177ba10 100644
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -58,13 +58,6 @@ union ktime {
+
+ typedef union ktime ktime_t; /* Kill this */
+
+-#define KTIME_MAX ((s64)~((u64)1 << 63))
+-#if (BITS_PER_LONG == 64)
+-# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+-#else
+-# define KTIME_SEC_MAX LONG_MAX
+-#endif
+-
+ /*
+ * ktime_t definitions when using the 64-bit scalar representation:
+ */
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index c8ef9bc..87967ee 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -219,6 +219,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
+ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
++#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
+ /* byte mode */
+ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
+ #define MMC_NO_POWER_NOTIFICATION 0
+diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
+index 30b0c4e..43e038a 100644
+--- a/include/linux/mv643xx_eth.h
++++ b/include/linux/mv643xx_eth.h
+@@ -15,6 +15,8 @@
+ #define MV643XX_ETH_SIZE_REG_4 0x2224
+ #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
+
++#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0
++
+ struct mv643xx_eth_shared_platform_data {
+ struct mbus_dram_target_info *dram;
+ struct platform_device *shared_smi;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index cb52340..00ca32b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1299,6 +1299,8 @@ struct net_device {
+ /* for setting kernel sock attribute on TCP connection setup */
+ #define GSO_MAX_SIZE 65536
+ unsigned int gso_max_size;
++#define GSO_MAX_SEGS 65535
++ u16 gso_max_segs;
+
+ #ifdef CONFIG_DCB
+ /* Data Center Bridging netlink ops */
+@@ -1511,6 +1513,8 @@ struct packet_type {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb);
++ bool (*id_match)(struct packet_type *ptype,
++ struct sock *sk);
+ void *af_packet_priv;
+ struct list_head list;
+ };
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 92ecf55..33c52a2 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -261,11 +261,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
+ return NFS_SERVER(inode)->nfs_client->rpc_ops;
+ }
+
+-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
+-{
+- return NFS_I(inode)->cookieverf;
+-}
+-
+ static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
+ {
+ struct nfs_server *nfss = NFS_SERVER(inode);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 2aaee0c..67cc215 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2124,7 +2124,7 @@
+ #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
+ #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
+ #define PCI_DEVICE_ID_NX2_5706S 0x16aa
+-#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab
++#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
+ #define PCI_DEVICE_ID_NX2_5708S 0x16ac
+ #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
+ #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index b1f8912..b669be6 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -794,7 +794,7 @@ struct perf_event {
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+- struct file *filp;
++ atomic_long_t refcount;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index 15518a1..0a4cd10 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
+ void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+ int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
++ void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*rpcbind)(struct rpc_task *task);
+ void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
+ void (*connect)(struct rpc_task *task);
+@@ -274,6 +275,8 @@ void xprt_connect(struct rpc_task *task);
+ void xprt_reserve(struct rpc_task *task);
+ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
++void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
++void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+ int xprt_prepare_transmit(struct rpc_task *task);
+ void xprt_transmit(struct rpc_task *task);
+ void xprt_end_transmit(struct rpc_task *task);
+diff --git a/include/linux/time.h b/include/linux/time.h
+index b306178..8c0216e 100644
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
+ return ts_delta;
+ }
+
++#define KTIME_MAX ((s64)~((u64)1 << 63))
++#if (BITS_PER_LONG == 64)
++# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
++#else
++# define KTIME_SEC_MAX LONG_MAX
++#endif
++
+ /*
+ * Returns true if the timespec is norm, false if denorm:
+ */
+-#define timespec_valid(ts) \
+- (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
++static inline bool timespec_valid(const struct timespec *ts)
++{
++ /* Dates before 1970 are bogus */
++ if (ts->tv_sec < 0)
++ return false;
++ /* Can't have more nanoseconds than a second */
++ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
++ return false;
++ return true;
++}
++
++static inline bool timespec_valid_strict(const struct timespec *ts)
++{
++ if (!timespec_valid(ts))
++ return false;
++ /* Disallow values that could overflow ktime_t */
++ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
++ return false;
++ return true;
++}
+
+ extern void read_persistent_clock(struct timespec *ts);
+ extern void read_boot_clock(struct timespec *ts);
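
The timespec_valid_strict() bound above is worth a quick sanity check:
KTIME_MAX is S64_MAX nanoseconds, so any tv_sec at or beyond
KTIME_MAX / NSEC_PER_SEC would overflow once scaled to nanoseconds. A
small userspace demo of the arithmetic (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t ktime_max = INT64_MAX;		/* (s64)~((u64)1 << 63) */
	int64_t ktime_sec_max = ktime_max / NSEC_PER_SEC;

	printf("KTIME_MAX     = %lld ns\n", (long long)ktime_max);
	printf("KTIME_SEC_MAX = %lld s (~292 years)\n",
	       (long long)ktime_sec_max);

	/* ktime_sec_max seconds still fits in an s64 nanosecond count;
	 * one more second would not, which is why the strict check
	 * rejects tv_sec >= KTIME_SEC_MAX. */
	printf("fits: %lld ns\n", (long long)(ktime_sec_max * NSEC_PER_SEC));
	return 0;
}
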
+diff --git a/include/net/scm.h b/include/net/scm.h
+index d456f4c..0c0017c 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
+ }
+
+ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
+- struct scm_cookie *scm)
++ struct scm_cookie *scm, bool forcecreds)
+ {
+ memset(scm, 0, sizeof(*scm));
++ if (forcecreds)
++ scm_set_cred(scm, task_tgid(current), current_cred());
+ unix_get_peersec_dgram(sock, scm);
+ if (msg->msg_controllen <= 0)
+ return 0;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 32e3937..ddf523c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -194,6 +194,7 @@ struct sock_common {
+ * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
+ * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
+ * @sk_gso_max_size: Maximum GSO segment size to build
++ * @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_lingertime: %SO_LINGER l_linger setting
+ * @sk_backlog: always used with the per-socket spinlock held
+ * @sk_callback_lock: used with the callbacks in the end of this struct
+@@ -310,6 +311,7 @@ struct sock {
+ int sk_route_nocaps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
++ u16 sk_gso_max_segs;
+ int sk_rcvlowat;
+ unsigned long sk_lingertime;
+ struct sk_buff_head sk_error_queue;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 58690af..7d1f05e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3011,12 +3011,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+ /*
+ * Called when the last reference to the file is gone.
+ */
+-static int perf_release(struct inode *inode, struct file *file)
++static void put_event(struct perf_event *event)
+ {
+- struct perf_event *event = file->private_data;
+ struct task_struct *owner;
+
+- file->private_data = NULL;
++ if (!atomic_long_dec_and_test(&event->refcount))
++ return;
+
+ rcu_read_lock();
+ owner = ACCESS_ONCE(event->owner);
+@@ -3051,7 +3051,13 @@ static int perf_release(struct inode *inode, struct file *file)
+ put_task_struct(owner);
+ }
+
+- return perf_event_release_kernel(event);
++ perf_event_release_kernel(event);
++}
++
++static int perf_release(struct inode *inode, struct file *file)
++{
++ put_event(file->private_data);
++ return 0;
+ }
+
+ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3304,7 +3310,7 @@ unlock:
+
+ static const struct file_operations perf_fops;
+
+-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
++static struct file *perf_fget_light(int fd, int *fput_needed)
+ {
+ struct file *file;
+
+@@ -3318,7 +3324,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+ return ERR_PTR(-EBADF);
+ }
+
+- return file->private_data;
++ return file;
+ }
+
+ static int perf_event_set_output(struct perf_event *event,
+@@ -3350,19 +3356,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ case PERF_EVENT_IOC_SET_OUTPUT:
+ {
++ struct file *output_file = NULL;
+ struct perf_event *output_event = NULL;
+ int fput_needed = 0;
+ int ret;
+
+ if (arg != -1) {
+- output_event = perf_fget_light(arg, &fput_needed);
+- if (IS_ERR(output_event))
+- return PTR_ERR(output_event);
++ output_file = perf_fget_light(arg, &fput_needed);
++ if (IS_ERR(output_file))
++ return PTR_ERR(output_file);
++ output_event = output_file->private_data;
+ }
+
+ ret = perf_event_set_output(event, output_event);
+ if (output_event)
+- fput_light(output_event->filp, fput_needed);
++ fput_light(output_file, fput_needed);
+
+ return ret;
+ }
+@@ -5912,6 +5920,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+
+ mutex_init(&event->mmap_mutex);
+
++ atomic_long_set(&event->refcount, 1);
+ event->cpu = cpu;
+ event->attr = *attr;
+ event->group_leader = group_leader;
+@@ -6182,12 +6191,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ return event_fd;
+
+ if (group_fd != -1) {
+- group_leader = perf_fget_light(group_fd, &fput_needed);
+- if (IS_ERR(group_leader)) {
+- err = PTR_ERR(group_leader);
++ group_file = perf_fget_light(group_fd, &fput_needed);
++ if (IS_ERR(group_file)) {
++ err = PTR_ERR(group_file);
+ goto err_fd;
+ }
+- group_file = group_leader->filp;
++ group_leader = group_file->private_data;
+ if (flags & PERF_FLAG_FD_OUTPUT)
+ output_event = group_leader;
+ if (flags & PERF_FLAG_FD_NO_GROUP)
+@@ -6322,7 +6331,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ put_ctx(gctx);
+ }
+
+- event->filp = event_file;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+
+@@ -6412,7 +6420,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_free;
+ }
+
+- event->filp = NULL;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, event, cpu);
+@@ -6461,7 +6468,7 @@ static void sync_child_event(struct perf_event *child_event,
+ * Release the parent event, if this was the last
+ * reference to it.
+ */
+- fput(parent_event->filp);
++ put_event(parent_event);
+ }
+
+ static void
+@@ -6537,9 +6544,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ *
+ * __perf_event_exit_task()
+ * sync_child_event()
+- * fput(parent_event->filp)
+- * perf_release()
+- * mutex_lock(&ctx->mutex)
++ * put_event()
++ * mutex_lock(&ctx->mutex)
+ *
+ * But since its the parent context it won't be the same instance.
+ */
+@@ -6607,7 +6613,7 @@ static void perf_free_event(struct perf_event *event,
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+- fput(parent->filp);
++ put_event(parent);
+
+ perf_group_detach(event);
+ list_del_event(event, ctx);
+@@ -6687,6 +6693,12 @@ inherit_event(struct perf_event *parent_event,
+ NULL, NULL);
+ if (IS_ERR(child_event))
+ return child_event;
++
++ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
++ free_event(child_event);
++ return NULL;
++ }
++
+ get_ctx(child_ctx);
+
+ /*
+@@ -6728,14 +6740,6 @@ inherit_event(struct perf_event *parent_event,
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+ /*
+- * Get a reference to the parent filp - we will fput it
+- * when the child event exits. This is safe to do because
+- * we are in the parent and we know that the filp still
+- * exists and has a nonzero count:
+- */
+- atomic_long_inc(&parent_event->filp->f_count);
+-
+- /*
+ * Link this into the parent event's child list
+ */
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
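
The perf change above replaces file-based refcounting with an explicit
event refcount: new references are only taken while the count is still
non-zero (atomic_long_inc_not_zero()) and the last put frees the event
(atomic_long_dec_and_test()). A minimal userspace model of that idiom,
assuming C11 atomics (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_long refcount;
};

/* analogue of atomic_long_inc_not_zero(): only revive live objects */
static bool obj_get(struct obj *o)
{
	long c = atomic_load(&o->refcount);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;	/* reference taken */
	}
	return false;	/* already zero: object is being torn down */
}

/* analogue of atomic_long_dec_and_test(): the last put frees the object */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* creator holds one reference */
	if (obj_get(o))			/* a child takes its own reference */
		obj_put(o);		/* ... and drops it */
	obj_put(o);			/* creator's put frees the object */
	return 0;
}
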
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 03e67d4..5ee1ac0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -382,7 +382,7 @@ int do_settimeofday(const struct timespec *tv)
+ struct timespec ts_delta;
+ unsigned long flags;
+
+- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ if (!timespec_valid_strict(tv))
+ return -EINVAL;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+@@ -417,6 +417,8 @@ EXPORT_SYMBOL(do_settimeofday);
+ int timekeeping_inject_offset(struct timespec *ts)
+ {
+ unsigned long flags;
++ struct timespec tmp;
++ int ret = 0;
+
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+@@ -425,9 +427,16 @@ int timekeeping_inject_offset(struct timespec *ts)
+
+ timekeeping_forward_now();
+
++ tmp = timespec_add(xtime, *ts);
++ if (!timespec_valid_strict(&tmp)) {
++ ret = -EINVAL;
++ goto error;
++ }
++
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
++error: /* even if we error out, we forwarded the time, so call update */
+ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+@@ -435,7 +444,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+ /* signal hrtimers about time change */
+ clock_was_set();
+
+- return 0;
++ return ret;
+ }
+ EXPORT_SYMBOL(timekeeping_inject_offset);
+
+@@ -582,7 +591,20 @@ void __init timekeeping_init(void)
+ struct timespec now, boot;
+
+ read_persistent_clock(&now);
++ if (!timespec_valid_strict(&now)) {
++ pr_warn("WARNING: Persistent clock returned invalid value!\n"
++ " Check your CMOS/BIOS settings.\n");
++ now.tv_sec = 0;
++ now.tv_nsec = 0;
++ }
++
+ read_boot_clock(&boot);
++ if (!timespec_valid_strict(&boot)) {
++ pr_warn("WARNING: Boot clock returned invalid value!\n"
++ " Check your CMOS/BIOS settings.\n");
++ boot.tv_sec = 0;
++ boot.tv_nsec = 0;
++ }
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+@@ -627,7 +649,7 @@ static void update_sleep_time(struct timespec t)
+ */
+ static void __timekeeping_inject_sleeptime(struct timespec *delta)
+ {
+- if (!timespec_valid(delta)) {
++ if (!timespec_valid_strict(delta)) {
+ printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
+ "sleep delta value!\n");
+ return;
+@@ -1011,6 +1033,10 @@ static void update_wall_time(void)
+ #else
+ offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+ #endif
++ /* Check if there's really nothing to do */
++ if (offset < timekeeper.cycle_interval)
++ return;
++
+ timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+
+ /*
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a650bee..979d4de 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3437,14 +3437,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
+
+ for_each_busy_worker(worker, i, pos, gcwq) {
+ struct work_struct *rebind_work = &worker->rebind_work;
++ unsigned long worker_flags = worker->flags;
+
+ /*
+ * Rebind_work may race with future cpu hotplug
+ * operations. Use a separate flag to mark that
+- * rebinding is scheduled.
++ * rebinding is scheduled. The morphing should
++ * be atomic.
+ */
+- worker->flags |= WORKER_REBIND;
+- worker->flags &= ~WORKER_ROGUE;
++ worker_flags |= WORKER_REBIND;
++ worker_flags &= ~WORKER_ROGUE;
++ ACCESS_ONCE(worker->flags) = worker_flags;
+
+ /* queue rebind_work, wq doesn't matter, use the default one */
+ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index c0007f9..11b8d47 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2533,7 +2533,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ break;
+
+ default:
+- BUG();
++ return -EINVAL;
+ }
+
+ l = strlen(policy_modes[mode]);
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 14ff9fe..0ca06e8 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -784,6 +784,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
+
+ if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
+ return -ENOTCONN;
++ memset(&pvc, 0, sizeof(pvc));
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = vcc->dev->number;
+ pvc.sap_addr.vpi = vcc->vpi;
+diff --git a/net/atm/pvc.c b/net/atm/pvc.c
+index 3a73491..ae03240 100644
+--- a/net/atm/pvc.c
++++ b/net/atm/pvc.c
+@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
+ return -ENOTCONN;
+ *sockaddr_len = sizeof(struct sockaddr_atmpvc);
+ addr = (struct sockaddr_atmpvc *)sockaddr;
++ memset(addr, 0, sizeof(*addr));
+ addr->sap_family = AF_ATMPVC;
+ addr->sap_addr.itf = vcc->dev->number;
+ addr->sap_addr.vpi = vcc->vpi;
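
The two atm hunks here, like several Bluetooth, dccp and ipvs hunks below,
apply the same fix: a stack struct that is copied to userspace must be
zeroed first, because compiler-inserted padding (and any fields the code
forgets to set) would otherwise leak kernel stack bytes. A minimal
illustration with a hypothetical struct (userspace C):

#include <string.h>

struct demo_addr {
	unsigned short family;	/* 2 bytes, then padding on most ABIs */
	int value;		/* 4-byte aligned */
};

static size_t fill_addr(struct demo_addr *a)
{
	memset(a, 0, sizeof(*a));	/* clears padding and unset fields */
	a->family = 1;
	a->value = 42;
	return sizeof(*a);		/* now safe to copy_to_user() */
}
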
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index f6afe3d..8361ee4 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -388,6 +388,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
+ *addr_len = sizeof(*haddr);
+ haddr->hci_family = AF_BLUETOOTH;
+ haddr->hci_dev = hdev->id;
++ haddr->hci_channel = 0;
+
+ release_sock(sk);
+ return 0;
+@@ -671,6 +672,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
+ {
+ struct hci_filter *f = &hci_pi(sk)->filter;
+
++ memset(&uf, 0, sizeof(uf));
+ uf.type_mask = f->type_mask;
+ uf.opcode = f->opcode;
+ uf.event_mask[0] = *((u32 *) f->event_mask + 0);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 5c406d3..6dedd6f 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -293,6 +293,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
++ memset(la, 0, sizeof(struct sockaddr_l2));
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 5417f61..7ee4ead 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -547,6 +547,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
++ memset(sa, 0, sizeof(*sa));
+ sa->rc_family = AF_BLUETOOTH;
+ sa->rc_channel = rfcomm_pi(sk)->channel;
+ if (peer)
+@@ -835,6 +836,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+ }
+
+ sec.level = rfcomm_pi(sk)->sec_level;
++ sec.key_size = 0;
+
+ len = min_t(unsigned int, len, sizeof(sec));
+ if (copy_to_user(optval, (char *) &sec, len))
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index c258796..bc1eb56 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -471,7 +471,7 @@ static int rfcomm_get_dev_list(void __user *arg)
+
+ size = sizeof(*dl) + dev_num * sizeof(*di);
+
+- dl = kmalloc(size, GFP_KERNEL);
++ dl = kzalloc(size, GFP_KERNEL);
+ if (!dl)
+ return -ENOMEM;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4b18703..832ba6d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1059,6 +1059,8 @@ rollback:
+ */
+ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+ {
++ char *new_ifalias;
++
+ ASSERT_RTNL();
+
+ if (len >= IFALIASZ)
+@@ -1072,9 +1074,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+ return 0;
+ }
+
+- dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+- if (!dev->ifalias)
++ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
++ if (!new_ifalias)
+ return -ENOMEM;
++ dev->ifalias = new_ifalias;
+
+ strlcpy(dev->ifalias, alias, len+1);
+ return len;
+@@ -1628,6 +1631,19 @@ static inline int deliver_skb(struct sk_buff *skb,
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ }
+
++static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
++{
++ if (ptype->af_packet_priv == NULL)
++ return false;
++
++ if (ptype->id_match)
++ return ptype->id_match(ptype, skb->sk);
++ else if ((struct sock *)ptype->af_packet_priv == skb->sk)
++ return true;
++
++ return false;
++}
++
+ /*
+ * Support routine. Sends outgoing frames to any network
+ * taps currently in use.
+@@ -1645,8 +1661,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+- (ptype->af_packet_priv == NULL ||
+- (struct sock *)ptype->af_packet_priv != skb->sk)) {
++ (!skb_loop_sk(ptype, skb))) {
+ if (pt_prev) {
+ deliver_skb(skb2, pt_prev, skb->dev);
+ pt_prev = ptype;
+@@ -2108,6 +2123,9 @@ u32 netif_skb_features(struct sk_buff *skb)
+ __be16 protocol = skb->protocol;
+ u32 features = skb->dev->features;
+
++ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
++ features &= ~NETIF_F_GSO_MASK;
++
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+@@ -5990,6 +6008,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ dev_net_set(dev, &init_net);
+
+ dev->gso_max_size = GSO_MAX_SIZE;
++ dev->gso_max_segs = GSO_MAX_SEGS;
+
+ INIT_LIST_HEAD(&dev->napi_list);
+ INIT_LIST_HEAD(&dev->unreg_list);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 8d095b9..018fd41 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1308,6 +1308,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ } else {
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ sk->sk_gso_max_size = dst->dev->gso_max_size;
++ sk->sk_gso_max_segs = dst->dev->gso_max_segs;
+ }
+ }
+ }
+diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
+index 3d604e1..4caf63f 100644
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -532,6 +532,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
+ case DCCP_SOCKOPT_CCID_TX_INFO:
+ if (len < sizeof(tfrc))
+ return -EINVAL;
++ memset(&tfrc, 0, sizeof(tfrc));
+ tfrc.tfrctx_x = hc->tx_x;
+ tfrc.tfrctx_x_recv = hc->tx_x_recv;
+ tfrc.tfrctx_x_calc = hc->tx_x_calc;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index d2aae27..0064394 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -125,6 +125,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
+ static struct kmem_cache *mrt_cachep __read_mostly;
+
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
++static void ipmr_free_table(struct mr_table *mrt);
++
+ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
+@@ -132,6 +134,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
+ struct sk_buff *pkt, vifi_t vifi, int assert);
+ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm);
++static void mroute_clean_tables(struct mr_table *mrt);
+ static void ipmr_expire_process(unsigned long arg);
+
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+@@ -272,7 +275,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
+
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+ list_del(&mrt->list);
+- kfree(mrt);
++ ipmr_free_table(mrt);
+ }
+ fib_rules_unregister(net->ipv4.mr_rules_ops);
+ }
+@@ -300,7 +303,7 @@ static int __net_init ipmr_rules_init(struct net *net)
+
+ static void __net_exit ipmr_rules_exit(struct net *net)
+ {
+- kfree(net->ipv4.mrt);
++ ipmr_free_table(net->ipv4.mrt);
+ }
+ #endif
+
+@@ -337,6 +340,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ return mrt;
+ }
+
++static void ipmr_free_table(struct mr_table *mrt)
++{
++ del_timer_sync(&mrt->ipmr_expire_timer);
++ mroute_clean_tables(mrt);
++ kfree(mrt);
++}
++
+ /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
+
+ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index ad466a7..043d49b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -740,7 +740,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+ old_size_goal + mss_now > xmit_size_goal)) {
+ xmit_size_goal = old_size_goal;
+ } else {
+- tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
++ tp->xmit_size_goal_segs =
++ min_t(u16, xmit_size_goal / mss_now,
++ sk->sk_gso_max_segs);
+ xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+ }
+ }
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 850c737..6cebfd2 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -290,7 +290,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+ left = tp->snd_cwnd - in_flight;
+ if (sk_can_gso(sk) &&
+ left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
+- left * tp->mss_cache < sk->sk_gso_max_size)
++ left * tp->mss_cache < sk->sk_gso_max_size &&
++ left < sk->sk_gso_max_segs)
+ return 1;
+ return left <= tcp_max_burst(tp);
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index c51dd5b..921cbac 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk)
+ * when we would be allowed to send the split-due-to-Nagle skb fully.
+ */
+ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
+- unsigned int mss_now, unsigned int cwnd)
++ unsigned int mss_now, unsigned int max_segs)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+- u32 needed, window, cwnd_len;
++ u32 needed, window, max_len;
+
+ window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
+- cwnd_len = mss_now * cwnd;
++ max_len = mss_now * max_segs;
+
+- if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
+- return cwnd_len;
++ if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
++ return max_len;
+
+ needed = min(skb->len, window);
+
+- if (cwnd_len <= needed)
+- return cwnd_len;
++ if (max_len <= needed)
++ return max_len;
+
+ return needed - needed % mss_now;
+ }
+@@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ limit = min(send_win, cong_win);
+
+ /* If a full-sized TSO skb can be sent, do it. */
+- if (limit >= sk->sk_gso_max_size)
++ if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
++ sk->sk_gso_max_segs * tp->mss_cache))
+ goto send_now;
+
+ /* Middle in queue won't get any more data, full sendable already? */
+@@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ limit = mss_now;
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
+ limit = tcp_mss_split_point(sk, skb, mss_now,
+- cwnd_quota);
++ min_t(unsigned int,
++ cwnd_quota,
++ sk->sk_gso_max_segs));
+
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index a5521c5..aef80d7 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -493,8 +493,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+- rcu_read_lock();
+- for_each_netdev_rcu(net, dev) {
++ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.forwarding) ^ (!newf);
+@@ -503,7 +502,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
+ dev_forward_change(idev);
+ }
+ }
+- rcu_read_unlock();
+ }
+
+ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 89ff8c6..7501b22 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1253,11 +1253,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+ /* Remove from tunnel list */
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
++ kfree_rcu(tunnel, rcu);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+- synchronize_rcu();
+
+ atomic_dec(&l2tp_tunnel_count);
+- kfree(tunnel);
+ }
+
+ /* Create a socket for the tunnel, if one isn't set up by
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index a16a48e..4393794 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg {
+
+ struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
++ struct rcu_head rcu;
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ /* hashed list of sessions,
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index a18e6c3..99a60d5 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -966,14 +966,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sockaddr_llc sllc;
+ struct sock *sk = sock->sk;
+ struct llc_sock *llc = llc_sk(sk);
+- int rc = 0;
++ int rc = -EBADF;
+
+ memset(&sllc, 0, sizeof(sllc));
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+ *uaddrlen = sizeof(sllc);
+- memset(uaddr, 0, *uaddrlen);
+ if (peer) {
+ rc = -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index e1a66cf..72f4253 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2713,6 +2713,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+ {
+ struct ip_vs_timeout_user t;
+
++ memset(&t, 0, sizeof(t));
+ __ip_vs_get_timeouts(net, &t);
+ if (copy_to_user(user, &t, sizeof(t)) != 0)
+ ret = -EFAULT;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a99fb41..38b78b9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1333,7 +1333,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &scm;
+
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, true);
+ if (err < 0)
+ return err;
+
+@@ -1344,7 +1344,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ dst_pid = addr->nl_pid;
+ dst_group = ffs(addr->nl_groups);
+ err = -EPERM;
+- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
++ if ((dst_group || dst_pid) &&
++ !netlink_capable(sock, NL_NONROOT_SEND))
+ goto out;
+ } else {
+ dst_pid = nlk->dst_pid;
+@@ -2103,6 +2104,7 @@ static void __init netlink_add_usersock_entry(void)
+ rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
+ nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+ nl_table[NETLINK_USERSOCK].registered = 1;
++ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
+
+ netlink_table_ungrab();
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index d9d4970..85afc13 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1281,6 +1281,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
+ spin_unlock(&f->lock);
+ }
+
++static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
++{
++ if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
++ return true;
++
++ return false;
++}
++
+ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ {
+ struct packet_sock *po = pkt_sk(sk);
+@@ -1333,6 +1341,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ match->prot_hook.dev = po->prot_hook.dev;
+ match->prot_hook.func = packet_rcv_fanout;
+ match->prot_hook.af_packet_priv = match;
++ match->prot_hook.id_match = match_fanout_group;
+ dev_add_pack(&match->prot_hook);
+ list_add(&match->list, &fanout_list);
+ }
+@@ -1931,7 +1940,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+
+ if (likely(po->tx_ring.pg_vec)) {
+ ph = skb_shinfo(skb)->destructor_arg;
+- BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
+ BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
+ atomic_dec(&po->tx_ring.pending);
+ __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
+index b77f5a0..bdacd8d 100644
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ struct tcf_common *pc;
+ int ret = 0;
+ int err;
++#ifdef CONFIG_GACT_PROB
++ struct tc_gact_p *p_parm = NULL;
++#endif
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ #ifndef CONFIG_GACT_PROB
+ if (tb[TCA_GACT_PROB] != NULL)
+ return -EOPNOTSUPP;
++#else
++ if (tb[TCA_GACT_PROB]) {
++ p_parm = nla_data(tb[TCA_GACT_PROB]);
++ if (p_parm->ptype >= MAX_RAND)
++ return -EINVAL;
++ }
+ #endif
+
+ pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
+@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ spin_lock_bh(&gact->tcf_lock);
+ gact->tcf_action = parm->action;
+ #ifdef CONFIG_GACT_PROB
+- if (tb[TCA_GACT_PROB] != NULL) {
+- struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
++ if (p_parm) {
+ gact->tcfg_paction = p_parm->paction;
+ gact->tcfg_pval = p_parm->pval;
+ gact->tcfg_ptype = p_parm->ptype;
+@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
+
+ spin_lock(&gact->tcf_lock);
+ #ifdef CONFIG_GACT_PROB
+- if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
++ if (gact->tcfg_ptype)
+ action = gact_rand[gact->tcfg_ptype](gact);
+ else
+ action = gact->tcf_action;
+diff --git a/net/socket.c b/net/socket.c
+index 273cbce..68879db 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2645,6 +2645,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
+ if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
+ return -EFAULT;
+
++ memset(&ifc, 0, sizeof(ifc));
+ if (ifc32.ifcbuf == 0) {
+ ifc32.ifc_len = 0;
+ ifc.ifc_len = 0;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 3ac9789..ffba207 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -962,11 +962,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ return false;
+ }
+
+-static void xprt_alloc_slot(struct rpc_task *task)
++void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+ {
+- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_rqst *req;
+
++ spin_lock(&xprt->reserve_lock);
+ if (!list_empty(&xprt->free)) {
+ req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
+ list_del(&req->rq_list);
+@@ -987,12 +987,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
+ default:
+ task->tk_status = -EAGAIN;
+ }
++ spin_unlock(&xprt->reserve_lock);
+ return;
+ out_init_req:
+ task->tk_status = 0;
+ task->tk_rqstp = req;
+ xprt_request_init(task, xprt);
++ spin_unlock(&xprt->reserve_lock);
++}
++EXPORT_SYMBOL_GPL(xprt_alloc_slot);
++
++void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
++{
++ /* Note: grabbing the xprt_lock_write() ensures that we throttle
++ * new slot allocation if the transport is congested (i.e. when
++ * reconnecting a stream transport or when out of socket write
++ * buffer space).
++ */
++ if (xprt_lock_write(xprt, task)) {
++ xprt_alloc_slot(xprt, task);
++ xprt_release_write(xprt, task);
++ }
+ }
++EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
+
+ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ {
+@@ -1076,20 +1093,9 @@ void xprt_reserve(struct rpc_task *task)
+ if (task->tk_rqstp != NULL)
+ return;
+
+- /* Note: grabbing the xprt_lock_write() here is not strictly needed,
+- * but ensures that we throttle new slot allocation if the transport
+- * is congested (e.g. if reconnecting or if we're out of socket
+- * write buffer space).
+- */
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+- if (!xprt_lock_write(xprt, task))
+- return;
+-
+- spin_lock(&xprt->reserve_lock);
+- xprt_alloc_slot(task);
+- spin_unlock(&xprt->reserve_lock);
+- xprt_release_write(xprt, task);
++ xprt->ops->alloc_slot(xprt, task);
+ }
+
+ static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
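
The sunrpc refactor above turns slot allocation into a per-transport
method: xprt_reserve() now calls xprt->ops->alloc_slot(), the UDP and
RDMA transports point it at plain xprt_alloc_slot() (which now takes
reserve_lock itself), and TCP points it at xprt_lock_and_alloc_slot(),
which wraps the same core in the write lock to throttle allocation on a
congested stream. A compact userspace sketch of the ops-table
indirection (names hypothetical):

#include <stdio.h>

struct xprt;

struct xprt_ops {
	void (*alloc_slot)(struct xprt *x);
};

struct xprt {
	const struct xprt_ops *ops;
	const char *name;
};

static void alloc_slot_plain(struct xprt *x)
{
	printf("%s: allocate slot\n", x->name);
}

static void alloc_slot_locked(struct xprt *x)
{
	/* stand-in for xprt_lock_write()/xprt_release_write() */
	printf("%s: take write lock\n", x->name);
	alloc_slot_plain(x);
	printf("%s: drop write lock\n", x->name);
}

static const struct xprt_ops udp_ops = { .alloc_slot = alloc_slot_plain };
static const struct xprt_ops tcp_ops = { .alloc_slot = alloc_slot_locked };

int main(void)
{
	struct xprt udp = { &udp_ops, "udp" };
	struct xprt tcp = { &tcp_ops, "tcp" };

	/* the caller no longer cares which variant it gets */
	udp.ops->alloc_slot(&udp);
	tcp.ops->alloc_slot(&tcp);
	return 0;
}
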
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 06cdbff..5d9202d 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
+ static struct rpc_xprt_ops xprt_rdma_procs = {
+ .reserve_xprt = xprt_rdma_reserve_xprt,
+ .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
++ .alloc_slot = xprt_alloc_slot,
+ .release_request = xprt_release_rqst_cong, /* ditto */
+ .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
+ .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1a6edc7..c5391af 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2422,6 +2422,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
+ static struct rpc_xprt_ops xs_local_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
++ .alloc_slot = xprt_alloc_slot,
+ .rpcbind = xs_local_rpcbind,
+ .set_port = xs_local_set_port,
+ .connect = xs_connect,
+@@ -2438,6 +2439,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
+ .set_buffer_size = xs_udp_set_buffer_size,
+ .reserve_xprt = xprt_reserve_xprt_cong,
+ .release_xprt = xprt_release_xprt_cong,
++ .alloc_slot = xprt_alloc_slot,
+ .rpcbind = rpcb_getport_async,
+ .set_port = xs_set_port,
+ .connect = xs_connect,
+@@ -2455,6 +2457,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
+ static struct rpc_xprt_ops xs_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
++ .alloc_slot = xprt_lock_and_alloc_slot,
+ .rpcbind = rpcb_getport_async,
+ .set_port = xs_set_port,
+ .connect = xs_connect,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d99678a..317bfe3 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1435,7 +1435,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, false);
+ if (err < 0)
+ return err;
+
+@@ -1596,7 +1596,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, false);
+ if (err < 0)
+ return err;
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index f3be54e..b0187e7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2312,6 +2312,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ }
+ if (codec->patch_ops.free)
+ codec->patch_ops.free(codec);
++ memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
+ codec->proc_widget_hook = NULL;
+ codec->spec = NULL;
+ free_hda_cache(&codec->amp_cache);
+@@ -2324,7 +2325,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ codec->num_pcms = 0;
+ codec->pcm_info = NULL;
+ codec->preset = NULL;
+- memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
+ codec->slave_dig_outs = NULL;
+ codec->spdif_status_reset = 0;
+ module_put(codec->owner);
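
The hda_codec hunk above only reorders the teardown: codec->patch_ops is
now wiped immediately after patch_ops.free() runs, so no later step of
the reset path can call through a stale callback. A short sketch of the
clear-callbacks-early ordering (hypothetical names):

#include <stdio.h>
#include <string.h>

struct ops { void (*free_cb)(void); };
struct codec { struct ops patch_ops; int spec; };

static void my_free(void) { printf("free callback\n"); }

static void codec_reset(struct codec *c)
{
	if (c->patch_ops.free_cb)
		c->patch_ops.free_cb();
	/* clear the vtable right away: anything below that
	 * re-checks c->patch_ops now sees only NULLs */
	memset(&c->patch_ops, 0, sizeof(c->patch_ops));
	c->spec = 0;	/* ...rest of the teardown... */
}

int main(void)
{
	struct codec c = { { my_free }, 42 };

	codec_reset(&c);
	return 0;
}
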
+diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
+index 764cc93..075d5aa 100644
+--- a/sound/pci/ice1712/prodigy_hifi.c
++++ b/sound/pci/ice1712/prodigy_hifi.c
+@@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
+ }
+
+ static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
++static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
+
+ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
+ {
+@@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
+ .info = ak4396_dac_vol_info,
+ .get = ak4396_dac_vol_get,
+ .put = ak4396_dac_vol_put,
+- .tlv = { .p = db_scale_wm_dac },
++ .tlv = { .p = ak4396_db_scale },
+ },
+ };
+
diff --git a/3.2.54/1030_linux-3.2.31.patch b/3.2.54/1030_linux-3.2.31.patch
new file mode 100644
index 0000000..c6accf5
--- /dev/null
+++ b/3.2.54/1030_linux-3.2.31.patch
@@ -0,0 +1,3327 @@
+diff --git a/Makefile b/Makefile
+index 9fd7e60..fd9c414 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 9c18ebd..d63632f 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -648,6 +648,7 @@ __armv7_mmu_cache_on:
+ mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+ #endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
++ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
+ orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
+ orr r0, r0, #0x003c @ write buffer
+ #ifdef CONFIG_MMU
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 73ef56c..bda833c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -160,7 +160,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
+ #endif
+
+ #ifdef P6_NOP1
+-static const unsigned char __initconst_or_module p6nops[] =
++static const unsigned char p6nops[] =
+ {
+ P6_NOP1,
+ P6_NOP2,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 44d4393..a1e21ae 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1289,6 +1289,10 @@ asmlinkage void __init xen_start_kernel(void)
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
++
++ /* Avoid searching for BIOS MP tables */
++ x86_init.mpparse.find_smp_config = x86_init_noop;
++ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ }
+ #ifdef CONFIG_PCI
+ /* PCI BIOS service won't work from a PV guest. */
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index bb104b4..6e5a7f1 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -16,6 +16,7 @@
+ #include <asm/e820.h>
+ #include <asm/setup.h>
+ #include <asm/acpi.h>
++#include <asm/numa.h>
+ #include <asm/xen/hypervisor.h>
+ #include <asm/xen/hypercall.h>
+
+@@ -431,4 +432,7 @@ void __init xen_arch_setup(void)
+ boot_option_idle_override = IDLE_HALT;
+ WARN_ON(set_pm_idle_to_default());
+ fiddle_vdso();
++#ifdef CONFIG_NUMA
++ numa_off = 1;
++#endif
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c04ad68..321e23e 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4118,6 +4118,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* Devices which aren't very happy with higher link speeds */
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
++ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
+
+ /*
+ * Devices which choke on SETXFER. Applies only if both the
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index de0435e..887f68f 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -35,6 +35,7 @@ new_skb(ulong len)
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->protocol = __constant_htons(ETH_P_AOE);
++ skb_checksum_none_assert(skb);
+ }
+ return skb;
+ }
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index 38aa6dd..da33111 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
++ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "%p has protocol error\n", c);
+ break;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index c3f0ee1..86848c6 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -445,6 +445,14 @@ static void nbd_clear_que(struct nbd_device *lo)
+ req->errors++;
+ nbd_end_request(req);
+ }
++
++ while (!list_empty(&lo->waiting_queue)) {
++ req = list_entry(lo->waiting_queue.next, struct request,
++ queuelist);
++ list_del_init(&req->queuelist);
++ req->errors++;
++ nbd_end_request(req);
++ }
+ }
+
+
+@@ -594,6 +602,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->file = NULL;
+ nbd_clear_que(lo);
+ BUG_ON(!list_empty(&lo->queue_head));
++ BUG_ON(!list_empty(&lo->waiting_queue));
+ if (file)
+ fput(file);
+ return 0;
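
The nbd fix above drains lo->waiting_queue the same way the active queue
is drained, erroring out every request still parked there at disconnect
so none is leaked or left waiting forever. A userspace sketch of the
drain-on-teardown pattern with a toy queue (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

/* fail and free every request still sitting on the queue */
static void clear_queue(struct req **head)
{
	while (*head) {
		struct req *r = *head;

		*head = r->next;
		printf("req %d: completed with error\n", r->id);
		free(r);
	}
}

int main(void)
{
	struct req *waiting = NULL;

	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		r->next = waiting;
		waiting = r;
	}
	clear_queue(&waiting);	/* queue is empty afterwards */
	return 0;
}
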
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index f1bd44f..5c6709d 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -62,6 +62,7 @@ static struct usb_device_id ath3k_table[] = {
+
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0CF3, 0x3002) },
++ { USB_DEVICE(0x0CF3, 0xE019) },
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
+@@ -76,12 +77,15 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
++ { USB_DEVICE(0x0930, 0x0219) },
++ { USB_DEVICE(0x0489, 0xe057) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C) },
++ { USB_DEVICE(0x0489, 0xE036) },
+
+ { } /* Terminating entry */
+ };
+@@ -100,9 +104,12 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index fc4bcd6..6f95d98 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
+ /* Generic Bluetooth USB device */
+ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+
++ /* Apple-specific (Broadcom) devices */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
++
+ /* Broadcom SoftSailing reporting vendor specific */
+ { USB_DEVICE(0x0a5c, 0x21e1) },
+
+@@ -102,15 +105,14 @@ static struct usb_device_id btusb_table[] = {
+
+ /* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0489, 0xe042) },
+- { USB_DEVICE(0x0a5c, 0x21e3) },
+- { USB_DEVICE(0x0a5c, 0x21e6) },
+- { USB_DEVICE(0x0a5c, 0x21e8) },
+- { USB_DEVICE(0x0a5c, 0x21f3) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ /* Foxconn - Hon Hai */
+ { USB_DEVICE(0x0489, 0xe033) },
+
+ /* Broadcom devices with vendor specific id */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
++
+ { } /* Terminating entry */
+ };
+
+@@ -125,6 +127,7 @@ static struct usb_device_id blacklist_table[] = {
+
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+@@ -139,12 +142,15 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
+
+ /* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index ad683ec..b7fe343 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -32,7 +32,6 @@
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/cpumask.h>
+-#include <linux/sched.h> /* for current / set_cpus_allowed() */
+ #include <linux/io.h>
+ #include <linux/delay.h>
+
+@@ -1132,16 +1131,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
+ return res;
+ }
+
+-/* Driver entry point to switch to the target frequency */
+-static int powernowk8_target(struct cpufreq_policy *pol,
+- unsigned targfreq, unsigned relation)
++struct powernowk8_target_arg {
++ struct cpufreq_policy *pol;
++ unsigned targfreq;
++ unsigned relation;
++};
++
++static long powernowk8_target_fn(void *arg)
+ {
+- cpumask_var_t oldmask;
++ struct powernowk8_target_arg *pta = arg;
++ struct cpufreq_policy *pol = pta->pol;
++ unsigned targfreq = pta->targfreq;
++ unsigned relation = pta->relation;
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ u32 checkfid;
+ u32 checkvid;
+ unsigned int newstate;
+- int ret = -EIO;
++ int ret;
+
+ if (!data)
+ return -EINVAL;
+@@ -1149,29 +1155,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ checkfid = data->currfid;
+ checkvid = data->currvid;
+
+- /* only run on specific CPU from here on. */
+- /* This is poor form: use a workqueue or smp_call_function_single */
+- if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
+- return -ENOMEM;
+-
+- cpumask_copy(oldmask, tsk_cpus_allowed(current));
+- set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
+-
+- if (smp_processor_id() != pol->cpu) {
+- printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
+- goto err_out;
+- }
+-
+ if (pending_bit_stuck()) {
+ printk(KERN_ERR PFX "failing targ, change pending bit set\n");
+- goto err_out;
++ return -EIO;
+ }
+
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
+ pol->cpu, targfreq, pol->min, pol->max, relation);
+
+ if (query_current_values_with_pending_wait(data))
+- goto err_out;
++ return -EIO;
+
+ if (cpu_family != CPU_HW_PSTATE) {
+ pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+@@ -1189,7 +1182,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+
+ if (cpufreq_frequency_table_target(pol, data->powernow_table,
+ targfreq, relation, &newstate))
+- goto err_out;
++ return -EIO;
+
+ mutex_lock(&fidvid_mutex);
+
+@@ -1202,9 +1195,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ ret = transition_frequency_fidvid(data, newstate);
+ if (ret) {
+ printk(KERN_ERR PFX "transition frequency failed\n");
+- ret = 1;
+ mutex_unlock(&fidvid_mutex);
+- goto err_out;
++ return 1;
+ }
+ mutex_unlock(&fidvid_mutex);
+
+@@ -1213,12 +1205,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ data->powernow_table[newstate].index);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+- ret = 0;
+
+-err_out:
+- set_cpus_allowed_ptr(current, oldmask);
+- free_cpumask_var(oldmask);
+- return ret;
++ return 0;
++}
++
++/* Driver entry point to switch to the target frequency */
++static int powernowk8_target(struct cpufreq_policy *pol,
++ unsigned targfreq, unsigned relation)
++{
++ struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
++ .relation = relation };
++
++ /*
++ * Must run on @pol->cpu. cpufreq core is responsible for ensuring
++ * that we're bound to the current CPU and pol->cpu stays online.
++ */
++ if (smp_processor_id() == pol->cpu)
++ return powernowk8_target_fn(&pta);
++ else
++ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ }
+
+ /* Driver entry point to verify the policy and range of frequencies */
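
The powernow-k8 rework above replaces the old set_cpus_allowed_ptr()
self-migration dance with work_on_cpu(), which runs a function on a
given CPU via the workqueue machinery and returns its result. A
kernel-style sketch of the shape of that pattern; illustrative only,
with hypothetical driver names:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct freq_args {
	unsigned int target_khz;
};

/* must execute on the target CPU; returns 0 or -errno */
static long set_freq_on_cpu(void *data)
{
	struct freq_args *args = data;

	pr_debug("programming %u kHz on this CPU\n", args->target_khz);
	/* ... write the frequency MSRs of the local CPU here ... */
	return 0;
}

static int set_freq(unsigned int cpu, unsigned int khz)
{
	struct freq_args args = { .target_khz = khz };

	/* already on the right CPU? call directly; otherwise let
	 * the workqueue code run it there and wait for the result */
	if (smp_processor_id() == cpu)
		return set_freq_on_cpu(&args);
	return work_on_cpu(cpu, set_freq_on_cpu, &args);
}
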
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index 79dcf6e..c60d9c1 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -678,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ flags);
+
+ if (unlikely(!atslave || !sg_len)) {
+- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
++ dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
+ return NULL;
+ }
+
+@@ -706,6 +706,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
++ if (unlikely(!len)) {
++ dev_dbg(chan2dev(chan),
++ "prep_slave_sg: sg(%d) data length is zero\n", i);
++ goto err;
++ }
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+@@ -740,6 +745,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
++ if (unlikely(!len)) {
++ dev_dbg(chan2dev(chan),
++ "prep_slave_sg: sg(%d) data length is zero\n", i);
++ goto err;
++ }
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+@@ -773,6 +783,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
++err:
+ atc_desc_put(atchan, first);
+ return NULL;
+ }
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 57104147..e8eedb7 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -858,6 +858,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
+ /* Initialize channel parameters */
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
++ if (!pdmac->peripherals) {
++ ret = -ENOMEM;
++ dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
++ goto probe_err4;
++ }
+
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 0db57b5..da71881 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -554,7 +554,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
+ {
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+- int i, j, banks, ranks, rows, cols, size, npages;
++ unsigned i, j, banks, ranks, rows, cols, npages;
++ u64 size;
+ int csrow = 0;
+ unsigned long last_page = 0;
+ u32 reg;
+@@ -626,10 +627,10 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
+ cols = numcol(mtr);
+
+ /* DDR3 has 8 I/O banks */
+- size = (rows * cols * banks * ranks) >> (20 - 3);
++ size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
+
+- debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
++ debugf0("mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ pvt->sbridge_dev->mc, i, j,
+ size, npages,
+ banks, ranks, rows, cols);
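
The sb_edac change above widens size to u64 and casts the first factor,
because rows * cols * banks * ranks can exceed 32 bits on large DIMMs
and would silently wrap if evaluated in int. A userspace sketch of the
overflow and of the cast-first fix:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int rows = 65536, cols = 1024, banks = 8, ranks = 16;

	/* 32-bit product wraps: 65536 * 1024 * 8 * 16 == 2^33 */
	unsigned int bad = rows * cols * banks * ranks;

	/* promoting the first operand makes the whole chain 64-bit */
	uint64_t good = (uint64_t)rows * cols * banks * ranks;

	printf("32-bit: %u\n", bad);	/* 0 after wraparound */
	printf("64-bit: %llu\n", (unsigned long long)good);
	return 0;
}
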
+diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
+index 5b69480..2c40776 100644
+--- a/drivers/gpio/gpio-lpc32xx.c
++++ b/drivers/gpio/gpio-lpc32xx.c
+@@ -295,6 +295,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
+ {
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
++ __set_gpio_level_p012(group, pin, value);
+ __set_gpio_dir_p012(group, pin, 0);
+
+ return 0;
+@@ -305,6 +306,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
+ {
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
++ __set_gpio_level_p3(group, pin, value);
+ __set_gpio_dir_p3(group, pin, 0);
+
+ return 0;
+@@ -313,6 +315,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
+ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
+ int value)
+ {
++ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
++
++ __set_gpo_level_p3(group, pin, value);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 548a400..e48e01e 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3357,7 +3357,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+- BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
++ if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
++ return -EBUSY;
+ WARN_ON(i915_verify_lists(dev));
+
+ if (obj->gtt_space != NULL) {
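
The i915 hunk above downgrades a pin-count overflow from BUG_ON(), which
takes down the whole machine, to a warning plus a graceful -EBUSY.
Because the kernel's WARN_ON() evaluates to the tested condition, the
idiom is a one-liner; a userspace sketch with a stand-in macro (a gcc
statement expression, as the kernel itself uses):

#include <stdio.h>

/* minimal stand-in for the kernel's WARN_ON(): report the condition
 * and evaluate to it, so it can drive an early error return */
#define WARN_ON(cond) \
	({ int __c = !!(cond); \
	   if (__c) fprintf(stderr, "WARN: %s\n", #cond); __c; })

#define MAX_PIN_COUNT 15

static int pin(int *pin_count)
{
	if (WARN_ON(*pin_count == MAX_PIN_COUNT))
		return -1;	/* -EBUSY in the kernel */
	(*pin_count)++;
	return 0;
}

int main(void)
{
	int count = MAX_PIN_COUNT;

	return pin(&count) ? 1 : 0;	/* warns and fails, no abort */
}
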
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 9cd81ba..c2a64f4 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -271,7 +271,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+- if (intel_hdmi->has_audio)
++ if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index ceffd20..a4011b0 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1446,98 +1446,14 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+ }
+ }
+
+-/**
+- * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+- */
+-static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+-{
+- struct drm_device *dev = crtc->dev;
+- struct drm_crtc *test_crtc;
+- struct radeon_crtc *radeon_test_crtc;
+- u32 pll_in_use = 0;
+-
+- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+- if (crtc == test_crtc)
+- continue;
+-
+- radeon_test_crtc = to_radeon_crtc(test_crtc);
+- if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+- pll_in_use |= (1 << radeon_test_crtc->pll_id);
+- }
+- return pll_in_use;
+-}
+-
+-/**
+- * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+- * also in DP mode. For DP, a single PPLL can be used for all DP
+- * crtcs/encoders.
+- */
+-static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+-{
+- struct drm_device *dev = crtc->dev;
+- struct drm_encoder *test_encoder;
+- struct radeon_crtc *radeon_test_crtc;
+-
+- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+- if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
+- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+- /* for DP use the same PLL for all */
+- radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
+- if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+- return radeon_test_crtc->pll_id;
+- }
+- }
+- }
+- return ATOM_PPLL_INVALID;
+-}
+-
+-/**
+- * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+- * monitors a dedicated PPLL must be used. If a particular board has
+- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+- * as there is no need to program the PLL itself. If we are not able to
+- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+- * avoid messing up an existing monitor.
+- *
+- * Asic specific PLL information
+- *
+- * DCE 6.1
+- * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+- * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+- *
+- * DCE 6.0
+- * - PPLL0 is available to all UNIPHY (DP only)
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- * DCE 5.0
+- * - DCPLL is available to all UNIPHY (DP only)
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- * DCE 3.0/4.0/4.1
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- */
+ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+- u32 pll_in_use;
+- int pll;
++ struct drm_crtc *test_crtc;
++ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+@@ -1545,7 +1461,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+- * DCE5: PPLL, DCPLL, or ext clock
++ * DCE5: DCPLL or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+@@ -1553,31 +1469,29 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+ if (rdev->clock.dp_extclk)
+- /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE5(rdev))
+- /* use DCPLL for all DP */
+ return ATOM_DCPLL;
+- else {
+- /* use the same PPLL for all DP monitors */
+- pll = radeon_get_shared_dp_ppll(crtc);
+- if (pll != ATOM_PPLL_INVALID)
+- return pll;
+- }
+ }
+- break;
+ }
+ }
+- /* all other cases */
+- pll_in_use = radeon_get_pll_use_mask(crtc);
+- if (!(pll_in_use & (1 << ATOM_PPLL2)))
+- return ATOM_PPLL2;
+- if (!(pll_in_use & (1 << ATOM_PPLL1)))
++
++ /* otherwise, pick one of the plls */
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ struct radeon_crtc *radeon_test_crtc;
++
++ if (crtc == test_crtc)
++ continue;
++
++ radeon_test_crtc = to_radeon_crtc(test_crtc);
++ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
++ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
++ pll_in_use |= (1 << radeon_test_crtc->pll_id);
++ }
++ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+- DRM_ERROR("unable to allocate a PPLL\n");
+- return ATOM_PPLL_INVALID;
++ return ATOM_PPLL2;
+ } else
+- /* use PPLL1 or PPLL2 */
+ return radeon_crtc->crtc_id;
+
+ }
+@@ -1696,7 +1610,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ break;
+ }
+ done:
+- radeon_crtc->pll_id = ATOM_PPLL_INVALID;
++ radeon_crtc->pll_id = -1;
+ }
+
+ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+@@ -1745,6 +1659,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+- radeon_crtc->pll_id = ATOM_PPLL_INVALID;
++ radeon_crtc->pll_id = -1;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 0c8bea9..a21e763 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1026,7 +1026,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
+ return report;
+ }
+
+-void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ int interrupt)
+ {
+ struct hid_report_enum *report_enum = hid->report_enum + type;
+@@ -1034,10 +1034,11 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ unsigned int a;
+ int rsize, csize = size;
+ u8 *cdata = data;
++ int ret = 0;
+
+ report = hid_get_report(report_enum, data);
+ if (!report)
+- return;
++ goto out;
+
+ if (report_enum->numbered) {
+ cdata++;
+@@ -1057,14 +1058,19 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+
+ if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
+ hid->hiddev_report_event(hid, report);
+- if (hid->claimed & HID_CLAIMED_HIDRAW)
+- hidraw_report_event(hid, data, size);
++ if (hid->claimed & HID_CLAIMED_HIDRAW) {
++ ret = hidraw_report_event(hid, data, size);
++ if (ret)
++ goto out;
++ }
+
+ for (a = 0; a < report->maxfield; a++)
+ hid_input_field(hid, report->field[a], cdata, interrupt);
+
+ if (hid->claimed & HID_CLAIMED_INPUT)
+ hidinput_report_event(hid, report);
++out:
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hid_report_raw_event);
+
+@@ -1141,7 +1147,7 @@ nomem:
+ }
+ }
+
+- hid_report_raw_event(hid, type, data, size, interrupt);
++ ret = hid_report_raw_event(hid, type, data, size, interrupt);
+
+ unlock:
+ up(&hid->driver_lock);
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 2eac8c5..8821ecc 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -185,6 +185,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
+ static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type);
++static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+
+ static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+@@ -225,6 +226,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ dbg_hid("%s: device list is empty\n", __func__);
++ djrcv_dev->querying_devices = false;
+ return;
+ }
+
+@@ -235,6 +237,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ return;
+ }
+
++ if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
++ /* The device is already known. No need to reallocate it. */
++ dbg_hid("%s: device is already known\n", __func__);
++ return;
++ }
++
+ dj_hiddev = hid_allocate_device();
+ if (IS_ERR(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+@@ -298,6 +306,7 @@ static void delayedwork_callback(struct work_struct *work)
+ struct dj_report dj_report;
+ unsigned long flags;
+ int count;
++ int retval;
+
+ dbg_hid("%s\n", __func__);
+
+@@ -330,6 +339,25 @@ static void delayedwork_callback(struct work_struct *work)
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ break;
+ default:
++ /* A normal report (i.e. not belonging to a pair/unpair notification)
++ * arriving here means that the report arrived but we did not have a
++ * paired dj_device associated with the report's device_index; this
++ * means that the original "device paired" notification corresponding
++ * to this dj_device never arrived at this driver. The reason is that
++ * hid-core discards all packets coming from a device while probe() is
++ * executing. */
++ if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
++ /* ok, we don't know the device, just re-ask the
++ * receiver for the list of connected devices. */
++ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
++ if (!retval) {
++ /* everything went fine, so just leave */
++ break;
++ }
++ dev_err(&djrcv_dev->hdev->dev,
++ "%s:logi_dj_recv_query_paired_devices "
++ "error:%d\n", __func__, retval);
++ }
+ dbg_hid("%s: unexpected report type\n", __func__);
+ }
+ }
+@@ -360,6 +388,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+ if (!djdev) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
++ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
++
++ if (schedule_work(&djrcv_dev->work) == 0) {
++ dbg_hid("%s: did not schedule the work item, was already "
++ "queued\n", __func__);
++ }
+ return;
+ }
+
+@@ -390,6 +424,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+ if (dj_device == NULL) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
++ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
++
++ if (schedule_work(&djrcv_dev->work) == 0) {
++ dbg_hid("%s: did not schedule the work item, was already "
++ "queued\n", __func__);
++ }
+ return;
+ }
+
+@@ -428,27 +468,42 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+
+ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+ {
+- struct dj_report dj_report;
++ struct dj_report *dj_report;
++ int retval;
++
++ /* no need to protect djrcv_dev->querying_devices */
++ if (djrcv_dev->querying_devices)
++ return 0;
+
+- memset(&dj_report, 0, sizeof(dj_report));
+- dj_report.report_id = REPORT_ID_DJ_SHORT;
+- dj_report.device_index = 0xFF;
+- dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
++ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
++ if (!dj_report)
++ return -ENOMEM;
++ dj_report->report_id = REPORT_ID_DJ_SHORT;
++ dj_report->device_index = 0xFF;
++ dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
++ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
++ kfree(dj_report);
++ return retval;
+ }
+
++
+ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned timeout)
+ {
+- struct dj_report dj_report;
++ struct dj_report *dj_report;
++ int retval;
+
+- memset(&dj_report, 0, sizeof(dj_report));
+- dj_report.report_id = REPORT_ID_DJ_SHORT;
+- dj_report.device_index = 0xFF;
+- dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+- dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+- dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
++ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
++ if (!dj_report)
++ return -ENOMEM;
++ dj_report->report_id = REPORT_ID_DJ_SHORT;
++ dj_report->device_index = 0xFF;
++ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
++ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
++ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
++ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
++ kfree(dj_report);
++ return retval;
+ }
+
+
+diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
+index fd28a5e..4a40003 100644
+--- a/drivers/hid/hid-logitech-dj.h
++++ b/drivers/hid/hid-logitech-dj.h
+@@ -101,6 +101,7 @@ struct dj_receiver_dev {
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
++ bool querying_devices;
+ };
+
+ struct dj_device {
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index cf7d6d5..17d15bb 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -42,6 +42,7 @@ static struct cdev hidraw_cdev;
+ static struct class *hidraw_class;
+ static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
+ static DEFINE_MUTEX(minors_lock);
++static void drop_ref(struct hidraw *hid, int exists_bit);
+
+ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+ {
+@@ -87,13 +88,16 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
+ len = list->buffer[list->tail].len > count ?
+ count : list->buffer[list->tail].len;
+
+- if (copy_to_user(buffer, list->buffer[list->tail].value, len)) {
+- ret = -EFAULT;
+- goto out;
++ if (list->buffer[list->tail].value) {
++ if (copy_to_user(buffer, list->buffer[list->tail].value, len)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ ret = len;
+ }
+- ret = len;
+
+ kfree(list->buffer[list->tail].value);
++ list->buffer[list->tail].value = NULL;
+ list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ }
+ out:
+@@ -110,7 +114,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
+ __u8 *buf;
+ int ret = 0;
+
+- if (!hidraw_table[minor]) {
++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+@@ -258,7 +262,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
+ }
+
+ mutex_lock(&minors_lock);
+- if (!hidraw_table[minor]) {
++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+@@ -295,32 +299,12 @@ out:
+ static int hidraw_release(struct inode * inode, struct file * file)
+ {
+ unsigned int minor = iminor(inode);
+- struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
+- int ret;
+-
+- mutex_lock(&minors_lock);
+- if (!hidraw_table[minor]) {
+- ret = -ENODEV;
+- goto unlock;
+- }
+
++ drop_ref(hidraw_table[minor], 0);
+ list_del(&list->node);
+- dev = hidraw_table[minor];
+- if (!--dev->open) {
+- if (list->hidraw->exist) {
+- hid_hw_power(dev->hid, PM_HINT_NORMAL);
+- hid_hw_close(dev->hid);
+- } else {
+- kfree(list->hidraw);
+- }
+- }
+ kfree(list);
+- ret = 0;
+-unlock:
+- mutex_unlock(&minors_lock);
+-
+- return ret;
++ return 0;
+ }
+
+ static long hidraw_ioctl(struct file *file, unsigned int cmd,
+@@ -437,19 +421,29 @@ static const struct file_operations hidraw_ops = {
+ .llseek = noop_llseek,
+ };
+
+-void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
++int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
+ {
+ struct hidraw *dev = hid->hidraw;
+ struct hidraw_list *list;
++ int ret = 0;
+
+ list_for_each_entry(list, &dev->list, node) {
+- list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC);
++ int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
++
++ if (new_head == list->tail)
++ continue;
++
++ if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
++ ret = -ENOMEM;
++ break;
++ }
+ list->buffer[list->head].len = len;
+- list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
++ list->head = new_head;
+ kill_fasync(&list->fasync, SIGIO, POLL_IN);
+ }
+
+ wake_up_interruptible(&dev->wait);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hidraw_report_event);
+
+@@ -512,21 +506,7 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
+ void hidraw_disconnect(struct hid_device *hid)
+ {
+ struct hidraw *hidraw = hid->hidraw;
+-
+- mutex_lock(&minors_lock);
+- hidraw->exist = 0;
+-
+- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+-
+- hidraw_table[hidraw->minor] = NULL;
+-
+- if (hidraw->open) {
+- hid_hw_close(hid);
+- wake_up_interruptible(&hidraw->wait);
+- } else {
+- kfree(hidraw);
+- }
+- mutex_unlock(&minors_lock);
++ drop_ref(hidraw, 1);
+ }
+ EXPORT_SYMBOL_GPL(hidraw_disconnect);
+
+@@ -542,21 +522,28 @@ int __init hidraw_init(void)
+
+ if (result < 0) {
+ pr_warn("can't get major number\n");
+- result = 0;
+ goto out;
+ }
+
+ hidraw_class = class_create(THIS_MODULE, "hidraw");
+ if (IS_ERR(hidraw_class)) {
+ result = PTR_ERR(hidraw_class);
+- unregister_chrdev(hidraw_major, "hidraw");
+- goto out;
++ goto error_cdev;
+ }
+
+ cdev_init(&hidraw_cdev, &hidraw_ops);
+- cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES);
++ result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES);
++ if (result < 0)
++ goto error_class;
++
+ out:
+ return result;
++
++error_class:
++ class_destroy(hidraw_class);
++error_cdev:
++ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
++ goto out;
+ }
+
+ void hidraw_exit(void)
+@@ -568,3 +555,23 @@ void hidraw_exit(void)
+ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
+
+ }
++
++static void drop_ref(struct hidraw *hidraw, int exists_bit)
++{
++ mutex_lock(&minors_lock);
++ if (exists_bit) {
++ hid_hw_close(hidraw->hid);
++ hidraw->exist = 0;
++ if (hidraw->open)
++ wake_up_interruptible(&hidraw->wait);
++ } else {
++ --hidraw->open;
++ }
++
++ if (!hidraw->open && !hidraw->exist) {
++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
++ hidraw_table[hidraw->minor] = NULL;
++ kfree(hidraw);
++ }
++ mutex_unlock(&minors_lock);
++}
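
The hidraw rework above funnels both release() and disconnect() through
a single drop_ref() helper under minors_lock, so the device is destroyed
exactly once: when the open count has reached zero and the hardware is
gone, in whichever order that happens. A userspace sketch of the
shared-teardown pattern (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int open;	/* number of userspace opens */
	int exist;	/* backing hardware still present? */
};

static void drop_ref(struct dev *d, int device_gone)
{
	/* in the kernel this whole function runs under minors_lock */
	if (device_gone)
		d->exist = 0;
	else
		d->open--;

	/* free only when both the last opener and the hardware
	 * are gone, regardless of which went away first */
	if (!d->open && !d->exist) {
		printf("destroying device\n");
		free(d);
	}
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	d->open = 1;
	d->exist = 1;
	drop_ref(d, 1);	/* unplugged while open: kept alive */
	drop_ref(d, 0);	/* last close: destroyed now */
	return 0;
}
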
+diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
+index 5d760f3..08e2947 100644
+--- a/drivers/hwmon/ad7314.c
++++ b/drivers/hwmon/ad7314.c
+@@ -96,10 +96,18 @@ static ssize_t ad7314_show_temperature(struct device *dev,
+ }
+ }
+
++static ssize_t ad7314_show_name(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
++}
++
++static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
+ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ ad7314_show_temperature, NULL, 0);
+
+ static struct attribute *ad7314_attributes[] = {
++ &dev_attr_name.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+ };
+diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
+index 04450f8..685aae6 100644
+--- a/drivers/hwmon/ads7871.c
++++ b/drivers/hwmon/ads7871.c
+@@ -133,6 +133,12 @@ static ssize_t show_voltage(struct device *dev,
+ }
+ }
+
++static ssize_t ads7871_show_name(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
++}
++
+ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+ static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+ static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+@@ -142,6 +148,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+ static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+ static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
++static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
++
+ static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+@@ -151,6 +159,7 @@ static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
++ &dev_attr_name.attr,
+ NULL
+ };
+
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index e8e18ca..ac2d6cb 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -128,12 +128,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
+ * counter saturations resulting in bogus power readings.
+ * We correct this value ourselves to cope with older BIOSes.
+ */
+-static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
++static const struct pci_device_id affected_device[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ { 0 }
+ };
+
+-static void __devinit tweak_runavg_range(struct pci_dev *pdev)
++static void tweak_runavg_range(struct pci_dev *pdev)
+ {
+ u32 val;
+
+@@ -157,6 +157,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+ REG_TDP_RUNNING_AVERAGE, val);
+ }
+
++#ifdef CONFIG_PM
++static int fam15h_power_resume(struct pci_dev *pdev)
++{
++ tweak_runavg_range(pdev);
++ return 0;
++}
++#else
++#define fam15h_power_resume NULL
++#endif
++
+ static void __devinit fam15h_power_init_data(struct pci_dev *f4,
+ struct fam15h_power_data *data)
+ {
+@@ -255,6 +265,7 @@ static struct pci_driver fam15h_power_driver = {
+ .id_table = fam15h_power_id_table,
+ .probe = fam15h_power_probe,
+ .remove = __devexit_p(fam15h_power_remove),
++ .resume = fam15h_power_resume,
+ };
+
+ static int __init fam15h_power_init(void)
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index d4ec371..cd1a843 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -335,6 +335,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ },
+ {
+ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
++ },
++ },
++ {
++ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ },
+diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
+index 6777ca0..73ca321 100644
+--- a/drivers/iommu/intr_remapping.c
++++ b/drivers/iommu/intr_remapping.c
+@@ -752,6 +752,7 @@ int __init parse_ioapics_under_ir(void)
+ {
+ struct dmar_drhd_unit *drhd;
+ int ir_supported = 0;
++ int ioapic_idx;
+
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+@@ -764,13 +765,20 @@ int __init parse_ioapics_under_ir(void)
+ }
+ }
+
+- if (ir_supported && ir_ioapic_num != nr_ioapics) {
+- printk(KERN_WARNING
+- "Not all IO-APIC's listed under remapping hardware\n");
+- return -1;
++ if (!ir_supported)
++ return 0;
++
++ for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
++ int ioapic_id = mpc_ioapic_id(ioapic_idx);
++ if (!map_ioapic_to_ir(ioapic_id)) {
++ pr_err(FW_BUG "ioapic %d has no mapping iommu, "
++ "interrupt remapping will be disabled\n",
++ ioapic_id);
++ return -1;
++ }
+ }
+
+- return ir_supported;
++ return 1;
+ }
+
+ int __init ir_dev_scope_init(void)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 8e91321..52848ab 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1350,17 +1350,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+ return q && blk_queue_nonrot(q);
+ }
+
+-static bool dm_table_is_nonrot(struct dm_table *t)
++static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && !blk_queue_add_random(q);
++}
++
++static bool dm_table_all_devices_attribute(struct dm_table *t,
++ iterate_devices_callout_fn func)
+ {
+ struct dm_target *ti;
+ unsigned i = 0;
+
+- /* Ensure that all underlying device are non-rotational. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
++ !ti->type->iterate_devices(ti, func, NULL))
+ return 0;
+ }
+
+@@ -1392,7 +1400,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
+- if (dm_table_is_nonrot(t))
++ /* Ensure that all underlying devices are non-rotational. */
++ if (dm_table_all_devices_attribute(t, device_is_nonrot))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+@@ -1400,6 +1409,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ dm_table_set_integrity(t);
+
+ /*
++ * Determine whether or not this queue's I/O timings contribute
++ * to the entropy pool. Only request-based targets use this.
++ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
++ * have it set.
++ */
++ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
++
++ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
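
The dm-table change above generalizes the single-purpose
dm_table_is_nonrot() into dm_table_all_devices_attribute(), which takes
the per-device predicate as an iterate_devices callout; one walker now
answers both "are all devices non-rotational?" and "is any device not
feeding the entropy pool?". A userspace sketch of the
generalized-predicate iterator:

#include <stdio.h>

struct dev { int nonrot; int adds_entropy; };

typedef int (*dev_pred)(const struct dev *d);

static int is_nonrot(const struct dev *d)	{ return d->nonrot; }
static int is_not_random(const struct dev *d)	{ return !d->adds_entropy; }

/* true only if every device satisfies the caller's predicate */
static int all_devices(const struct dev *devs, int n, dev_pred fn)
{
	for (int i = 0; i < n; i++)
		if (!fn(&devs[i]))
			return 0;
	return 1;
}

int main(void)
{
	struct dev devs[] = { { 1, 0 }, { 1, 1 } };

	printf("all nonrot:  %d\n", all_devices(devs, 2, is_nonrot));
	printf("none random: %d\n", all_devices(devs, 2, is_not_random));
	return 0;
}
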
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 4720f68..502dcf7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -866,10 +866,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
+ {
+ int r = error;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
++ dm_request_endio_fn rq_end_io = NULL;
+
+- if (mapped && rq_end_io)
+- r = rq_end_io(tio->ti, clone, error, &tio->info);
++ if (tio->ti) {
++ rq_end_io = tio->ti->type->rq_end_io;
++
++ if (mapped && rq_end_io)
++ r = rq_end_io(tio->ti, clone, error, &tio->info);
++ }
+
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+@@ -1566,15 +1570,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
+ int r, requeued = 0;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+- /*
+- * Hold the md reference here for the in-flight I/O.
+- * We can't rely on the reference count by device opener,
+- * because the device may be closed during the request completion
+- * when all bios are completed.
+- * See the comment in rq_completed() too.
+- */
+- dm_get(md);
+-
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+@@ -1606,6 +1601,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
+ return requeued;
+ }
+
++static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
++{
++ struct request *clone;
++
++ blk_start_request(orig);
++ clone = orig->special;
++ atomic_inc(&md->pending[rq_data_dir(clone)]);
++
++ /*
++ * Hold the md reference here for the in-flight I/O.
++ * We can't rely on the reference count by device opener,
++ * because the device may be closed during the request completion
++ * when all bios are completed.
++ * See the comment in rq_completed() too.
++ */
++ dm_get(md);
++
++ return clone;
++}
++
+ /*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+@@ -1635,14 +1650,21 @@ static void dm_request_fn(struct request_queue *q)
+ pos = blk_rq_pos(rq);
+
+ ti = dm_table_find_target(map, pos);
+- BUG_ON(!dm_target_is_valid(ti));
++ if (!dm_target_is_valid(ti)) {
++ /*
++ * Must perform setup, that dm_done() requires,
++ * before calling dm_kill_unmapped_request
++ */
++ DMERR_LIMIT("request attempted access beyond the end of device");
++ clone = dm_start_request(md, rq);
++ dm_kill_unmapped_request(clone, -EIO);
++ continue;
++ }
+
+ if (ti->type->busy && ti->type->busy(ti))
+ goto delay_and_out;
+
+- blk_start_request(rq);
+- clone = rq->special;
+- atomic_inc(&md->pending[rq_data_dir(clone)]);
++ clone = dm_start_request(md, rq);
+
+ spin_unlock(q->queue_lock);
+ if (map_request(ti, clone, md))
+@@ -1662,8 +1684,6 @@ delay_and_out:
+ blk_delay_queue(q, HZ / 10);
+ out:
+ dm_table_put(map);
+-
+- return;
+ }
+
+ int dm_underlying_device_busy(struct request_queue *q)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 7a9eef6..0634ee5 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1226,14 +1226,16 @@ static int enough(struct r10conf *conf, int ignore)
+ do {
+ int n = conf->copies;
+ int cnt = 0;
++ int this = first;
+ while (n--) {
+- if (conf->mirrors[first].rdev &&
+- first != ignore)
++ if (conf->mirrors[this].rdev &&
++ this != ignore)
+ cnt++;
+- first = (first+1) % conf->raid_disks;
++ this = (this+1) % conf->raid_disks;
+ }
+ if (cnt == 0)
+ return 0;
++ first = (first + conf->near_copies) % conf->raid_disks;
+ } while (first != 0);
+ return 1;
+ }
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 6ce32a7..aaeaff2 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2712,8 +2712,9 @@ int sdhci_add_host(struct sdhci_host *host)
+ mmc_card_is_removable(mmc))
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+- /* UHS-I mode(s) supported by the host controller. */
+- if (host->version >= SDHCI_SPEC_300)
++ /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
++ if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
++ SDHCI_SUPPORT_DDR50))
+ mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+
+ /* SDR104 support also implies SDR50 support */
+diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
+index 32778d5..46194bc 100644
+--- a/drivers/net/can/janz-ican3.c
++++ b/drivers/net/can/janz-ican3.c
+@@ -1250,7 +1250,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
+ */
+ static int ican3_reset_module(struct ican3_dev *mod)
+ {
+- u8 val = 1 << mod->num;
+ unsigned long start;
+ u8 runold, runnew;
+
+@@ -1264,8 +1263,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
+ runold = ioread8(mod->dpm + TARGET_RUNNING);
+
+ /* reset the module */
+- iowrite8(val, &mod->ctrl->reset_assert);
+- iowrite8(val, &mod->ctrl->reset_deassert);
++ iowrite8(0x00, &mod->dpmctrl->hwreset);
+
+ /* wait until the module has finished resetting and is running */
+ start = jiffies;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 2adc294..79c70ae 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -971,12 +971,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+
++ unregister_candev(ndev);
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(priv->base);
+ release_mem_region(res->start, resource_size(res));
+- unregister_candev(ndev);
+ free_candev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 2c1a5c0..4c50ac0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -554,14 +554,16 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp)
+ {
+- /* Do nothing if no IP/L4 csum validation was done */
+-
++ /* Do nothing if no L4 csum validation was done.
++ * We do not check whether IP csum was validated. For IPv4 we assume
++ * that if the card got as far as validating the L4 csum, it also
++ * validated the IP csum. IPv6 has no IP csum.
++ */
+ if (cqe->fast_path_cqe.status_flags &
+- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
+ return;
+
+- /* If both IP/L4 validation were done, check if an error was found. */
++ /* If L4 validation was done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 6b258d9..01bc102 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14013,9 +14013,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3) ||
+- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
++ tp->fw_needed) {
++ /* For firmware TSO, assume ASF is disabled.
++ * We'll disable TSO later if we discover ASF
++ * is enabled in tg3_get_eeprom_hw_cfg().
++ */
+ tg3_flag_set(tp, TSO_CAPABLE);
+- else {
++ } else {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+@@ -14290,6 +14294,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ */
+ tg3_get_eeprom_hw_cfg(tp);
+
++ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ tg3_flag_clear(tp, TSO_BUG);
++ tp->fw_needed = NULL;
++ }
++
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 8cf3173..da5204d 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1351,6 +1351,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+ struct pci_dev *root = pdev->bus->self;
+ u32 aer_pos;
+
++ /* root bus? */
++ if (!root)
++ return;
++
+ if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+ adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+ return;
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
+index c97d2f5..bfc3b0d 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.c
++++ b/drivers/net/ethernet/ti/davinci_cpdma.c
+@@ -851,6 +851,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
++ chan->count--;
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index bc9a4bb..1161584 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock)
+
+ po = pppox_sk(sk);
+
+- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
++ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
+ }
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index fc147a5..6729585 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -1648,6 +1648,10 @@ static const struct usb_device_id products [] = {
+ USB_DEVICE (0x2001, 0x3c05),
+ .driver_info = (unsigned long) &ax88772_info,
+ }, {
++ // DLink DUB-E100 H/W Ver C1
++ USB_DEVICE (0x2001, 0x1a02),
++ .driver_info = (unsigned long) &ax88772_info,
++}, {
+ // Linksys USB1000
+ USB_DEVICE (0x1737, 0x0039),
+ .driver_info = (unsigned long) &ax88178_info,
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index 864448b..e773250 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+ return -EIO;
+ }
+
+- *datap = *attrdata;
++ *datap = le16_to_cpu(*attrdata);
+
+ kfree(attrdata);
+ return result;
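The one-line fix converts the attribute from wire order before storing it; the
old direct assignment only happened to work on little-endian hosts. The rule in
miniature, where read_fw_attr() is a hypothetical stand-in for the driver's
control-message read:

    /* Data read from the device is __le16; convert before the CPU uses it. */
    static int read_fw_attr_cpu(struct usbnet *dev, u16 *out)
    {
        __le16 raw;
        int err = read_fw_attr(dev, &raw);   /* hypothetical wire read */

        if (err)
            return err;
        *out = le16_to_cpu(raw);             /* no-op on LE, byte swap on BE */
        return 0;
    }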
+diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
+index aaaca9a..3f575af 100644
+--- a/drivers/net/wan/ixp4xx_hss.c
++++ b/drivers/net/wan/ixp4xx_hss.c
+@@ -10,6 +10,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/module.h>
+ #include <linux/bitops.h>
+ #include <linux/cdev.h>
+ #include <linux/dma-mapping.h>
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+index 8918261..746202f 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+@@ -775,8 +775,11 @@ static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+ {
+ char iovbuf[32];
+ int retcode;
++ __le32 arp_mode_le;
+
+- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
++ arp_mode_le = cpu_to_le32(arp_mode);
++ brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
++ sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+@@ -792,8 +795,11 @@ static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
+ {
+ char iovbuf[32];
+ int retcode;
++ __le32 arp_enable_le;
+
+- brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4,
++ arp_enable_le = cpu_to_le32(arp_enable);
++
++ brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
+ iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+@@ -814,10 +820,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+ char buf[128], *ptr;
+ u32 dongle_align = BRCMF_SDALIGN;
+ u32 glom = 0;
+- u32 roaming = 1;
+- uint bcn_timeout = 3;
+- int scan_assoc_time = 40;
+- int scan_unassoc_time = 40;
++ __le32 roaming_le = cpu_to_le32(1);
++ __le32 bcn_timeout_le = cpu_to_le32(3);
++ __le32 scan_assoc_time_le = cpu_to_le32(40);
++ __le32 scan_unassoc_time_le = cpu_to_le32(40);
+ int i;
+
+ brcmf_os_proto_block(drvr);
+@@ -852,14 +858,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+
+ /* Setup timeout if Beacons are lost and roam is off to report
+ link down */
+- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
++ brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+	/* Enable/Disable built-in roaming so that an external supplicant can
+	   take care of roaming */
+- brcmf_c_mkiovar("roam_off", (char *)&roaming, 4,
++ brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+@@ -874,9 +880,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+ sizeof(iovbuf));
+
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+- (char *)&scan_assoc_time, sizeof(scan_assoc_time));
++ (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+- (char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
++ (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
+
+ /* Set and enable ARP offload feature */
+ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+index 5eddabe..e4e326a 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+@@ -498,8 +498,10 @@ static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
+ params_le->active_time = cpu_to_le32(-1);
+ params_le->passive_time = cpu_to_le32(-1);
+ params_le->home_time = cpu_to_le32(-1);
+- if (ssid && ssid->SSID_len)
+- memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid));
++ if (ssid && ssid->SSID_len) {
++ params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
++ memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
++ }
+ }
+
+ static s32
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+index 9fc804d..7305a47 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+@@ -117,6 +117,7 @@
+
+ #define CHIP_VER_B BIT(4)
+ #define CHIP_92C_BITMASK BIT(0)
++#define CHIP_UNKNOWN BIT(7)
+ #define CHIP_92C_1T2R 0x03
+ #define CHIP_92C 0x01
+ #define CHIP_88C 0x00
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+index a3deaef..cb480d8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+@@ -1001,8 +1001,16 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
+ version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
+ VERSION_A_CHIP_88C;
+ } else {
+- version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C :
+- VERSION_B_CHIP_88C;
++ version = (enum version_8192c) (CHIP_VER_B |
++ ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) |
++ ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
++ if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 &
++ CHIP_VER_RTL_MASK)) {
++ version = (enum version_8192c)(version |
++ ((((value32 & CHIP_VER_RTL_MASK) == BIT(12))
++ ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
++ CHIP_VENDOR_UMC));
++ }
+ }
+
+ switch (version) {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+index f2aa33d..df852e8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+@@ -165,12 +165,14 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+
+ /* request fw */
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+- !IS_92C_SERIAL(rtlhal->version))
++ !IS_92C_SERIAL(rtlhal->version)) {
+ fw_name = "rtlwifi/rtl8192cfwU.bin";
+- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
++ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+- else
++ pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
++ } else {
+ fw_name = rtlpriv->cfg->fw_name;
++ }
+ err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 9ddf69e..74d38ca 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+ if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
+ return AE_OK;
+
++ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
++ if (ACPI_FAILURE(status)) {
++ warn("can't evaluate _ADR (%#x)\n", status);
++ return AE_OK;
++ }
++
++ device = (adr >> 16) & 0xffff;
++ function = adr & 0xffff;
++
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+@@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+ }
+ }
+
+- acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+- device = (adr >> 16) & 0xffff;
+- function = adr & 0xffff;
+-
+ newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
+ if (!newfunc)
+ return AE_NO_MEMORY;
+diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
+index edaccad..f75a4c8 100644
+--- a/drivers/platform/x86/asus-laptop.c
++++ b/drivers/platform/x86/asus-laptop.c
+@@ -823,9 +823,9 @@ static ssize_t show_infos(struct device *dev,
+ * The significance of others is yet to be found.
+	 * If we don't find the method, we assume the devices are present.
+ */
+- rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
++ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
+ if (!ACPI_FAILURE(rv))
+- len += sprintf(page + len, "HRWS value : %#x\n",
++ len += sprintf(page + len, "HWRS value : %#x\n",
+ (uint) temp);
+ /*
+ * Another value for userspace: the ASYM method returns 0x02 for
+@@ -1660,9 +1660,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ * The significance of others is yet to be found.
+ */
+ status =
+- acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
++ acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
+ if (!ACPI_FAILURE(status))
+- pr_notice(" HRWS returned %x", (int)hwrs_result);
++ pr_notice(" HWRS returned %x", (int)hwrs_result);
+
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
+index 20687d5..a3e98f1 100644
+--- a/drivers/rtc/rtc-twl.c
++++ b/drivers/rtc/rtc-twl.c
+@@ -462,6 +462,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
+ goto out1;
+ }
+
++ /* ensure interrupts are disabled, bootloaders can be strange */
++ ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
++ if (ret < 0)
++ dev_warn(&pdev->dev, "unable to disable interrupt\n");
++
+ /* init cached IRQ enable bits */
+ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ if (ret < 0)
+diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
+index 1ad0b82..1069974 100644
+--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
+@@ -1264,6 +1264,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+ int rc = 0;
+ u64 mask64;
+
++ memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
++ memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));
++
+ bnx2i_adjust_qp_size(hba);
+
+ iscsi_init.flags =
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b4d2c86..be9aad8 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1213,8 +1213,9 @@ static void complete_scsi_command(struct CommandList *cp)
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
++ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p has "
+- "protocol error \n", cp);
++ "protocol error\n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ cmd->result = DID_ERROR << 16;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 98cb5e6..17de348 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1156,6 +1156,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ u16 message_control;
+
+
++	/* Check whether this is a SAS2008 B0 controller;
++	   if so, use IO-APIC instead of MSI-X */
++ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
++ ioc->pdev->revision == 0x01) {
++ return -EINVAL;
++ }
++
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 597fb9b..34d114a 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3039,15 +3039,20 @@ static int transport_generic_cmd_sequencer(
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+-
++ /*
++ * For the overflow case keep the existing fabric provided
++ * ->data_length. Otherwise for the underflow case, reset
++ * ->data_length to the smaller SCSI expected data transfer
++ * length.
++ */
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
++ cmd->data_length = size;
+ }
+- cmd->data_length = size;
+ }
+
+ /* reject any command that we don't have a handler for */
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 08b92a6..8d70fbc 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -236,6 +236,9 @@ struct eg20t_port {
+ int tx_dma_use;
+ void *rx_buf_virt;
+ dma_addr_t rx_buf_dma;
++
++ /* protect the eg20t_port private structure and io access to membase */
++ spinlock_t lock;
+ };
+
+ /**
+@@ -964,7 +967,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ unsigned int iid;
+ unsigned long flags;
+
+- spin_lock_irqsave(&priv->port.lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
+ handled = 0;
+ while ((iid = pch_uart_hal_get_iid(priv)) > 1) {
+ switch (iid) {
+@@ -1017,7 +1020,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ priv->int_dis_flag = 0;
+ }
+
+- spin_unlock_irqrestore(&priv->port.lock, flags);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_RETVAL(handled);
+ }
+
+@@ -1131,9 +1134,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
+ unsigned long flags;
+
+ priv = container_of(port, struct eg20t_port, port);
+- spin_lock_irqsave(&port->lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
+ pch_uart_hal_set_break(priv, ctl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ /* Grab any interrupt resources and initialise any low level driver state. */
+@@ -1284,7 +1287,8 @@ static void pch_uart_set_termios(struct uart_port *port,
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+- spin_lock_irqsave(&port->lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
++ spin_lock(&port->lock);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
+@@ -1297,7 +1301,8 @@ static void pch_uart_set_termios(struct uart_port *port,
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ out:
+- spin_unlock_irqrestore(&port->lock, flags);
++ spin_unlock(&port->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ static const char *pch_uart_type(struct uart_port *port)
+@@ -1449,6 +1454,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
+ pci_enable_msi(pdev);
+ pci_set_master(pdev);
+
++ spin_lock_init(&priv->lock);
++
+ iobase = pci_resource_start(pdev, 0);
+ mapbase = pci_resource_start(pdev, 1);
+ priv->mapbase = mapbase;
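The driver used to serialize its interrupt handler and register accesses on
port->lock, which the serial core takes independently; the patch gives the
driver a private lock and nests the two in a fixed order in set_termios(). The
ordering rule, condensed into an illustrative fragment (not part of the patch):

    spin_lock_init(&priv->lock);                 /* once, at device init */

    spin_lock_irqsave(&priv->lock, flags);       /* outer: driver state */
    spin_lock(&port->lock);                      /* inner: serial core,
                                                  * irqs already off */
    /* ... touch driver state and serial-core state ... */
    spin_unlock(&port->lock);
    spin_unlock_irqrestore(&priv->lock, flags);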
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index d956965..3440812 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
+ /* print devices for all busses */
+ list_for_each_entry(bus, &usb_bus_list, bus_list) {
+ /* recurse through all children of the root hub */
+- if (!bus->root_hub)
++ if (!bus_to_hcd(bus)->rh_registered)
+ continue;
+ usb_lock_device(bus->root_hub);
+ ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 8cb9304..032e5a6 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1002,10 +1002,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ if (retval) {
+ dev_err (parent_dev, "can't register root hub for %s, %d\n",
+ dev_name(&usb_dev->dev), retval);
+- }
+- mutex_unlock(&usb_bus_list_lock);
+-
+- if (retval == 0) {
++ } else {
+ spin_lock_irq (&hcd_root_hub_lock);
+ hcd->rh_registered = 1;
+ spin_unlock_irq (&hcd_root_hub_lock);
+@@ -1014,6 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ if (HCD_DEAD(hcd))
+ usb_hc_died (hcd); /* This time clean up */
+ }
++ mutex_unlock(&usb_bus_list_lock);
+
+ return retval;
+ }
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index 527736e..d584eaf 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -2292,10 +2292,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ hs_hcd->has_tt = 1;
+
+ retval = usb_add_hcd(hs_hcd, 0, 0);
+- if (retval != 0) {
+- usb_put_hcd(hs_hcd);
+- return retval;
+- }
++ if (retval)
++ goto put_usb2_hcd;
+
+ if (mod_data.is_super_speed) {
+ ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
+@@ -2314,6 +2312,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ put_usb3_hcd:
+ usb_put_hcd(ss_hcd);
+ dealloc_usb2_hcd:
++ usb_remove_hcd(hs_hcd);
++put_usb2_hcd:
+ usb_put_hcd(hs_hcd);
+ the_controller.hs_hcd = the_controller.ss_hcd = NULL;
+ return retval;
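The dummy_hcd fix reworks the early returns into the usual kernel goto ladder,
so a failure after usb_add_hcd() now removes the HCD before dropping the last
reference. The pattern in general form; all foo_* names are hypothetical:

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo *a;
        int ret;

        a = foo_create();              /* step 1 */
        if (!a)
            return -ENOMEM;

        ret = foo_add(a);              /* step 2 */
        if (ret)
            goto put;                  /* undo step 1 only */

        ret = foo_start(a);            /* step 3 */
        if (ret)
            goto remove;               /* undo step 2, then step 1 */
        return 0;

    remove:
        foo_remove(a);
    put:
        foo_put(a);
        return ret;
    }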
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 3c166d3..f62be89 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -813,6 +813,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
+ hpwdt_timer_reg = pci_mem_addr + 0x70;
+ hpwdt_timer_con = pci_mem_addr + 0x72;
+
++ /* Make sure that timer is disabled until /dev/watchdog is opened */
++ hpwdt_stop();
++
+ /* Make sure that we have a valid soft_margin */
+ if (hpwdt_change_timer(soft_margin))
+ hpwdt_change_timer(DEFAULT_MARGIN);
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 1b2e180..667776e 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -327,6 +327,6 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
+ }
+
+ ctoUCS_out:
+- return i;
++ return j;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index eb723d3..63c0c6b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -311,7 +311,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
+ * Inform try_to_ascend() that we are no longer attached to the
+ * dentry tree
+ */
+- dentry->d_flags |= DCACHE_DISCONNECTED;
++ dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ if (parent)
+ spin_unlock(&parent->d_lock);
+ dentry_iput(dentry);
+@@ -968,7 +968,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
+ * or deletion
+ */
+ if (new != old->d_parent ||
+- (old->d_flags & DCACHE_DISCONNECTED) ||
++ (old->d_flags & DCACHE_DENTRY_KILLED) ||
+ (!locked && read_seqretry(&rename_lock, seq))) {
+ spin_unlock(&new->d_lock);
+ new = NULL;
+@@ -1054,6 +1054,8 @@ positive:
+ return 1;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -1156,6 +1158,8 @@ out:
+ rename_retry:
+ if (found)
+ return found;
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -2922,6 +2926,8 @@ resume:
+ return;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
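All three rename_retry labels get the same guard: if the walker already holds
rename_lock, it restarts the walk directly, since taking the write side of the
seqlock a second time would deadlock against itself. The optimistic-then-locked
pattern behind it, sketched as a fragment (not part of the patch):

    unsigned seq;
    bool locked = false;

    again:
        if (!locked)
            seq = read_seqbegin(&rename_lock);    /* optimistic pass */
        /* ... walk the dentry tree ... */
        if (!locked && read_seqretry(&rename_lock, seq)) {
            locked = true;                        /* raced: retry under lock */
            write_seqlock(&rename_lock);
            goto again;
        }
        if (locked)
            write_sequnlock(&rename_lock);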
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 53c3bce..0be1aa4 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -123,9 +123,6 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+
+ err = ERR_PTR(-ENOMEM);
+ inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
+- if (h)
+- sysctl_head_finish(h);
+-
+ if (!inode)
+ goto out;
+
+@@ -134,6 +131,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+ d_add(dentry, inode);
+
+ out:
++ if (h)
++ sysctl_head_finish(h);
+ sysctl_head_finish(head);
+ return err;
+ }
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 4eb8c80..1dfe974 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -219,6 +219,8 @@ struct dentry_operations {
+ #define DCACHE_MANAGED_DENTRY \
+ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+
++#define DCACHE_DENTRY_KILLED 0x100000
++
+ extern seqlock_t rename_lock;
+
+ static inline int dname_external(struct dentry *dentry)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index c235e4e..331e2ef 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -875,7 +875,7 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
+ return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
+ }
+
+-void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ int interrupt);
+
+ extern int hid_generic_init(void);
+diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
+index 4b88e69..45e9fcb 100644
+--- a/include/linux/hidraw.h
++++ b/include/linux/hidraw.h
+@@ -76,13 +76,13 @@ struct hidraw_list {
+ #ifdef CONFIG_HIDRAW
+ int hidraw_init(void);
+ void hidraw_exit(void);
+-void hidraw_report_event(struct hid_device *, u8 *, int);
++int hidraw_report_event(struct hid_device *, u8 *, int);
+ int hidraw_connect(struct hid_device *);
+ void hidraw_disconnect(struct hid_device *);
+ #else
+ static inline int hidraw_init(void) { return 0; }
+ static inline void hidraw_exit(void) { }
+-static inline void hidraw_report_event(struct hid_device *hid, u8 *data, int len) { }
++static inline int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { return 0; }
+ static inline int hidraw_connect(struct hid_device *hid) { return -1; }
+ static inline void hidraw_disconnect(struct hid_device *hid) { }
+ #endif
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index 935699b..6bea2c2 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -20,7 +20,7 @@
+ #include <linux/compiler.h>
+ #include <linux/mutex.h>
+
+-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
++#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+
+ struct memory_block {
+ unsigned long start_section_nr;
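The UL suffix matters because a shift is evaluated in the type of its left
operand: with SECTION_SIZE_BITS at 31 or more, plain "1 << n" overflows int
before any widening to unsigned long takes place. A three-line illustration:

    unsigned long a = 1 << 30;    /* fine: fits in int, then widened */
    unsigned long b = 1 << 31;    /* int overflow; sign-extends on 64-bit */
    unsigned long c = 1UL << 36;  /* correct; "1 << 36" would be undefined */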
+diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
+index 22e61fd..28e493b 100644
+--- a/include/linux/xfrm.h
++++ b/include/linux/xfrm.h
+@@ -84,6 +84,8 @@ struct xfrm_replay_state {
+ __u32 bitmap;
+ };
+
++#define XFRMA_REPLAY_ESN_MAX 4096
++
+ struct xfrm_replay_state_esn {
+ unsigned int bmp_len;
+ __u32 oseq;
+diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
+index 15b97d5..fe810d4 100644
+--- a/include/net/bluetooth/smp.h
++++ b/include/net/bluetooth/smp.h
+@@ -131,7 +131,7 @@ struct smp_chan {
+ };
+
+ /* SMP Commands */
+-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
++int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
+ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
+ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index b203e14..921f627 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -269,6 +269,9 @@ struct xfrm_replay {
+ int (*check)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
++ int (*recheck)(struct xfrm_state *x,
++ struct sk_buff *skb,
++ __be32 net_seq);
+ void (*notify)(struct xfrm_state *x, int event);
+ int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+ };
+diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
+index a9c87ad..a9536da 100644
+--- a/include/trace/events/kmem.h
++++ b/include/trace/events/kmem.h
+@@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ __entry->page,
+- page_to_pfn(__entry->page),
++ __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->order,
+ __entry->migratetype,
+ show_gfp_flags(__entry->gfp_flags))
+@@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,
+
+ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ __entry->page,
+- page_to_pfn(__entry->page),
++ __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->order,
+ __entry->migratetype,
+ __entry->order == 0)
+diff --git a/kernel/async.c b/kernel/async.c
+index 80b74b8..009f516 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -88,6 +88,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
+ {
+ struct async_entry *entry;
+
++ if (!running) { /* just check the entry count */
++ if (atomic_read(&entry_count))
++ return 0; /* smaller than any cookie */
++ else
++ return next_cookie;
++ }
++
+ if (!list_empty(running)) {
+ entry = list_first_entry(running,
+ struct async_entry, list);
+@@ -238,9 +245,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
+ */
+ void async_synchronize_full(void)
+ {
+- do {
+- async_synchronize_cookie(next_cookie);
+- } while (!list_empty(&async_running) || !list_empty(&async_pending));
++ async_synchronize_cookie_domain(next_cookie, NULL);
+ }
+ EXPORT_SYMBOL_GPL(async_synchronize_full);
+
+@@ -260,7 +265,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
+ /**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+- * @running: running list to synchronize on
++ * @running: running list to synchronize on, NULL indicates all lists
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list submitted
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 46a1d3c..84a524b 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2080,6 +2080,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
+ * (of no effect) on systems that are actively using CPU hotplug
+ * but making no active use of cpusets.
+ *
++ * The only exception to this is suspend/resume, where we don't
++ * modify cpusets at all.
++ *
+ * This routine ensures that top_cpuset.cpus_allowed tracks
+ * cpu_active_mask on each CPU hotplug (cpuhp) event.
+ *
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 5a8a66e..234e152 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1019,6 +1019,22 @@ NORET_TYPE void do_exit(long code)
+
+ preempt_disable();
+ exit_rcu();
++
++ /*
++ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
++ * when the following two conditions become true.
++ * - there is a race condition on mmap_sem (it is acquired by
++ * exit_mm()), and
++ * - an SMI occurs before setting TASK_RUNNING
++ * (or the hypervisor of a virtual machine switches to another guest).
++ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
++ *
++ * To avoid this, we have to wait for the release of tsk->pi_lock, which
++ * is held by try_to_wake_up().
++ */
++ smp_mb();
++ raw_spin_unlock_wait(&tsk->pi_lock);
++
+ /* causes final put_task_struct in finish_task_switch(). */
+ tsk->state = TASK_DEAD;
+ schedule();
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 910db7d..fcc893f 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -8192,34 +8192,66 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+ }
+ #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
++static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
++
+ /*
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
+ */
+ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
++ switch (action) {
++ case CPU_ONLINE_FROZEN:
++ case CPU_DOWN_FAILED_FROZEN:
++
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ num_cpus_frozen--;
++ if (likely(num_cpus_frozen)) {
++ partition_sched_domains(1, NULL, NULL);
++ break;
++ }
++
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ cpuset_update_active_cpus();
+- return NOTIFY_OK;
++ break;
+ default:
+ return NOTIFY_DONE;
+ }
++ return NOTIFY_OK;
+ }
+
+ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
++ switch (action) {
+ case CPU_DOWN_PREPARE:
+ cpuset_update_active_cpus();
+- return NOTIFY_OK;
++ break;
++ case CPU_DOWN_PREPARE_FROZEN:
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ break;
+ default:
+ return NOTIFY_DONE;
+ }
++ return NOTIFY_OK;
+ }
+
+ static int update_runtime(struct notifier_block *nfb,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 979d4de..b413138 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3627,18 +3627,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+ #ifdef CONFIG_SMP
+
+ struct work_for_cpu {
+- struct completion completion;
++ struct work_struct work;
+ long (*fn)(void *);
+ void *arg;
+ long ret;
+ };
+
+-static int do_work_for_cpu(void *_wfc)
++static void work_for_cpu_fn(struct work_struct *work)
+ {
+- struct work_for_cpu *wfc = _wfc;
++ struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
++
+ wfc->ret = wfc->fn(wfc->arg);
+- complete(&wfc->completion);
+- return 0;
+ }
+
+ /**
+@@ -3653,19 +3652,11 @@ static int do_work_for_cpu(void *_wfc)
+ */
+ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+ {
+- struct task_struct *sub_thread;
+- struct work_for_cpu wfc = {
+- .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+- .fn = fn,
+- .arg = arg,
+- };
++ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+
+- sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+- if (IS_ERR(sub_thread))
+- return PTR_ERR(sub_thread);
+- kthread_bind(sub_thread, cpu);
+- wake_up_process(sub_thread);
+- wait_for_completion(&wfc.completion);
++ INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++ schedule_work_on(cpu, &wfc.work);
++ flush_work(&wfc.work);
+ return wfc.ret;
+ }
+ EXPORT_SYMBOL_GPL(work_on_cpu);
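work_on_cpu() now rides the workqueue machinery instead of spawning and binding
a throwaway kthread per call. The on-stack work pattern it adopts, in miniature
(do_something() is hypothetical):

    struct my_req {
        struct work_struct work;       /* initialized by INIT_WORK_ONSTACK */
        int arg, ret;
    };

    static void my_req_fn(struct work_struct *work)
    {
        struct my_req *r = container_of(work, struct my_req, work);

        r->ret = do_something(r->arg);
    }

    static int run_on(int cpu, int arg)
    {
        struct my_req r = { .arg = arg };

        INIT_WORK_ONSTACK(&r.work, my_req_fn);
        schedule_work_on(cpu, &r.work);
        flush_work(&r.work);           /* r stays valid until this returns */
        return r.ret;
    }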
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 6629faf..9ad7d1e 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
+ struct mem_section *ms;
+ struct page *page, *memmap;
+
+- if (!pfn_valid(start_pfn))
+- return;
+-
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+
+@@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
+ end_pfn = pfn + pgdat->node_spanned_pages;
+
+ /* register_section info */
+- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+- register_page_bootmem_info_section(pfn);
+-
++ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
++ /*
++ * Some platforms can assign the same pfn to multiple nodes - on
++ * node0 as well as nodeN. To avoid registering a pfn against
++ * multiple nodes we check that this pfn does not already
++ * reside in some other node.
++ */
++ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
++ register_page_bootmem_info_section(pfn);
++ }
+ }
+ #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6e51bf0..a88dded 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -541,7 +541,7 @@ static inline void __free_one_page(struct page *page,
+ combined_idx = buddy_idx & page_idx;
+ higher_page = page + (combined_idx - page_idx);
+ buddy_idx = __find_buddy_index(combined_idx, order + 1);
+- higher_buddy = page + (buddy_idx - combined_idx);
++ higher_buddy = higher_page + (buddy_idx - combined_idx);
+ if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+ list_add_tail(&page->lru,
+ &zone->free_area[order].free_list[migratetype]);
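The bug computed the next-order buddy relative to the original page instead of
the start of the merged block; buddy offsets are only meaningful against the
right base. In XOR terms, where buddy_index() mirrors __find_buddy_index():

    /* A block's buddy at a given order is its index with that order
     * bit flipped. */
    static inline unsigned long buddy_index(unsigned long idx,
                                            unsigned int order)
    {
        return idx ^ (1UL << order);
    }

    combined_idx = buddy_idx & page_idx;              /* merged block start */
    higher_page  = page + (combined_idx - page_idx);
    buddy_idx    = buddy_index(combined_idx, order + 1);
    higher_buddy = higher_page + (buddy_idx - combined_idx);  /* not page + */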
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index f5ffc02..9ddbd4e 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -106,7 +106,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ return NULL;
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+- skb_reset_mac_len(skb);
+ return skb;
+ }
+
+@@ -173,6 +172,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
++ skb_reset_mac_len(skb);
++
+ return skb;
+
+ err_free:
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 98bfbd5..1fb1aec 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -44,6 +44,7 @@
+
+ #include <net/bluetooth/bluetooth.h>
+ #include <net/bluetooth/hci_core.h>
++#include <net/bluetooth/smp.h>
+
+ static void hci_le_connect(struct hci_conn *conn)
+ {
+@@ -641,6 +642,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
++ if (conn->type == LE_LINK)
++ return smp_conn_security(conn, sec_level);
++
+ /* For sdp we don't need the link key. */
+ if (sec_level == BT_SECURITY_SDP)
+ return 1;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index dd76177..04175d9 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -902,14 +902,15 @@ static void l2cap_chan_ready(struct sock *sk)
+ static void l2cap_conn_ready(struct l2cap_conn *conn)
+ {
+ struct l2cap_chan *chan;
++ struct hci_conn *hcon = conn->hcon;
+
+ BT_DBG("conn %p", conn);
+
+- if (!conn->hcon->out && conn->hcon->type == LE_LINK)
++ if (!hcon->out && hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
+- if (conn->hcon->out && conn->hcon->type == LE_LINK)
+- smp_conn_security(conn, conn->hcon->pending_sec_level);
++ if (hcon->out && hcon->type == LE_LINK)
++ smp_conn_security(hcon, hcon->pending_sec_level);
+
+ read_lock(&conn->chan_lock);
+
+@@ -918,8 +919,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
+
+ bh_lock_sock(sk);
+
+- if (conn->hcon->type == LE_LINK) {
+- if (smp_conn_security(conn, chan->sec_level))
++ if (hcon->type == LE_LINK) {
++ if (smp_conn_security(hcon, chan->sec_level))
+ l2cap_chan_ready(sk);
+
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 6dedd6f..158887a 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -616,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+ break;
+ }
+
+- if (smp_conn_security(conn, sec.level))
++ if (smp_conn_security(conn->hcon, sec.level))
+ break;
+
+ err = 0;
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 759b635..c27b4e3 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -554,9 +554,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ return 0;
+ }
+
+-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
++int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ {
+- struct hci_conn *hcon = conn->hcon;
++ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 832ba6d..abe1147 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2108,7 +2108,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+
+ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+ {
+- if (!can_checksum_protocol(features, protocol)) {
++ if (skb->ip_summed != CHECKSUM_NONE &&
++ !can_checksum_protocol(features, protocol)) {
+ features &= ~NETIF_F_ALL_CSUM;
+ features &= ~NETIF_F_SG;
+ } else if (illegal_highdma(skb->dev, skb)) {
+@@ -2686,16 +2687,17 @@ ipv6:
+ nhoff += poff;
+ if (pskb_may_pull(skb, nhoff + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + nhoff);
+- if (ports.v16[1] < ports.v16[0])
+- swap(ports.v16[0], ports.v16[1]);
+ skb->l4_rxhash = 1;
+ }
+ }
+
+ /* get a consistent hash (same value on both flow directions) */
+- if (addr2 < addr1)
++ if (addr2 < addr1 ||
++ (addr2 == addr1 &&
++ ports.v16[1] < ports.v16[0])) {
+ swap(addr1, addr2);
+-
++ swap(ports.v16[0], ports.v16[1]);
++ }
+ hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!hash)
+ hash = 1;
+@@ -6387,7 +6389,8 @@ static struct hlist_head *netdev_create_hash(void)
+ /* Initialize per network namespace state */
+ static int __net_init netdev_init(struct net *net)
+ {
+- INIT_LIST_HEAD(&net->dev_base_head);
++ if (net != &init_net)
++ INIT_LIST_HEAD(&net->dev_base_head);
+
+ net->dev_name_head = netdev_create_hash();
+ if (net->dev_name_head == NULL)
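The rxhash hunk bases the canonical ordering on the full (address, port) tuple,
so the two directions of a flow still hash identically when the addresses are
equal and only the ports differ. The shape of the computation, illustrative
rather than verbatim:

    static u32 flow_hash(u32 a1, u32 a2, u16 p1, u16 p2, u32 seed)
    {
        /* Addresses decide the order; ports break the tie, so
         * (a1,p1,a2,p2) and (a2,p2,a1,p1) hash the same. */
        if (a2 < a1 || (a2 == a1 && p2 < p1)) {
            swap(a1, a2);
            swap(p1, p2);
        }
        return jhash_3words(a1, a2, ((u32)p1 << 16) | p2, seed);
    }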
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 31a5ae5..dd00b71 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -25,7 +25,9 @@ static DEFINE_MUTEX(net_mutex);
+ LIST_HEAD(net_namespace_list);
+ EXPORT_SYMBOL_GPL(net_namespace_list);
+
+-struct net init_net;
++struct net init_net = {
++ .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
++};
+ EXPORT_SYMBOL(init_net);
+
+ #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 018fd41..1e8a882 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -593,7 +593,8 @@ set_rcvbuf:
+
+ case SO_KEEPALIVE:
+ #ifdef CONFIG_INET
+- if (sk->sk_protocol == IPPROTO_TCP)
++ if (sk->sk_protocol == IPPROTO_TCP &&
++ sk->sk_type == SOCK_STREAM)
+ tcp_set_keepalive(sk, valbool);
+ #endif
+ sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 007e2eb..e1d4f30 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -131,18 +131,20 @@ found:
+ * 0 - deliver
+ * 1 - block
+ */
+-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
++static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- int type;
++ struct icmphdr _hdr;
++ const struct icmphdr *hdr;
+
+- if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
++ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (!hdr)
+ return 1;
+
+- type = icmp_hdr(skb)->type;
+- if (type < 32) {
++ if (hdr->type < 32) {
+ __u32 data = raw_sk(sk)->filter.data;
+
+- return ((1 << type) & data) != 0;
++ return ((1U << hdr->type) & data) != 0;
+ }
+
+ /* Do not block unknown ICMP types */
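icmp_filter() switches from pskb_may_pull(), which may reallocate skb->head and
so invalidate header pointers its callers still hold, to skb_header_pointer(),
which never modifies the skb. The general read-only access pattern:

    struct icmphdr _hdr;               /* stack backing store */
    const struct icmphdr *hdr;

    hdr = skb_header_pointer(skb, skb_transport_offset(skb),
                             sizeof(_hdr), &_hdr);
    if (!hdr)                          /* packet too short */
        return 1;
    /* hdr points into the skb if linear, else into _hdr; never written. */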
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 043d49b..7397ad8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1589,8 +1589,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ }
+
+ #ifdef CONFIG_NET_DMA
+- if (tp->ucopy.dma_chan)
+- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
++ if (tp->ucopy.dma_chan) {
++ if (tp->rcv_wnd == 0 &&
++ !skb_queue_empty(&sk->sk_async_wait_queue)) {
++ tcp_service_net_dma(sk, true);
++ tcp_cleanup_rbuf(sk, copied);
++ } else
++ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
++ }
+ #endif
+ if (copied >= target) {
+ /* Do not sleep, just process backlog. */
+diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
+index 43242e6..42853c4 100644
+--- a/net/ipv6/mip6.c
++++ b/net/ipv6/mip6.c
+@@ -84,28 +84,30 @@ static int mip6_mh_len(int type)
+
+ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
+ {
+- struct ip6_mh *mh;
++ struct ip6_mh _hdr;
++ const struct ip6_mh *mh;
+
+- if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
+- !pskb_may_pull(skb, (skb_transport_offset(skb) +
+- ((skb_transport_header(skb)[1] + 1) << 3))))
++ mh = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (!mh)
+ return -1;
+
+- mh = (struct ip6_mh *)skb_transport_header(skb);
++ if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
++ return -1;
+
+ if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
+ LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
+ mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
+- mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
+- skb_network_header(skb)));
++ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
++ skb_network_header_len(skb));
+ return -1;
+ }
+
+ if (mh->ip6mh_proto != IPPROTO_NONE) {
+ LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
+ mh->ip6mh_proto);
+- mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
+- skb_network_header(skb)));
++ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
++ skb_network_header_len(skb));
+ return -1;
+ }
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 361ebf3..6e6c2c4 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -107,21 +107,20 @@ found:
+ * 0 - deliver
+ * 1 - block
+ */
+-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
++static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- struct icmp6hdr *icmph;
+- struct raw6_sock *rp = raw6_sk(sk);
+-
+- if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
+- __u32 *data = &rp->filter.data[0];
+- int bit_nr;
++	struct icmp6hdr _hdr;
++ const struct icmp6hdr *hdr;
+
+- icmph = (struct icmp6hdr *) skb->data;
+- bit_nr = icmph->icmp6_type;
++ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (hdr) {
++ const __u32 *data = &raw6_sk(sk)->filter.data[0];
++ unsigned int type = hdr->icmp6_type;
+
+- return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
++ return (data[type >> 5] & (1U << (type & 31))) != 0;
+ }
+- return 0;
++ return 1;
+ }
+
+ #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 2e21751..488a1b7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1435,17 +1435,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
+ struct fib6_table *table;
+ struct net *net = dev_net(rt->rt6i_dev);
+
+- if (rt == net->ipv6.ip6_null_entry)
+- return -ENOENT;
++ if (rt == net->ipv6.ip6_null_entry) {
++ err = -ENOENT;
++ goto out;
++ }
+
+ table = rt->rt6i_table;
+ write_lock_bh(&table->tb6_lock);
+-
+ err = fib6_del(rt, info);
+- dst_release(&rt->dst);
+-
+ write_unlock_bh(&table->tb6_lock);
+
++out:
++ dst_release(&rt->dst);
+ return err;
+ }
+
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 3c55f63..2cef50b 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
+ printk("\n");
+ }
+
+- if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
++ if (!pskb_may_pull(skb, ETH_HLEN))
+ goto error;
+
+ secpath_reset(skb);
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 732152f..f156382 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1170,7 +1170,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ msg->msg_flags |= MSG_TRUNC;
+ }
+
+- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++ er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++ if (er < 0) {
++ skb_free_datagram(sk, skb);
++ release_sock(sk);
++ return er;
++ }
+
+ if (sax != NULL) {
+ sax->sax25_family = AF_NETROM;
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index bc3f8cd..fc57d31 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
++ msg->msg_namelen = 0;
++
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+@@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ sin->sin_port = inc->i_hdr.h_sport;
+ sin->sin_addr.s_addr = inc->i_saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ msg->msg_namelen = sizeof(*sin);
+ }
+ break;
+ }
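Both rds hunks enforce the recvmsg() contract: msg_namelen must be zero unless
a source address was actually written into msg_name, otherwise uninitialized
stack bytes can leak to userspace through the reported length. The rule as a
small helper (a sketch, not the patch's code):

    static void fill_peer_addr(struct msghdr *msg, __be32 saddr, __be16 sport)
    {
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

        msg->msg_namelen = 0;              /* default: no address returned */
        if (!sin)
            return;
        sin->sin_family = AF_INET;
        sin->sin_port = sport;
        sin->sin_addr.s_addr = saddr;
        memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        msg->msg_namelen = sizeof(*sin);   /* set only when actually filled */
    }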
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 24d94c0..599f67a 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
+ cl = defmap[TC_PRIO_BESTEFFORT];
+
+- if (cl == NULL || cl->level >= head->level)
++ if (cl == NULL)
+ goto fallback;
+ }
+-
++ if (cl->level >= head->level)
++ goto fallback;
+ #ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_QUEUED:
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 7b03254..ca0fb48 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -829,7 +829,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
+ if (mask) {
+ struct qfq_group *next = qfq_ffs(q, mask);
+ if (qfq_gt(roundedF, next->F)) {
+- cl->S = next->F;
++ if (qfq_gt(limit, next->F))
++ cl->S = next->F;
++ else /* preserve timestamp correctness */
++ cl->S = limit;
+ return;
+ }
+ }
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 8fc4dcd..32ba8d0 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -334,6 +334,25 @@ finish:
+ return retval;
+ }
+
++static void sctp_packet_release_owner(struct sk_buff *skb)
++{
++ sk_free(skb->sk);
++}
++
++static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
++{
++ skb_orphan(skb);
++ skb->sk = sk;
++ skb->destructor = sctp_packet_release_owner;
++
++ /*
++ * The data chunks have already been accounted for in sctp_sendmsg(),
++ * therefore only reserve a single byte to keep socket around until
++ * the packet has been transmitted.
++ */
++ atomic_inc(&sk->sk_wmem_alloc);
++}
++
+ /* All packets are sent to the network through this function from
+ * sctp_outq_tail().
+ *
+@@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ /* Set the owning socket so that we know where to get the
+ * destination IP address.
+ */
+- skb_set_owner_w(nskb, sk);
++ sctp_packet_set_owner_w(nskb, sk);
+
+ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index d57d05b..fa39731 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -331,6 +331,9 @@ static void reg_regdb_search(struct work_struct *work)
+ struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *curdom, *regdom;
+ int i, r;
++ bool set_reg = false;
++
++ mutex_lock(&cfg80211_mutex);
+
+ mutex_lock(&reg_regdb_search_mutex);
+ while (!list_empty(&reg_regdb_search_list)) {
+@@ -346,9 +349,7 @@ static void reg_regdb_search(struct work_struct *work)
+ r = reg_copy_regd(&regdom, curdom);
+ if (r)
+ break;
+- mutex_lock(&cfg80211_mutex);
+- set_regdom(regdom);
+- mutex_unlock(&cfg80211_mutex);
++ set_reg = true;
+ break;
+ }
+ }
+@@ -356,6 +357,11 @@ static void reg_regdb_search(struct work_struct *work)
+ kfree(request);
+ }
+ mutex_unlock(&reg_regdb_search_mutex);
++
++ if (set_reg)
++ set_regdom(regdom);
++
++ mutex_unlock(&cfg80211_mutex);
+ }
+
+ static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 54a0dc2..ab2bb42 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -212,7 +212,7 @@ resume:
+ /* only the first xfrm gets the encap type */
+ encap_type = 0;
+
+- if (async && x->repl->check(x, skb, seq)) {
++ if (async && x->repl->recheck(x, skb, seq)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+ goto drop_unlock;
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 0174034..113d20e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
+
+ if (!afinfo) {
+ dst_release(dst_orig);
+- ret = ERR_PTR(-EINVAL);
++ return ERR_PTR(-EINVAL);
+ } else {
+ ret = afinfo->blackhole_route(net, dst_orig);
+ }
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index 2f6d11d..3efb07d 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -420,6 +420,18 @@ err:
+ return -EINVAL;
+ }
+
++static int xfrm_replay_recheck_esn(struct xfrm_state *x,
++ struct sk_buff *skb, __be32 net_seq)
++{
++ if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
++ htonl(xfrm_replay_seqhi(x, net_seq)))) {
++ x->stats.replay_window++;
++ return -EINVAL;
++ }
++
++ return xfrm_replay_check_esn(x, skb, net_seq);
++}
++
+ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+ {
+ unsigned int bitnr, nr, i;
+@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+ static struct xfrm_replay xfrm_replay_legacy = {
+ .advance = xfrm_replay_advance,
+ .check = xfrm_replay_check,
++ .recheck = xfrm_replay_check,
+ .notify = xfrm_replay_notify,
+ .overflow = xfrm_replay_overflow,
+ };
+@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
+ static struct xfrm_replay xfrm_replay_bmp = {
+ .advance = xfrm_replay_advance_bmp,
+ .check = xfrm_replay_check_bmp,
++ .recheck = xfrm_replay_check_bmp,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_bmp,
+ };
+@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
+ static struct xfrm_replay xfrm_replay_esn = {
+ .advance = xfrm_replay_advance_esn,
+ .check = xfrm_replay_check_esn,
++ .recheck = xfrm_replay_recheck_esn,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_esn,
+ };
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 7cae73e..ede01a8 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ struct nlattr **attrs)
+ {
+ struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
++ struct xfrm_replay_state_esn *rs;
+
+- if ((p->flags & XFRM_STATE_ESN) && !rt)
+- return -EINVAL;
++ if (p->flags & XFRM_STATE_ESN) {
++ if (!rt)
++ return -EINVAL;
++
++ rs = nla_data(rt);
++
++ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
++ return -EINVAL;
++
++ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
++ nla_len(rt) != sizeof(*rs))
++ return -EINVAL;
++ }
+
+ if (!rt)
+ return 0;
+@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
+ struct nlattr *rp)
+ {
+ struct xfrm_replay_state_esn *up;
++ int ulen;
+
+ if (!replay_esn || !rp)
+ return 0;
+
+ up = nla_data(rp);
++ ulen = xfrm_replay_state_esn_len(up);
+
+- if (xfrm_replay_state_esn_len(replay_esn) !=
+- xfrm_replay_state_esn_len(up))
++ if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ return -EINVAL;
+
+ return 0;
+@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
+ struct nlattr *rta)
+ {
+ struct xfrm_replay_state_esn *p, *pp, *up;
++ int klen, ulen;
+
+ if (!rta)
+ return 0;
+
+ up = nla_data(rta);
++ klen = xfrm_replay_state_esn_len(up);
++ ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
+
+- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
++ p = kzalloc(klen, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
++ pp = kzalloc(klen, GFP_KERNEL);
+ if (!pp) {
+ kfree(p);
+ return -ENOMEM;
+ }
+
++ memcpy(p, up, ulen);
++ memcpy(pp, up, ulen);
++
+ *replay_esn = p;
+ *preplay_esn = pp;
+
+@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
+ * somehow made shareable and move it to xfrm_state.c - JHS
+ *
+ */
+-static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
++static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
++ int update_esn)
+ {
+ struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+- struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
++ struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
+ struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ goto error;
+
+ /* override default values from above */
+- xfrm_update_ae_params(x, attrs);
++ xfrm_update_ae_params(x, attrs, 0);
+
+ return x;
+
+@@ -689,6 +709,7 @@ out:
+
+ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
+ {
++ memset(p, 0, sizeof(*p));
+ memcpy(&p->id, &x->id, sizeof(p->id));
+ memcpy(&p->sel, &x->sel, sizeof(p->sel));
+ memcpy(&p->lft, &x->lft, sizeof(p->lft));
+@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
+ return -EMSGSIZE;
+
+ algo = nla_data(nla);
+- strcpy(algo->alg_name, auth->alg_name);
++ strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
+ memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
+ algo->alg_key_len = auth->alg_key_len;
+
+@@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+@@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_state(x, 0, &info)) {
++ err = dump_one_state(x, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
+
+ static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
+ {
++ memset(p, 0, sizeof(*p));
+ memcpy(&p->sel, &xp->selector, sizeof(p->sel));
+ memcpy(&p->lft, &xp->lft, sizeof(p->lft));
+ memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
+@@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+ struct xfrm_user_tmpl *up = &vec[i];
+ struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+
++ memset(up, 0, sizeof(*up));
+ memcpy(&up->id, &kp->id, sizeof(up->id));
+ up->family = kp->encap_family;
+ memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
+@@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+@@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_policy(xp, dir, 0, &info) < 0) {
++ err = dump_one_policy(xp, dir, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
+ goto out;
+
+ spin_lock_bh(&x->lock);
+- xfrm_update_ae_params(x, attrs);
++ xfrm_update_ae_params(x, attrs, 1);
+ spin_unlock_bh(&x->lock);
+
+ c.event = nlh->nlmsg_type;
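The xfrm_user changes share one theme: never trust a length that a netlink
attribute reports about itself until the attribute is proven to carry that many
bytes. Generalized below, where var_attr is a hypothetical layout standing in
for xfrm_replay_state_esn, with the bound checked before any multiplication:

    struct var_attr {
        u32 len;                  /* element count, attacker-controlled */
        u32 data[];
    };

    static int verify_var_attr(const struct nlattr *nla, u32 max_elems)
    {
        const struct var_attr *va = nla_data(nla);

        if (nla_len(nla) < sizeof(*va))
            return -EINVAL;       /* header itself truncated */
        if (va->len > max_elems)
            return -EINVAL;       /* declared size out of range */
        if (nla_len(nla) < sizeof(*va) + va->len * sizeof(va->data[0]))
            return -EINVAL;       /* payload shorter than declared */
        return 0;
    }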
+diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
+index a68b264..a9a593a 100644
+--- a/sound/soc/samsung/dma.c
++++ b/sound/soc/samsung/dma.c
+@@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP |
+- SNDRV_PCM_INFO_MMAP_VALID |
+- SNDRV_PCM_INFO_PAUSE |
+- SNDRV_PCM_INFO_RESUME,
++ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_U8 |
+@@ -246,15 +244,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+- case SNDRV_PCM_TRIGGER_RESUME:
+- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ prtd->state |= ST_RUNNING;
+ prtd->params->ops->trigger(prtd->params->ch);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+- case SNDRV_PCM_TRIGGER_SUSPEND:
+- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ prtd->state &= ~ST_RUNNING;
+ prtd->params->ops->stop(prtd->params->ch);
+ break;
diff --git a/3.2.54/1031_linux-3.2.32.patch b/3.2.54/1031_linux-3.2.32.patch
new file mode 100644
index 0000000..247fc0b
--- /dev/null
+++ b/3.2.54/1031_linux-3.2.32.patch
@@ -0,0 +1,6206 @@
+diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
+index c095d79..288dba6 100644
+--- a/Documentation/virtual/lguest/lguest.c
++++ b/Documentation/virtual/lguest/lguest.c
+@@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
+ dev->feature_len = 0;
+ dev->num_vq = 0;
+ dev->running = false;
++ dev->next = NULL;
+
+ /*
+ * Append to device list. Prepending to a single-linked list is
+diff --git a/Makefile b/Makefile
+index fd9c414..b6d8282 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index a6cbb71..04e703a 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -82,22 +82,29 @@ static void notrace omap_update_sched_clock(void)
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ void read_persistent_clock(struct timespec *ts)
+ {
+ unsigned long long nsecs;
+- cycles_t delta;
+- struct timespec *tsp = &persistent_ts;
++ cycles_t last_cycles;
++ unsigned long flags;
++
++ spin_lock_irqsave(&read_persistent_clock_lock, flags);
+
+ last_cycles = cycles;
+ cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+- delta = cycles - last_cycles;
+
+- nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++ nsecs = clocksource_cyc2ns(cycles - last_cycles,
++ persistent_mult, persistent_shift);
++
++ timespec_add_ns(&persistent_ts, nsecs);
++
++ *ts = persistent_ts;
+
+- timespec_add_ns(tsp, nsecs);
+- *ts = *tsp;
++ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+
+ int __init omap_init_clocksource_32k(void)
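
Aside: the hunk above relies on clocksource_cyc2ns(), which computes ns = (cycles * mult) >> shift, and on the fact that the unsigned subtraction cycles - last_cycles stays correct across counter wraparound. A standalone sketch (the mult/shift pair is chosen by hand for a 32768 Hz counter, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* clocksource_cyc2ns()-style conversion: ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint32_t cycles, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)cycles * mult) >> shift;
}

int main(void)
{
	/* For a 32768 Hz counter, mult = 10^9 and shift = 15 are exact:
	 * ns = cycles * 10^9 / 32768. */
	uint32_t mult = 1000000000u, shift = 15;
	uint32_t last = 0xfffffff0u, now = 0x00000010u;

	/* Unsigned wrap makes the delta 0x20 cycles despite the rollover. */
	printf("%llu ns\n",
	       (unsigned long long)cyc2ns(now - last, mult, shift));
	return 0;
}
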
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 0be3186..aaf7444 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -224,7 +224,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+ LDFLAGS += -m $(ld-emul)
+
+ ifdef CONFIG_MIPS
+-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
++CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ egrep -vw '__GNUC_(|MINOR|PATCHLEVEL)__' | \
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+ ifdef CONFIG_64BIT
+diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
+index 1a96618..ce7dd99 100644
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -102,7 +102,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+
+ obj-$(CONFIG_OF) += prom.o
+
+-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
++CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+
+diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
+index 7120282..3eb4a52 100644
+--- a/arch/mn10300/Makefile
++++ b/arch/mn10300/Makefile
+@@ -26,7 +26,7 @@ CHECKFLAGS +=
+ PROCESSOR := unset
+ UNIT := unset
+
+-KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
++KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
+ KBUILD_AFLAGS += -mam33 -DCPU=AM33
+
+ ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
+diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
+index 1b6cb10..a0a4e8a 100644
+--- a/arch/powerpc/platforms/pseries/eeh_driver.c
++++ b/arch/powerpc/platforms/pseries/eeh_driver.c
+@@ -25,6 +25,7 @@
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
++#include <linux/module.h>
+ #include <linux/pci.h>
+ #include <asm/eeh.h>
+ #include <asm/eeh_event.h>
+@@ -41,6 +42,41 @@ static inline const char * pcid_name (struct pci_dev *pdev)
+ return "";
+ }
+
++/**
++ * eeh_pcid_get - Get the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is used to retrieve the PCI device driver for
++ * the indicated PCI device. It also takes a reference on the PCI
++ * device driver so that the driver cannot be unloaded while its
++ * handlers are running; otherwise a kernel crash could result.
++ */
++static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return NULL;
++
++ if (!try_module_get(pdev->driver->driver.owner))
++ return NULL;
++
++ return pdev->driver;
++}
++
++/**
++ * eeh_pcid_put - Drop the reference on the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is called to do dereference on the PCI device
++ * driver of the indicated PCI device.
++ */
++static inline void eeh_pcid_put(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return;
++
++ module_put(pdev->driver->driver.owner);
++}
++
+ #if 0
+ static void print_device_node_tree(struct pci_dn *pdn, int dent)
+ {
+@@ -109,18 +145,20 @@ static void eeh_enable_irq(struct pci_dev *dev)
+ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_frozen;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
+
+@@ -128,6 +166,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -142,12 +181,15 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+- if (!driver ||
+- !driver->err_handler ||
+- !driver->err_handler->mmio_enabled)
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++ if (!driver->err_handler ||
++ !driver->err_handler->mmio_enabled) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->mmio_enabled (dev);
+
+@@ -155,6 +197,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -165,18 +208,20 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
+-
+- if (!driver)
+- return 0;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->slot_reset)
++ !driver->err_handler->slot_reset) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->slot_reset(dev);
+ if ((*res == PCI_ERS_RESULT_NONE) ||
+@@ -184,6 +229,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -193,21 +239,24 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->resume)
++ !driver->err_handler->resume) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->resume(dev);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -220,21 +269,24 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_perm_failure;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 18601c8..884507e 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+
+ static inline int pmd_large(pmd_t pte)
+ {
+- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+- (_PAGE_PSE | _PAGE_PRESENT);
++ return pmd_flags(pte) & _PAGE_PSE;
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
+
+ static inline int pmd_present(pmd_t pmd)
+ {
+- return pmd_flags(pmd) & _PAGE_PRESENT;
++ /*
++ * Checking for _PAGE_PSE is needed too because
++ * split_huge_page will temporarily clear the present bit (but
++ * the _PAGE_PSE flag will remain set at all times while the
++ * _PAGE_PRESENT bit is clear).
++ */
++ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ }
+
+ static inline int pmd_none(pmd_t pmd)
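
Aside: the reasoning in the pmd_present() comment can be checked mechanically — a huge pmd that split_huge_page() is splitting has _PAGE_PRESENT clear but _PAGE_PSE still set, so the old test reports it absent while the new one reports it present. A userspace sketch (bit positions assumed from x86: present = bit 0, PSE = bit 7, PROTNONE = bit 8):

#include <stdio.h>

#define _PAGE_PRESENT  (1UL << 0)
#define _PAGE_PSE      (1UL << 7)
#define _PAGE_PROTNONE (1UL << 8)

static int pmd_present_old(unsigned long flags)
{
	return !!(flags & _PAGE_PRESENT);
}

static int pmd_present_new(unsigned long flags)
{
	return !!(flags & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE));
}

int main(void)
{
	/* A huge pmd mid-split: present cleared, PSE still set. */
	unsigned long splitting = _PAGE_PSE;

	printf("old=%d new=%d\n",
	       pmd_present_old(splitting), pmd_present_new(splitting));
	/* prints: old=0 new=1 */
	return 0;
}
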
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 37718f0..4d320b2 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -731,6 +731,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
++ efi.runtime_version = efi_systab.fw_revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 9ecec98..5016de5 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -950,8 +950,6 @@ static int __init acpi_bus_init(void)
+ status = acpi_ec_ecdt_probe();
+ /* Ignore result. Not having an ECDT is not fatal. */
+
+- acpi_bus_osc_support();
+-
+ status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+@@ -959,6 +957,12 @@ static int __init acpi_bus_init(void)
+ }
+
+ /*
++ * _OSC method may exist in module level code,
++ * so it must be run after ACPI_FULL_INITIALIZATION
++ */
++ acpi_bus_osc_support();
++
++ /*
+ * _PDC control method may load dynamic SSDT tables,
+ * and we need to install the table handler before that.
+ */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6f95d98..1f90dab 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -108,7 +108,7 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ /* Foxconn - Hon Hai */
+- { USB_DEVICE(0x0489, 0xe033) },
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+
+ /*Broadcom devices with vendor specific id */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
+index eedd547..5936691 100644
+--- a/drivers/char/ttyprintk.c
++++ b/drivers/char/ttyprintk.c
+@@ -67,7 +67,7 @@ static int tpk_printk(const unsigned char *buf, int count)
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+- if (buf[i + 1] == '\n')
++ if ((i + 1) < count && buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
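
Aside: the ttyprintk change adds a bounds check before peeking at buf[i + 1], closing a one-byte overread when the buffer ends in '\r'. A standalone sketch of the same guarded CR/LF lookahead (the function is illustrative, not the driver's):

#include <stdio.h>

/* Count line breaks, folding "\r\n" pairs, without reading past the
 * end of the buffer: buf[i + 1] is touched only when (i + 1) < count. */
static int count_lines(const unsigned char *buf, int count)
{
	int i, lines = 0;

	for (i = 0; i < count; i++) {
		if (buf[i] == '\r') {
			lines++;
			if ((i + 1) < count && buf[i + 1] == '\n')
				i++;	/* consume the LF of a CRLF pair */
		} else if (buf[i] == '\n') {
			lines++;
		}
	}
	return lines;
}

int main(void)
{
	const unsigned char s[] = "a\r\nb\rc\n";

	printf("%d\n", count_lines(s, sizeof(s) - 1));	/* 3 */
	return 0;
}
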
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index b48967b..5991114 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -564,8 +564,8 @@ void dmaengine_get(void)
+ list_del_rcu(&device->global_node);
+ break;
+ } else if (err)
+- pr_err("dmaengine: failed to get %s: (%d)\n",
+- dma_chan_name(chan), err);
++ pr_debug("%s: failed to get %s: (%d)\n",
++ __func__, dma_chan_name(chan), err);
+ }
+ }
+
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 4799393..b97d4f0 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
+ fill_bus_reset_event(&bus_reset, client);
+- ret = copy_to_user(u64_to_uptr(a->bus_reset),
+- &bus_reset, sizeof(bus_reset));
++ /* unaligned size of bus_reset is 36 bytes */
++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ }
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 0535c21..3e60e8d 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -435,12 +435,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf)
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+- if (var->Attributes & 0x1)
++ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+ str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+- if (var->Attributes & 0x2)
++ if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+- if (var->Attributes & 0x4)
++ if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
++ str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
++ if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes &
++ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
++ str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+ return str - buf;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index e48e01e..33e1555 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1543,16 +1543,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+- if (obj->fenced_gpu_access) {
+- struct drm_i915_fence_reg *reg;
+-
+- BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
++ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+- reg = &dev_priv->fence_regs[obj->fence_reg];
+- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
++ /* Bump MRU to take account of the delayed flush */
++ if (obj->fence_reg != I915_FENCE_REG_NONE) {
++ struct drm_i915_fence_reg *reg;
++
++ reg = &dev_priv->fence_regs[obj->fence_reg];
++ list_move_tail(&reg->lru_list,
++ &dev_priv->mm.fence_list);
++ }
+ }
+ }
+
+@@ -1561,6 +1564,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+ {
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
++ obj->last_fenced_seqno = 0;
+ }
+
+ static void
+@@ -1589,6 +1593,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
++ obj->last_fenced_ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index a6c2f7a..1202198 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -574,7 +574,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ if (ret)
+ break;
+ }
+- obj->pending_fenced_gpu_access = need_fence;
++ obj->pending_fenced_gpu_access =
++ !!(entry->flags & EXEC_OBJECT_NEEDS_FENCE);
+ }
+
+ entry->offset = obj->gtt_offset;
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 31d334d..861223b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -107,10 +107,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+- } else if (IS_MOBILE(dev)) {
++ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+- /* On mobile 9xx chipsets, channel interleave by the CPU is
++ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index c8b5bc1..2812d7b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -530,6 +530,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK_IVB)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK_IVB)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -540,12 +546,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK_IVB)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK_IVB)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+@@ -622,6 +622,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -632,12 +638,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4a5e662..a294a32 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -401,6 +401,9 @@
+ # define VS_TIMER_DISPATCH (1 << 6)
+ # define MI_FLUSH_ENABLE (1 << 11)
+
++#define GEN6_GT_MODE 0x20d0
++#define GEN6_GT_MODE_HI (1 << 9)
++
+ #define GFX_MODE 0x02520
+ #define GFX_MODE_GEN7 0x0229c
+ #define GFX_RUN_LIST_ENABLE (1<<15)
+@@ -1557,6 +1560,10 @@
+
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
++ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
++ * of the infoframe structure specified by CEA-861. */
++#define VIDEO_DIP_DATA_SIZE 32
+ #define VIDEO_DIP_CTL 0x61170
+ #define VIDEO_DIP_ENABLE (1 << 31)
+ #define VIDEO_DIP_PORT_B (1 << 29)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6c3fb44..adac0dd 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2850,13 +2850,34 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
+ I915_WRITE_CTL(ring, tmp);
+ }
+
++static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long flags;
++ bool pending;
++
++ if (atomic_read(&dev_priv->mm.wedged))
++ return false;
++
++ spin_lock_irqsave(&dev->event_lock, flags);
++ pending = to_intel_crtc(crtc)->unpin_work != NULL;
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++
++ return pending;
++}
++
+ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (crtc->fb == NULL)
+ return;
+
++ wait_event(dev_priv->pending_flip_queue,
++ !intel_crtc_has_pending_flip(crtc));
++
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
+@@ -5027,7 +5048,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+@@ -5495,7 +5516,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+@@ -6952,9 +6973,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+- if (atomic_read(&obj->pending_flip) == 0)
+- wake_up(&dev_priv->pending_flip_queue);
+
++ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+@@ -7193,7 +7213,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+- goto err;
++ goto err_unpin;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+@@ -8278,6 +8298,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
++
++ /* The default value should be 0x200 according to docs, but the two
++ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
++ I915_WRITE(GEN6_GT_MODE, 0xffff << 16);
++ I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI);
+ }
+
+ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index c2a64f4..497da2a 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -138,14 +138,20 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(VIDEO_DIP_DATA, 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
++ POSTING_READ(VIDEO_DIP_CTL);
+ }
+
+ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+@@ -168,14 +174,20 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
++ POSTING_READ(reg);
+ }
+ static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+@@ -546,10 +558,13 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ I915_WRITE(VIDEO_DIP_CTL, 0);
++ POSTING_READ(VIDEO_DIP_CTL);
+ } else {
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+- for_each_pipe(i)
++ for_each_pipe(i) {
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
++ POSTING_READ(TVIDEO_DIP_CTL(i));
++ }
+ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index fc0633c..b61f490 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -37,6 +37,16 @@
+ #define EVERGREEN_PFP_UCODE_SIZE 1120
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+
++static const u32 crtc_offsets[6] =
++{
++ EVERGREEN_CRTC0_REGISTER_OFFSET,
++ EVERGREEN_CRTC1_REGISTER_OFFSET,
++ EVERGREEN_CRTC2_REGISTER_OFFSET,
++ EVERGREEN_CRTC3_REGISTER_OFFSET,
++ EVERGREEN_CRTC4_REGISTER_OFFSET,
++ EVERGREEN_CRTC5_REGISTER_OFFSET
++};
++
+ static void evergreen_gpu_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+@@ -66,6 +76,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+ }
+ }
+
++void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
++{
++ int i;
++
++ if (crtc >= rdev->num_crtc)
++ return;
++
++ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
++ break;
++ udelay(1);
++ }
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
++ break;
++ udelay(1);
++ }
++ }
++}
++
+ void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+ {
+ /* enable the pflip int */
+@@ -1065,116 +1096,88 @@ void evergreen_agp_enable(struct radeon_device *rdev)
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
++ u32 crtc_enabled, tmp, frame_count, blackout;
++ int i, j;
++
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+- /* Stop all video */
++ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++ /* blank the display controllers */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
++ if (crtc_enabled) {
++ save->crtc_enabled[i] = true;
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
++ dce4_wait_for_vblank(rdev, i);
++ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ }
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
+ }
+
+- WREG32(D1VGA_CONTROL, 0);
+- WREG32(D2VGA_CONTROL, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
++ evergreen_mc_wait_for_idle(rdev);
++
++ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
++ /* Block CPU access */
++ WREG32(BIF_FB_EN, 0);
++ /* blackout the MC */
++ blackout &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
++ u32 tmp, frame_count;
++ int i, j;
+
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ /* update crtc base addresses */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+-
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+- /* Unlock host access */
++
++ /* unblackout the MC */
++ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ tmp &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
++ /* allow CPU access */
++ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
++
++ for (i = 0; i < rdev->num_crtc; i++) {
++ if (save->crtc_enabled[i]) {
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
++ }
++ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index 7d7f215..e022776 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -210,7 +210,10 @@
+ #define EVERGREEN_CRTC_CONTROL 0x6e70
+ # define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
++#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
++# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+ #define EVERGREEN_CRTC_STATUS 0x6e8c
++# define EVERGREEN_CRTC_V_BLANK (1 << 0)
+ #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+ #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
+ #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 6ecd23f..fe44a95 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -77,6 +77,10 @@
+
+ #define CONFIG_MEMSIZE 0x5428
+
++#define BIF_FB_EN 0x5490
++#define FB_READ_EN (1 << 0)
++#define FB_WRITE_EN (1 << 1)
++
+ #define CP_ME_CNTL 0x86D8
+ #define CP_ME_HALT (1 << 28)
+ #define CP_PFP_HALT (1 << 26)
+@@ -194,6 +198,9 @@
+ #define NOOFCHAN_MASK 0x00003000
+ #define MC_SHARED_CHREMAP 0x2008
+
++#define MC_SHARED_BLACKOUT_CNTL 0x20ac
++#define BLACKOUT_MODE_MASK 0x00000007
++
+ #define MC_ARB_RAMCFG 0x2760
+ #define NOOFBANK_SHIFT 0
+ #define NOOFBANK_MASK 0x00000003
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5ce9402..5aa6670 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -386,6 +386,7 @@ void r700_cp_fini(struct radeon_device *rdev);
+ struct evergreen_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
++ bool crtc_enabled[RADEON_MAX_CRTCS];
+ };
+
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index baa019e..4f9496e 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -143,6 +143,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
++ /* Gateway RS690 only seems to work with MSIs. */
++ if ((rdev->pdev->device == 0x791f) &&
++ (rdev->pdev->subsystem_vendor == 0x107b) &&
++ (rdev->pdev->subsystem_device == 0x0185))
++ return true;
++
++ /* try and enable MSIs by default on all RS690s */
++ if (rdev->family == CHIP_RS690)
++ return true;
++
+ /* RV515 seems to have MSI issues where it loses
+ * MSI rearms occasionally. This leads to lockups and freezes.
+ * disable it by default.
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 78a665b..ebd6c51 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -553,7 +553,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
+ void radeon_pm_resume(struct radeon_device *rdev)
+ {
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+@@ -608,7 +610,9 @@ int radeon_pm_init(struct radeon_device *rdev)
+ radeon_pm_print_states(rdev);
+ radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index fe2fdbb..1740b82 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev)
+
+ netif_stop_queue(dev);
+
+- ipoib_ib_dev_down(dev, 0);
++ ipoib_ib_dev_down(dev, 1);
+ ipoib_ib_dev_stop(dev, 0);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index e5069b4..80799c0 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -190,7 +190,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+
+ mcast->mcmember = *mcmember;
+
+- /* Set the cached Q_Key before we attach if it's the broadcast group */
++ /* Set the multicast MTU and cached Q_Key before we attach if it's
++ * the broadcast group.
++ */
+ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid))) {
+ spin_lock_irq(&priv->lock);
+@@ -198,10 +200,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ spin_unlock_irq(&priv->lock);
+ return -EAGAIN;
+ }
++ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+ spin_unlock_irq(&priv->lock);
+ priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
+ set_qkey = 1;
++
++ if (!ipoib_cm_admin_enabled(dev)) {
++ rtnl_lock();
++ dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
++ rtnl_unlock();
++ }
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+@@ -590,14 +599,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ return;
+ }
+
+- priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+-
+- if (!ipoib_cm_admin_enabled(dev)) {
+- rtnl_lock();
+- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+- rtnl_unlock();
+- }
+-
+ ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
+
+ clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index c76b051..4ec049d 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -620,9 +620,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+ if (scmnd) {
++ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+- srp_free_req(target, req, scmnd, 0);
+ }
+ }
+
+@@ -1669,6 +1669,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ SRP_TSK_ABORT_TASK);
+ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
++ scmnd->scsi_done(scmnd);
+
+ return SUCCESS;
+ }
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 96532bc..7be5fd9 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -53,14 +53,19 @@
+ #define ABS_POS_BITS 13
+
+ /*
+- * Any position values from the hardware above the following limits are
+- * treated as "wrapped around negative" values that have been truncated to
+- * the 13-bit reporting range of the hardware. These are just reasonable
+- * guesses and can be adjusted if hardware is found that operates outside
+- * of these parameters.
++ * These values should represent the absolute maximum value that will
++ * be reported for a positive position value. Some Synaptics firmware
++ * uses this value to indicate a finger near the edge of the touchpad
++ * whose precise position cannot be determined.
++ *
++ * At least one touchpad is known to report positions in excess of this
++ * value which are actually negative values truncated to the 13-bit
++ * reporting range. These values have never been observed to be lower
++ * than 8184 (i.e. -8), so we treat all values greater than 8176 as
++ * negative and any other value as positive.
+ */
+-#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
+-#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++#define X_MAX_POSITIVE 8176
++#define Y_MAX_POSITIVE 8176
+
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+@@ -561,11 +566,21 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
+- /* Convert wrap-around values to negative */
++ /*
++ * Convert wrap-around values to negative. (X|Y)_MAX_POSITIVE
++ * is used by some firmware to indicate a finger at the edge of
++ * the touchpad whose precise position cannot be determined, so
++ * convert these values to the maximum axis value.
++ */
+ if (hw->x > X_MAX_POSITIVE)
+ hw->x -= 1 << ABS_POS_BITS;
++ else if (hw->x == X_MAX_POSITIVE)
++ hw->x = XMAX;
++
+ if (hw->y > Y_MAX_POSITIVE)
+ hw->y -= 1 << ABS_POS_BITS;
++ else if (hw->y == Y_MAX_POSITIVE)
++ hw->y = YMAX;
+
+ return 0;
+ }
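
Aside: the Synaptics decode now folds two cases into the 13-bit raw value — anything above X_MAX_POSITIVE wraps to a negative, and X_MAX_POSITIVE itself marks a finger at the edge. A standalone sketch (XMAX here is an illustrative axis maximum, not the driver's constant):

#include <stdio.h>

#define ABS_POS_BITS   13
#define X_MAX_POSITIVE 8176
#define XMAX           8191	/* illustrative axis maximum */

/* Decode a raw 13-bit coordinate as the patched driver does. */
static int decode_x(int raw)
{
	if (raw > X_MAX_POSITIVE)
		return raw - (1 << ABS_POS_BITS);	/* wrapped negative */
	if (raw == X_MAX_POSITIVE)
		return XMAX;				/* edge marker */
	return raw;
}

int main(void)
{
	printf("%d %d %d\n", decode_x(8184), decode_x(8176), decode_x(100));
	/* prints: -8 8191 100 */
	return 0;
}
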
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index ccf347f..b9062c0 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -563,7 +563,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+ {
+ int i;
+
+- domain->iommu_coherency = 1;
++ i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
++
++ domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ if (!ecap_coherent(g_iommus[i]->ecap)) {
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0e49c99..c06992e 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ rdev = rc_allocate_device();
+ if (!rdev)
+ goto failure;
++ itdev->rdev = rdev;
+
+ ret = -ENODEV;
+
+@@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ if (ret)
+ goto failure;
+
+- itdev->rdev = rdev;
+ ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+
+ return 0;
+diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
+index 1c44f78..6ddc769 100644
+--- a/drivers/media/video/gspca/pac7302.c
++++ b/drivers/media/video/gspca/pac7302.c
+@@ -1197,6 +1197,8 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x262a)},
+ {USB_DEVICE(0x093a, 0x262c)},
++ {USB_DEVICE(0x145f, 0x013c)},
++ {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
+ {}
+ };
+ MODULE_DEVICE_TABLE(usb, device_table);
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d5fe43d..bc27065 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2188,9 +2188,7 @@ static int omap_hsmmc_suspend(struct device *dev)
+ } else {
+ host->suspended = 0;
+ if (host->pdata->resume) {
+- ret = host->pdata->resume(&pdev->dev,
+- host->slot_id);
+- if (ret)
++ if (host->pdata->resume(&pdev->dev, host->slot_id))
+ dev_dbg(mmc_dev(host->mmc),
+ "Unmask interrupt failed\n");
+ }
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 0d33ff0..06af9e4 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -601,7 +601,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+
+ sdhci_remove_host(host, 1);
+
+- for (ptr = 0; ptr < 3; ptr++) {
++ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index d5505f3..559d30d 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1003,6 +1003,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ }
++ if (host->state == STATE_IDLE) {
++ dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
++ return IRQ_HANDLED;
++ }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+ complete(&host->intr_wait);
+ else
+diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
+index e5bfd0e..0598d52 100644
+--- a/drivers/mtd/maps/autcpu12-nvram.c
++++ b/drivers/mtd/maps/autcpu12-nvram.c
+@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
+
+ static int __init init_autcpu12_sram (void)
+ {
+- int err, save0, save1;
++ map_word tmp, save0, save1;
++ int err;
+
+ autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
+ if (!autcpu12_sram_map.virt) {
+@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
+ err = -EIO;
+ goto out;
+ }
+- simple_map_init(&autcpu_sram_map);
++ simple_map_init(&autcpu12_sram_map);
+
+ /*
+ * Check for 32K/128K
+@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
+ * Read and check result on ofs 0x0
+ * Restore contents
+ */
+- save0 = map_read32(&autcpu12_sram_map,0);
+- save1 = map_read32(&autcpu12_sram_map,0x10000);
+- map_write32(&autcpu12_sram_map,~save0,0x10000);
++ save0 = map_read(&autcpu12_sram_map, 0);
++ save1 = map_read(&autcpu12_sram_map, 0x10000);
++ tmp.x[0] = ~save0.x[0];
++ map_write(&autcpu12_sram_map, tmp, 0x10000);
+ /* if we find this pattern on 0x0, we have 32K size
+ * restore contents and exit
+ */
+- if ( map_read32(&autcpu12_sram_map,0) != save0) {
+- map_write32(&autcpu12_sram_map,save0,0x0);
++ tmp = map_read(&autcpu12_sram_map, 0);
++ if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
++ map_write(&autcpu12_sram_map, save0, 0x0);
+ goto map;
+ }
+ /* We have a 128K found, restore 0x10000 and set size
+ * to 128K
+ */
+- map_write32(&autcpu12_sram_map,save1,0x10000);
++ map_write(&autcpu12_sram_map, save1, 0x10000);
+ autcpu12_sram_map.size = SZ_128K;
+
+ map:
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index a0bd2de..198da0a 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -748,6 +748,8 @@ static const char *default_mtd_part_types[] = {
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
++ * Note: if there is more than one parser in @types, the kernel only takes
++ * the partitions parsed out by the first parser to succeed.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+@@ -772,11 +774,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ if (!parser)
+ continue;
+ ret = (*parser->parse_fn)(master, pparts, data);
++ put_partition_parser(parser);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
++ break;
+ }
+- put_partition_parser(parser);
+ }
+ return ret;
+ }
+diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
+index f024375..532da04 100644
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+- mtd->writesize, td);
++ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 83e8e1b..ade0da0 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
++ retval = -EINVAL;
+ goto err_exit;
+ }
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index f745f00..297c965 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1132,7 +1132,8 @@ static int omap_nand_remove(struct platform_device *pdev)
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+- kfree(&info->mtd);
++ release_mem_region(info->phys_base, NAND_IO_SIZE);
++ kfree(info);
+ return 0;
+ }
+
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 6c3fb5a..1f9c363 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
++ if (ubi->ro_mode) {
++ ubi_warn("skip auto-resize because of R/O mode");
++ return 0;
++ }
++
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
+index b99318e..b2b62de 100644
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -997,7 +997,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+- if (ec_err)
++ if (ec_err || bitflips)
+ err = add_to_list(si, pnum, ec, 1, &si->erase);
+ else
+ err = add_to_list(si, pnum, ec, 0, &si->free);
+diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
+index 5fedc33..d8f2b5b 100644
+--- a/drivers/net/can/mscan/mpc5xxx_can.c
++++ b/drivers/net/can/mscan/mpc5xxx_can.c
+@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+- if (!sys_clk) {
++ if (IS_ERR(sys_clk)) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+- if (!ref_clk) {
++ if (IS_ERR(ref_clk)) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 0549261..c5f6b0e 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4720,8 +4720,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ netif_device_detach(netdev);
+
+- mutex_lock(&adapter->mutex);
+-
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+@@ -4729,10 +4727,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ #ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+- if (retval) {
+- mutex_unlock(&adapter->mutex);
++ if (retval)
+ return retval;
+- }
+ #endif
+
+ status = er32(STATUS);
+@@ -4789,8 +4785,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+- mutex_unlock(&adapter->mutex);
+-
+ pci_disable_device(pdev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed1be8a..4b43bc5 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -327,6 +327,8 @@ enum rtl_registers {
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
++#define PME_SIGNAL (1 << 5) /* 8168c and later */
++
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+@@ -1360,7 +1362,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+- { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+@@ -1368,16 +1369,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
++ u8 options;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+- u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
++ options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
++ options = RTL_R8(Config1) & ~PMEnable;
++ if (wolopts)
++ options |= PMEnable;
++ RTL_W8(Config1, options);
++ break;
++ default:
++ options = RTL_R8(Config2) & ~PME_SIGNAL;
++ if (wolopts)
++ options |= PME_SIGNAL;
++ RTL_W8(Config2, options);
++ break;
++ }
++
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
+
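
Aside: __rtl8169_set_wol() keeps its table-driven scheme — each WOL option maps to a (register, mask) pair that is first cleared and then re-set if the option is requested; only the PMEnable/PME_SIGNAL handling moves into the per-chip switch. A userspace sketch of the table pattern (registers modeled as an array, register/mask pairs hypothetical):

#include <stdint.h>
#include <stdio.h>

#define WAKE_PHY   (1 << 0)	/* values as in linux/ethtool.h */
#define WAKE_MAGIC (1 << 5)

static uint8_t regs[4];		/* stand-in for MMIO config registers */

static const struct { uint32_t opt; int reg; uint8_t mask; } cfg[] = {
	{ WAKE_PHY,   2, 1 << 4 },	/* hypothetical register/mask pairs */
	{ WAKE_MAGIC, 2, 1 << 5 },
};

static void set_wol(uint32_t wolopts)
{
	size_t i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		uint8_t v = regs[cfg[i].reg] & ~cfg[i].mask;

		if (wolopts & cfg[i].opt)
			v |= cfg[i].mask;
		regs[cfg[i].reg] = v;
	}
}

int main(void)
{
	set_wol(WAKE_MAGIC);
	printf("reg2 = 0x%02x\n", regs[2]);	/* 0x20 */
	return 0;
}
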
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 7145714..c0f097b 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -79,6 +79,7 @@ static int rionet_capable = 1;
+ * on system trade-offs.
+ */
+ static struct rio_dev **rionet_active;
++static int nact; /* total number of active rionet peers */
+
+ #define is_rionet_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ u16 destid;
+ unsigned long flags;
++ int add_num = 1;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&rnet->tx_lock)) {
+@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ return NETDEV_TX_LOCKED;
+ }
+
+- if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
++ if (is_multicast_ether_addr(eth->h_dest))
++ add_num = nact;
++
++ if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&rnet->tx_lock, flags);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ }
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
++ int count = 0;
+ for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
+ i++)
+- if (rionet_active[i])
++ if (rionet_active[i]) {
+ rionet_queue_tx_msg(skb, ndev,
+ rionet_active[i]);
++ if (count)
++ atomic_inc(&skb->users);
++ count++;
++ }
+ } else if (RIONET_MAC_MATCH(eth->h_dest)) {
+ destid = RIONET_GET_DESTID(eth->h_dest);
+ if (rionet_active[destid])
+@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
+ if (info == RIONET_DOORBELL_JOIN) {
+ if (!rionet_active[sid]) {
+ list_for_each_entry(peer, &rionet_peers, node) {
+- if (peer->rdev->destid == sid)
++ if (peer->rdev->destid == sid) {
+ rionet_active[sid] = peer->rdev;
++ nact++;
++ }
+ }
+ rio_mport_send_doorbell(mport, sid,
+ RIONET_DOORBELL_JOIN);
+ }
+ } else if (info == RIONET_DOORBELL_LEAVE) {
+ rionet_active[sid] = NULL;
++ nact--;
+ } else {
+ if (netif_msg_intr(rnet))
+ printk(KERN_WARNING "%s: unhandled doorbell\n",
+@@ -524,6 +537,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+
+ rc = rionet_setup_netdev(rdev->net->hport, ndev);
+ rionet_check = 1;
++ nact = 0;
+ }
+
+ /*
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 1883d39..f7e17a0 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -122,8 +122,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
+ if (!parent)
+ return;
+
+- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+- /* Bluetooth coexistance requires disabling ASPM. */
++ if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
++ (AR_SREV_9285(ah))) {
++		/* Bluetooth coexistence requires disabling ASPM for AR9285. */
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
+ aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index dfee1b3..9005380 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -658,8 +658,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+
+ /* Check if setup is sensible at all */
+ if (!pass &&
+- (primary != bus->number || secondary <= bus->number)) {
+- dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
++ (primary != bus->number || secondary <= bus->number ||
++ secondary > subordinate)) {
++ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
++ secondary, subordinate);
+ broken = 1;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 0860181..4f1b10b 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+
+ rwlock_init(&port->unit_list_lock);
+ INIT_LIST_HEAD(&port->unit_list);
++ atomic_set(&port->units, 0);
+
+ INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
+ INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
+index 96f13ad8..79a6afe 100644
+--- a/drivers/s390/scsi/zfcp_ccw.c
++++ b/drivers/s390/scsi/zfcp_ccw.c
+@@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+ }
+
+-static int zfcp_ccw_activate(struct ccw_device *cdev)
+-
++/**
++ * zfcp_ccw_activate - activate adapter and wait for it to finish
++ * @cdev: pointer to belonging ccw device
++ * @clear: Status flags to clear.
++ * @tag: s390dbf trace record tag
++ */
++static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
++ zfcp_erp_clear_adapter_status(adapter, clear);
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+- "ccresu2");
++ tag);
+ zfcp_erp_wait(adapter);
+ flush_work(&adapter->scan_work);
+
+@@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
+ adapter->req_no = 0;
+
+- zfcp_ccw_activate(cdev);
++ zfcp_ccw_activate(cdev, 0, "ccsonl1");
+ zfcp_ccw_adapter_put(adapter);
+ return 0;
+ }
+
+ /**
+- * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
++ * @set: Status flags to set.
++ * @tag: s390dbf trace record tag
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
+- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
++ zfcp_erp_set_adapter_status(adapter, set);
++ zfcp_erp_adapter_shutdown(adapter, 0, tag);
+ zfcp_erp_wait(adapter);
+
+ zfcp_ccw_adapter_put(adapter);
+@@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+ }
+
+ /**
++ * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * @cdev: pointer to belonging ccw device
++ *
++ * This function gets called by the common i/o layer and sets an adapter
++ * into state offline.
++ */
++static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++{
++ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
++}
++
++/**
+ * zfcp_ccw_notify - ccw notify function
+ * @cdev: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+@@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+
+ switch (event) {
+ case CIO_GONE:
++ if (atomic_read(&adapter->status) &
++		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* ignore notification */
++ zfcp_dbf_hba_basic("ccnigo1", adapter);
++ break;
++ }
+ dev_warn(&cdev->dev, "The FCP device has been detached\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
+ break;
+@@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
+ break;
+ case CIO_OPER:
++ if (atomic_read(&adapter->status) &
++		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* ignore notification */
++ zfcp_dbf_hba_basic("ccniop1", adapter);
++ break;
++ }
+ dev_info(&cdev->dev, "The FCP device is operational again\n");
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
+@@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
+ zfcp_ccw_adapter_put(adapter);
+ }
+
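++/*
++ * Power-management callbacks: flag the adapter SUSPENDED around the
++ * offline/online transitions so that CIO_GONE/CIO_OPER notifications
++ * raised during hibernation are ignored by zfcp_ccw_notify() above.
++ */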
++static int zfcp_ccw_suspend(struct ccw_device *cdev)
++{
++ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
++ return 0;
++}
++
++static int zfcp_ccw_thaw(struct ccw_device *cdev)
++{
++	/* trace records for thaw and the final shutdown during suspend
++	   can only be found in a system dump until the end of suspend,
++	   not after resume, because resume starts from the memory image
++	   taken right after the very first suspend (freeze) callback */
++ zfcp_ccw_activate(cdev, 0, "ccthaw1");
++ return 0;
++}
++
++static int zfcp_ccw_resume(struct ccw_device *cdev)
++{
++ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
++ return 0;
++}
++
+ struct ccw_driver zfcp_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+@@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
+ .set_offline = zfcp_ccw_set_offline,
+ .notify = zfcp_ccw_notify,
+ .shutdown = zfcp_ccw_shutdown,
+- .freeze = zfcp_ccw_set_offline,
+- .thaw = zfcp_ccw_activate,
+- .restore = zfcp_ccw_activate,
++ .freeze = zfcp_ccw_suspend,
++ .thaw = zfcp_ccw_thaw,
++ .restore = zfcp_ccw_resume,
+ };
+diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
+index fab2c25..8ed63aa 100644
+--- a/drivers/s390/scsi/zfcp_cfdc.c
++++ b/drivers/s390/scsi/zfcp_cfdc.c
+@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host) {
++ shost_for_each_device(sdev, adapter->scsi_host) {
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ status = atomic_read(&zfcp_sdev->status);
+ if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index a9a816e..79b9848 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ length = min((u16)sizeof(struct qdio_buffer),
+ (u16)ZFCP_DBF_PAY_MAX_REC);
+
+- while ((char *)pl[payload->counter] && payload->counter < scount) {
++ while (payload->counter < scount && (char *)pl[payload->counter]) {
+ memcpy(payload->data, (char *)pl[payload->counter], length);
+ debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ payload->counter++;
+@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ spin_unlock_irqrestore(&dbf->pay_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_hba_basic - trace event for basic adapter events
++ * @adapter: pointer to struct zfcp_adapter
++ */
++void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
++{
++ struct zfcp_dbf *dbf = adapter->dbf;
++ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dbf->hba_lock, flags);
++ memset(rec, 0, sizeof(*rec));
++
++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->id = ZFCP_DBF_HBA_BASIC;
++
++ debug_event(dbf->hba, 1, rec, sizeof(*rec));
++ spin_unlock_irqrestore(&dbf->hba_lock, flags);
++}
++
+ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 714f087..3ac7a4b 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
+ ZFCP_DBF_HBA_RES = 1,
+ ZFCP_DBF_HBA_USS = 2,
+ ZFCP_DBF_HBA_BIT = 3,
++ ZFCP_DBF_HBA_BASIC = 4,
+ };
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
+index ed5d921..f172b84 100644
+--- a/drivers/s390/scsi/zfcp_def.h
++++ b/drivers/s390/scsi/zfcp_def.h
+@@ -77,6 +77,7 @@ struct zfcp_reqlist;
+ #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
+ #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
+ #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
++#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
+ #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
+ #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+ #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
+@@ -204,6 +205,7 @@ struct zfcp_port {
+ struct zfcp_adapter *adapter; /* adapter used to access port */
+ struct list_head unit_list; /* head of logical unit list */
+ rwlock_t unit_list_lock; /* unit list lock */
++ atomic_t units; /* zfcp_unit count */
+ atomic_t status; /* status of this remote port */
+ u64 wwnn; /* WWNN if known */
+ u64 wwpn; /* WWPN */
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 2302e1c..ef9e502 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
++extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+@@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+ extern struct attribute_group zfcp_sysfs_unit_attrs;
+ extern struct attribute_group zfcp_sysfs_adapter_attrs;
+ extern struct attribute_group zfcp_sysfs_port_attrs;
++extern struct mutex zfcp_sysfs_port_units_mutex;
+ extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+ extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index e9a787e..8c849f0 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+ return;
+ }
+
+- zfcp_dbf_hba_fsf_uss("fssrh_2", req);
++ zfcp_dbf_hba_fsf_uss("fssrh_4", req);
+
+ switch (sr_buf->status_type) {
+ case FSF_STATUS_READ_PORT_CLOSED:
+@@ -771,12 +771,14 @@ out:
+ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ if (fsq->word[0] == fsq->word[1]) {
+@@ -885,7 +887,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+
+ switch (header->fsf_status) {
+ case FSF_GOOD:
+- zfcp_dbf_san_res("fsscth1", req);
++ zfcp_dbf_san_res("fsscth2", req);
+ ct->status = 0;
+ break;
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+@@ -1739,13 +1741,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct zfcp_adapter *adapter = req->adapter;
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+ struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+ ZFCP_STATUS_COMMON_ACCESS_BOXED |
+ ZFCP_STATUS_LUN_SHARED |
+@@ -1856,11 +1860,13 @@ out:
+ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
+@@ -1950,7 +1956,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+ {
+ struct fsf_qual_latency_info *lat_in;
+ struct latency_cont *lat = NULL;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct zfcp_blk_drv_data blktrc;
+ int ticks = req->adapter->timer_ticks;
+
+@@ -1965,6 +1971,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
++ zfcp_sdev = sdev_to_zfcp(scsi->device);
+ blktrc.flags |= ZFCP_BLK_LAT_VALID;
+ blktrc.channel_lat = lat_in->channel_lat * ticks;
+ blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+@@ -2002,12 +2009,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+ {
+ struct scsi_cmnd *scmnd = req->data;
+ struct scsi_device *sdev = scmnd->device;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+
+ if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (header->fsf_status) {
+ case FSF_HANDLE_MISMATCH:
+ case FSF_PORT_HANDLE_NOT_VALID:
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e14da57..e76d003 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+ {
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+ struct zfcp_adapter *adapter = qdio->adapter;
+- struct qdio_buffer_element *sbale;
+ int sbal_no, sbal_idx;
+- void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+- u64 req_id;
+- u8 scount;
+
+ if (unlikely(qdio_err)) {
+- memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
++ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
++ struct qdio_buffer_element *sbale;
++ u64 req_id;
++ u8 scount;
++
++ memset(pl, 0,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ sbale = qdio->res_q[idx]->element;
+ req_id = (u64) sbale->addr;
+- scount = sbale->scount + 1; /* incl. signaling SBAL */
++ scount = min(sbale->scount + 1,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
++ /* incl. signaling SBAL */
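++			/* clamped so the pl[] array above cannot overflow */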
+
+ for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+ sbal_idx = (idx + sbal_no) %
+diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
+index cdc4ff7..9e62210 100644
+--- a/drivers/s390/scsi/zfcp_sysfs.c
++++ b/drivers/s390/scsi/zfcp_sysfs.c
+@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+ zfcp_sysfs_port_rescan_store);
+
++DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
++
+ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ else
+ retval = 0;
+
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) > 0) {
++ retval = -EBUSY;
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ goto out;
++ }
++ /* port is about to be removed, so no more unit_add */
++ atomic_set(&port->units, -1);
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++
+ write_lock_irq(&adapter->port_list_lock);
+ list_del(&port->list);
+ write_unlock_irq(&adapter->port_list_lock);
+@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+ {
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ u64 fcp_lun;
++ int retval;
+
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ return -EINVAL;
+
+- if (zfcp_unit_add(port, fcp_lun))
+- return -EINVAL;
++ retval = zfcp_unit_add(port, fcp_lun);
++ if (retval)
++ return retval;
+
+ return count;
+ }
+diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
+index 20796eb..4e6a535 100644
+--- a/drivers/s390/scsi/zfcp_unit.c
++++ b/drivers/s390/scsi/zfcp_unit.c
+@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
+ {
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+- put_device(&unit->port->dev);
++ atomic_dec(&unit->port->units);
+ kfree(unit);
+ }
+
+@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
+ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ {
+ struct zfcp_unit *unit;
++ int retval = 0;
++
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) == -1) {
++ /* port is already gone */
++ retval = -ENODEV;
++ goto out;
++ }
+
+ unit = zfcp_unit_find(port, fcp_lun);
+ if (unit) {
+ put_device(&unit->dev);
+- return -EEXIST;
++ retval = -EEXIST;
++ goto out;
+ }
+
+ unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+- if (!unit)
+- return -ENOMEM;
++ if (!unit) {
++ retval = -ENOMEM;
++ goto out;
++ }
+
+ unit->port = port;
+ unit->fcp_lun = fcp_lun;
+@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ if (dev_set_name(&unit->dev, "0x%016llx",
+ (unsigned long long) fcp_lun)) {
+ kfree(unit);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+- get_device(&port->dev);
+-
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+ if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+ device_unregister(&unit->dev);
+- return -EINVAL;
++ retval = -EINVAL;
++ goto out;
+ }
+
++ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
++
+ write_lock_irq(&port->unit_list_lock);
+ list_add_tail(&unit->list, &port->unit_list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ zfcp_unit_scsi_scan(unit);
+
+- return 0;
++out:
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ return retval;
+ }
+
+ /**
+diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
+index 7e6eca4..59fc5a1 100644
+--- a/drivers/scsi/atp870u.c
++++ b/drivers/scsi/atp870u.c
+@@ -1174,7 +1174,16 @@ wait_io1:
+ outw(val, tmport);
+ outb(2, 0x80);
+ TCM_SYNC:
+- udelay(0x800);
++ /*
++	 * The funny division into multiple delays is to accommodate
++ * arches like ARM where udelay() multiplies its argument by
++ * a large number to initialize a loop counter. To avoid
++ * overflow, the maximum supported udelay is 2000 microseconds.
++ *
++ * XXX it would be more polite to find a way to use msleep()
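++	 * (mdelay(2) + udelay(48) preserves the original total of 0x800 == 2048 us.)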
++ */
++ mdelay(2);
++ udelay(48);
+ if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
+ outw(0, tmport--);
+ outb(0, tmport);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 4ef0212..e5a4423 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -578,8 +578,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+ h->state = TPGS_STATE_STANDBY;
+ break;
+ case TPGS_STATE_OFFLINE:
+- case TPGS_STATE_UNAVAILABLE:
+- /* Path unusable for unavailable/offline */
++ /* Path unusable */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index be9aad8..22523aa 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -532,12 +532,42 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+ }
+
++static int is_firmware_flash_cmd(u8 *cdb)
++{
++ return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
++}
++
++/*
++ * During firmware flash, the heartbeat register may not update as frequently
++ * as it should. So we dial down lockup detection during firmware flash, and
++ * dial it back up when firmware flash completes.
++ */
++#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
++#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
++static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (!is_firmware_flash_cmd(c->Request.CDB))
++ return;
++ atomic_inc(&h->firmware_flash_in_progress);
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
++}
++
++static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (is_firmware_flash_cmd(c->Request.CDB) &&
++ atomic_dec_and_test(&h->firmware_flash_in_progress))
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
++}
++
+ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+ {
+ unsigned long flags;
+
+ set_performant_mode(h, c);
++ dial_down_lockup_detection_during_fw_flash(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+@@ -2926,7 +2956,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ c->Request.Timeout = 0; /* Don't time out */
+ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ c->Request.CDB[0] = cmd;
+- c->Request.CDB[1] = 0x03; /* Reset target above */
++ c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+@@ -3032,6 +3062,7 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ {
+ removeQ(c);
++ dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
+ if (likely(c->cmd_type == CMD_SCSI))
+ complete_scsi_command(c);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+@@ -4172,9 +4203,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+-#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+-#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+-
+ static void detect_controller_lockup(struct ctlr_info *h)
+ {
+ u64 now;
+@@ -4185,7 +4213,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ now = get_jiffies_64();
+ /* If we've received an interrupt recently, we're ok. */
+ if (time_after64(h->last_intr_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /*
+@@ -4194,7 +4222,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ * otherwise don't care about signals in this thread.
+ */
+ if (time_after64(h->last_heartbeat_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /* If heartbeat has not changed since we last looked, we're not ok. */
+@@ -4236,6 +4264,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+ {
+ unsigned long flags;
+
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 91edafb..c721509 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -124,6 +124,8 @@ struct ctlr_info {
+ u64 last_intr_timestamp;
+ u32 last_heartbeat;
+ u64 last_heartbeat_timestamp;
++ u32 heartbeat_sample_interval;
++ atomic_t firmware_flash_in_progress;
+ u32 lockup_detected;
+ struct list_head lockup_list;
+ };
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 3fd4715..e4ea0a3 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -163,6 +163,7 @@ struct SenseSubsystem_info {
+ #define BMIC_WRITE 0x27
+ #define BMIC_CACHE_FLUSH 0xc2
+ #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
++#define BMIC_FLASH_FIRMWARE 0xF7
+
+ /* Command List Structure */
+ union SCSI3Addr {
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index 3d391dc..36aca4b 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -1547,6 +1547,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
+
+ host_config = &evt_struct->iu.mad.host_config;
+
++ /* The transport length field is only 16-bit */
++ length = min(0xffff, length);
++
+ /* Set up a lun reset SRP command */
+ memset(host_config, 0x00, sizeof(*host_config));
+ host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 83d08b6..5c8b0dc 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -469,7 +469,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+- devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
+index b5f4341..7cd637d 100644
+--- a/drivers/scsi/isci/probe_roms.c
++++ b/drivers/scsi/isci/probe_roms.c
+@@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+- devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index bb7c482..08d48a3 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1023,33 +1023,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+- struct scsi_target *starget, *found;
++ struct scsi_target *starget, *last = NULL;
+ unsigned long flags;
+
+- restart:
+- found = NULL;
++ /* remove targets being careful to lookup next entry before
++ * deleting the last
++ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+- found = starget;
+- found->reap_ref++;
+- break;
++ /* assuming new targets arrive at the end */
++ starget->reap_ref++;
++ spin_unlock_irqrestore(shost->host_lock, flags);
++ if (last)
++ scsi_target_reap(last);
++ last = starget;
++ __scsi_remove_target(starget);
++ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- if (found) {
+- __scsi_remove_target(found);
+- scsi_target_reap(found);
+- /* in the case where @dev has multiple starget children,
+- * continue removing.
+- *
+- * FIXME: does such a case exist?
+- */
+- goto restart;
+- }
++ if (last)
++ scsi_target_reap(last);
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 4ad2c0e..9465bce 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -843,7 +843,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
+ ret = -EAGAIN;
+ break;
+ }
+- ret = s->async->inttrig(dev, s, insn->data[0]);
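++		/* pass the kernel-space copy of the trigger argument, not the user pointer in insn->data */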
++ ret = s->async->inttrig(dev, s, data[0]);
+ if (ret >= 0)
+ ret = 1;
+ break;
+@@ -1088,7 +1088,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ goto cleanup;
+ }
+
+- kfree(async->cmd.chanlist);
+ async->cmd = user_cmd;
+ async->cmd.data = NULL;
+ /* load channel/gain list */
+@@ -1833,6 +1832,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s)
+ if (async) {
+ comedi_reset_async_buf(async);
+ async->inttrig = NULL;
++ kfree(async->cmd.chanlist);
++ async->cmd.chanlist = NULL;
+ } else {
+ printk(KERN_ERR
+ "BUG: (?) do_become_nonbusy called with async=0\n");
+diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
+index 8d98cf4..c8b7eed 100644
+--- a/drivers/staging/comedi/drivers/jr3_pci.c
++++ b/drivers/staging/comedi/drivers/jr3_pci.c
+@@ -913,7 +913,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
+ }
+
+ /* Reset DSP card */
+- devpriv->iobase->channel[0].reset = 0;
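++	/* the DSP registers are PCI iomem, so use writel() rather than a plain store */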
++ writel(0, &devpriv->iobase->channel[0].reset);
+
+ result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
+ printk("Firmare load %d\n", result);
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index 23fc64b..c72128f 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
+ /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
+
+ k->SetMode(dev, k, Setup, TRUE);
+- Preload(dev, k, *(insn->data));
++ Preload(dev, k, data[0]);
+ k->PulseIndex(dev, k);
+ SetLatchSource(dev, k, valueSrclatch);
+ k->SetEnable(dev, k, (uint16_t) (enab != 0));
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index 42cdafe..b5130c8 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth);
+ static unsigned char get_index(void);
+
+ static struct miscdevice synth_device;
+-static int initialized;
++static int init_pos;
+ static int misc_registered;
+
+ static struct var_t vars[] = {
+@@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp)
+ unsigned long flags;
+ spk_lock(flags);
+ synth_soft.alive = 0;
+- initialized = 0;
++ init_pos = 0;
+ spk_unlock(flags);
+ /* Make sure we let applications go before leaving */
+ speakup_start_ttys();
+@@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
+ ch = '\x18';
+ } else if (synth_buffer_empty()) {
+ break;
+- } else if (!initialized) {
+- if (*init) {
+- ch = *init;
+- init++;
+- } else {
+- initialized = 1;
+- }
++ } else if (init[init_pos]) {
++ ch = init[init_pos++];
+ } else {
+ ch = synth_buffer_getc();
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2ff1255..f35cb10 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3204,7 +3204,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+- spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+@@ -3357,6 +3356,7 @@ static int iscsit_send_reject(
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
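++	/* RFC 3720: bytes 16-19 of a Reject PDU must be set to 0xffffffff */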
++ hdr->ffffffff = 0xffffffff;
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 0f68197..dae283f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES 5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+-#define NA_NOPIN_TIMEOUT 5
++#define NA_NOPIN_TIMEOUT 15
+ #define NA_NOPIN_TIMEOUT_MAX 60
+ #define NA_NOPIN_TIMEOUT_MIN 3
+-#define NA_NOPIN_RESPONSE_TIMEOUT 5
++#define NA_NOPIN_RESPONSE_TIMEOUT 30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index d4cf2cd..309f14c 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -674,6 +674,12 @@ int iscsit_ta_generate_node_acls(
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
++ if (flag == 1 && a->cache_dynamic_acls == 0) {
++ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++ "generate_node_acls=1\n");
++ a->cache_dynamic_acls = 1;
++ }
++
+ return 0;
+ }
+
+@@ -713,6 +719,12 @@ int iscsit_ta_cache_dynamic_acls(
+ return -EINVAL;
+ }
+
++ if (a->generate_node_acls == 1 && flag == 0) {
++ pr_debug("Skipping cache_dynamic_acls=0 when"
++ " generate_node_acls=1\n");
++ return 0;
++ }
++
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 93d4f6a..0b01bfc 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3123,6 +3123,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3138,6 +3139,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+ config_group_init_type_name(&alua_group,
+@@ -3153,6 +3155,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3164,14 +3167,17 @@ static int __init target_core_init_configfs(void)
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+- if (IS_ERR(lu_gp))
++ if (IS_ERR(lu_gp)) {
++ ret = -ENOMEM;
+ goto out_global;
++ }
+
+ lu_gp_cg = &alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 455a251..cafa477 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -139,6 +139,19 @@ static struct se_device *fd_create_virtdevice(
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++ /*
++ * Optionally allow fd_buffered_io=1 to be enabled for people
++	 * who want to use the fs buffer cache as a WriteCache mechanism.
++ *
++ * This means that in event of a hard failure, there is a risk
++ * of silent data-loss if the SCSI client has *not* performed a
++ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++ * to write-out the entire device cache.
++ */
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++ flags &= ~O_DSYNC;
++ }
+
+ file = filp_open(dev_p, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -206,6 +219,12 @@ static struct se_device *fd_create_virtdevice(
+ if (!dev)
+ goto fail;
+
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++ " with FDBD_HAS_BUFFERED_IO_WCE\n");
++ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++ }
++
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+@@ -450,6 +469,7 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
++ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -461,7 +481,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, token;
++ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -505,6 +525,19 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
++ case Opt_fd_buffered_io:
++ match_int(args, &arg);
++ if (arg != 1) {
++ pr_err("bogus fd_buffered_io=%d value\n", arg);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ pr_debug("FILEIO: Using buffered I/O"
++ " operations for struct fd_dev\n");
++
++ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++ break;
+ default:
+ break;
+ }
+@@ -536,8 +569,10 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++ "Buffered-WCE" : "O_DSYNC");
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 53ece69..6b1b6a9 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -18,6 +18,7 @@ struct fd_request {
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index fc7bbba..d190269 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -108,7 +108,7 @@ struct gsm_mux_net {
+ */
+
+ struct gsm_msg {
+- struct gsm_msg *next;
++ struct list_head list;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+@@ -245,8 +245,7 @@ struct gsm_mux {
+ unsigned int tx_bytes; /* TX data outstanding */
+ #define TX_THRESH_HI 8192
+ #define TX_THRESH_LO 2048
+- struct gsm_msg *tx_head; /* Pending data packets */
+- struct gsm_msg *tx_tail;
++ struct list_head tx_list; /* Pending data packets */
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+@@ -663,7 +662,7 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+- m->next = NULL;
++ INIT_LIST_HEAD(&m->list);
+ return m;
+ }
+
+@@ -673,22 +672,21 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
++ * If we have been flow-stopped by a CMD_FCOFF, then we can only
++ *	send messages on DLCI0 until CMD_FCON.
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+ static void gsm_data_kick(struct gsm_mux *gsm)
+ {
+- struct gsm_msg *msg = gsm->tx_head;
++ struct gsm_msg *msg, *nmsg;
+ int len;
+ int skip_sof = 0;
+
+- /* FIXME: We need to apply this solely to data messages */
+- if (gsm->constipated)
+- return;
+-
+- while (gsm->tx_head != NULL) {
+- msg = gsm->tx_head;
++ list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
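++		/* while flow-stopped, only DLCI 0 (addr == 0) control frames may go out */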
++ if (gsm->constipated && msg->addr)
++ continue;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+@@ -711,14 +709,13 @@ static void gsm_data_kick(struct gsm_mux *gsm)
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+- gsm->tx_head = msg->next;
+- if (gsm->tx_head == NULL)
+- gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+- kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
++
++ list_del(&msg->list);
++ kfree(msg);
+ }
+ }
+
+@@ -768,11 +765,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+- if (gsm->tx_tail)
+- gsm->tx_tail->next = msg;
+- else
+- gsm->tx_head = msg;
+- gsm->tx_tail = msg;
++ list_add_tail(&msg->list, &gsm->tx_list);
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+ }
+@@ -875,7 +868,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+- dlci->skb = skb_dequeue(&dlci->skb_list);
++ dlci->skb = skb_dequeue_tail(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+@@ -886,7 +879,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+@@ -899,8 +892,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+- if (msg == NULL)
++ if (msg == NULL) {
++ skb_queue_tail(&dlci->skb_list, dlci->skb);
++ dlci->skb = NULL;
+ return -ENOMEM;
++ }
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+@@ -912,7 +908,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ skb_pull(dlci->skb, len);
+ __gsm_data_queue(dlci, msg);
+ if (last) {
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ }
+ return size;
+@@ -971,16 +967,22 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+ {
+ unsigned long flags;
++ int sweep;
++
++ if (dlci->constipated)
++ return;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
++ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
+ if (dlci->gsm->tx_bytes == 0) {
+ if (dlci->net)
+ gsm_dlci_data_output_framed(dlci->gsm, dlci);
+ else
+ gsm_dlci_data_output(dlci->gsm, dlci);
+- } else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+- gsm_dlci_data_sweep(dlci->gsm);
++ }
++ if (sweep)
++ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ }
+
+@@ -1027,6 +1029,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ {
+ int mlines = 0;
+ u8 brk = 0;
++ int fc;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+@@ -1038,19 +1041,21 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+- };
++ }
+
+ /* Flow control/ready to communicate */
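++	/* the peer throttles us either by setting MDM_FC or by dropping MDM_RTR */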
+- if (modem & MDM_FC) {
++ fc = (modem & MDM_FC) || !(modem & MDM_RTR);
++ if (fc && !dlci->constipated) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+- }
+- if (modem & MDM_RTC) {
+- mlines |= TIOCM_DSR | TIOCM_DTR;
++ } else if (!fc && dlci->constipated) {
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
++
+ /* Map modem bits */
++ if (modem & MDM_RTC)
++ mlines |= TIOCM_DSR | TIOCM_DTR;
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+@@ -1190,6 +1195,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+ {
+ u8 buf[1];
++ unsigned long flags;
++
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+@@ -1206,16 +1213,18 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+- /* Modem wants us to STFU */
+- gsm->constipated = 1;
+- gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+- break;
+- case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+- gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
++ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ /* Kick the link in case it is idling */
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
++ break;
++ case CMD_FCOFF:
++ /* Modem wants us to STFU */
++ gsm->constipated = 1;
++ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+@@ -1668,7 +1677,7 @@ static void gsm_dlci_free(struct kref *ref)
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
+- kfree_skb(dlci->skb);
++ dev_kfree_skb(dlci->skb);
+ kfree(dlci);
+ }
+
+@@ -2007,7 +2016,7 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ {
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+- struct gsm_msg *txq;
++ struct gsm_msg *txq, *ntxq;
+ struct gsm_control *gc;
+
+ gsm->dead = 1;
+@@ -2042,11 +2051,9 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ if (gsm->dlci[i])
+ gsm_dlci_release(gsm->dlci[i]);
+ /* Now wipe the queues */
+- for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+- gsm->tx_head = txq->next;
++ list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ kfree(txq);
+- }
+- gsm->tx_tail = NULL;
++ INIT_LIST_HEAD(&gsm->tx_list);
+ }
+ EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+@@ -2157,6 +2164,7 @@ struct gsm_mux *gsm_alloc_mux(void)
+ }
+ spin_lock_init(&gsm->lock);
+ kref_init(&gsm->ref);
++ INIT_LIST_HEAD(&gsm->tx_list);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+@@ -2273,7 +2281,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+- WARN_ONCE("%s: unknown flag %d\n",
++ WARN_ONCE(1, "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+@@ -2377,12 +2385,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+- spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_data_sweep(gsm);
+- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
+
+ /**
+@@ -2889,6 +2897,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
++	/* If DLCI 0 is not yet fully open, return an error. This is OK from a locking
++ perspective as we don't have to worry about this if DLCI0 is lost */
++ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
++ return -EL2NSYNC;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 39d6ab6..8481aae 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1728,7 +1728,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+
+ do_it_again:
+
+- BUG_ON(!tty->read_buf);
++ if (WARN_ON(!tty->read_buf))
++ return -EAGAIN;
+
+ c = job_control(tty, file);
+ if (c < 0)
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 482d51e..e7d82c1 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1118,6 +1118,8 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
+ #define PCI_SUBDEVICE_ID_POCTAL232 0x0308
+ #define PCI_SUBDEVICE_ID_POCTAL422 0x0408
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
+ #define PCI_VENDOR_ID_ADVANTECH 0x13fe
+ #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+@@ -3168,8 +3170,11 @@ static struct pci_device_id serial_pci_tbl[] = {
+ * For now just used the hex ID 0x950a.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+- PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
+- pbn_b0_2_115200 },
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00,
++ 0, 0, pbn_b0_2_115200 },
++ { PCI_VENDOR_ID_OXSEMI, 0x950a,
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30,
++ 0, 0, pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_2_1130000 },
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 6da8cf8..fe9f111 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1627,13 +1627,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ old_cr &= ~ST_UART011_CR_OVSFACT;
+ }
+
++ /*
++ * Workaround for the ST Micro oversampling variants to
++ * increase the bitrate slightly, by lowering the divisor,
++ * to avoid delayed sampling of start bit at high speeds,
++ * else we see data corruption.
++ */
++ if (uap->vendor->oversampling) {
++ if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
++ quot -= 1;
++ else if ((baud > 3250000) && (quot > 2))
++ quot -= 2;
++ }
+ /* Set baud rate */
+ writew(quot & 0x3f, port->membase + UART011_FBRD);
+ writew(quot >> 6, port->membase + UART011_IBRD);
+
+ /*
+ * ----------v----------v----------v----------v-----
+- * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
++ * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
++ * UART011_FBRD & UART011_IBRD.
+ * ----------^----------^----------^----------^-----
+ */
+ writew(lcr_h, port->membase + uap->lcrh_rx);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index a40ab98..4cddbfc 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1680,6 +1680,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
++ struct xhci_cd *cur_cd, *next_cd;
+ unsigned long flags;
+ int size;
+ int i, j, num_ports;
+@@ -1701,6 +1702,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
+
+ for (i = 1; i < MAX_HC_SLOTS; ++i)
+ xhci_free_virt_device(xhci, i);
+@@ -2246,6 +2252,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index bddcbfc..4ed7572 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -99,6 +99,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
++ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index c7c530c..950aef8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -309,12 +309,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ /* Ring the host controller doorbell after placing a command on the ring */
+ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ {
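++	/* never ring the doorbell while the command ring is stopped or mid-abort */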
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
++ return;
++
+ xhci_dbg(xhci, "// Ding dong!\n");
+ xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ }
+
++static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
++{
++ u64 temp_64;
++ int ret;
++
++ xhci_dbg(xhci, "Abort command ring\n");
++
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
++ xhci_dbg(xhci, "The command ring isn't running, "
++			"has the command ring been stopped?\n");
++ return 0;
++ }
++
++ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
++ if (!(temp_64 & CMD_RING_RUNNING)) {
++ xhci_dbg(xhci, "Command ring had been stopped\n");
++ return 0;
++ }
++ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
++ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
++ &xhci->op_regs->cmd_ring);
++
++ /* Section 4.6.1.2 of xHCI 1.0 spec says software should
++	 * time the completion of all xHCI commands, including
++	 * the Command Abort operation. If software doesn't see
++	 * CRR negated in a timely manner (e.g. longer than 5
++	 * seconds), then it should assume that there are
++ * larger problems with the xHC and assert HCRST.
++ */
++ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
++ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
++ if (ret < 0) {
++		xhci_err(xhci, "Stopping the command ring failed, "
++ "maybe the host is dead\n");
++ xhci->xhc_state |= XHCI_STATE_DYING;
++ xhci_quiesce(xhci);
++ xhci_halt(xhci);
++ return -ESHUTDOWN;
++ }
++
++ return 0;
++}
++
++static int xhci_queue_cd(struct xhci_hcd *xhci,
++ struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cd;
++ cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
++ if (!cd)
++ return -ENOMEM;
++ INIT_LIST_HEAD(&cd->cancel_cmd_list);
++
++ cd->command = command;
++ cd->cmd_trb = cmd_trb;
++ list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
++
++ return 0;
++}
++
++/*
++ * Cancel a command that has been issued.
++ *
++ * Some commands may hang while waiting for an acknowledgement from the
++ * USB device. That is outside the xHC's control and leaves the command
++ * ring blocked, so software should intervene to recover it.
++ * See Section 4.6.1.1 and 4.6.1.2
++ */
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ int retval = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xhci->lock, flags);
++
++ if (xhci->xhc_state & XHCI_STATE_DYING) {
++		xhci_warn(xhci, "Aborting the command ring,"
++ " but the xHCI is dead.\n");
++ retval = -ESHUTDOWN;
++ goto fail;
++ }
++
++	/* queue the cmd descriptor to cancel_cmd_list */
++ retval = xhci_queue_cd(xhci, command, cmd_trb);
++ if (retval) {
++ xhci_warn(xhci, "Queuing command descriptor failed.\n");
++ goto fail;
++ }
++
++ /* abort command ring */
++ retval = xhci_abort_cmd_ring(xhci);
++ if (retval) {
++ xhci_err(xhci, "Abort command ring failed\n");
++ if (unlikely(retval == -ESHUTDOWN)) {
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
++ xhci_dbg(xhci, "xHCI host controller is dead.\n");
++ return retval;
++ }
++ }
++
++fail:
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ return retval;
++}
++
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index,
+@@ -1043,6 +1154,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+ }
+ }
+
++/* Complete the command and delete it from the device's command queue.
++ */
++static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
++ struct xhci_command *command, u32 status)
++{
++ command->status = status;
++ list_del(&command->cmd_list);
++ if (command->completion)
++ complete(command->completion);
++ else
++ xhci_free_command(xhci, command);
++}
++
++
+ /* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1. Return 0 if the
+ * completed command isn't at the head of the command list.
+@@ -1061,15 +1186,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ if (xhci->cmd_ring->dequeue != command->command_trb)
+ return 0;
+
+- command->status = GET_COMP_CODE(le32_to_cpu(event->status));
+- list_del(&command->cmd_list);
+- if (command->completion)
+- complete(command->completion);
+- else
+- xhci_free_command(xhci, command);
++ xhci_complete_cmd_in_cmd_wait_list(xhci, command,
++ GET_COMP_CODE(le32_to_cpu(event->status)));
+ return 1;
+ }
+
++/*
++ * Find the command trb that needs to be cancelled and modify it
++ * into a NO OP command. If the command is in a device's command
++ * wait list, finish and free it.
++ *
++ * If we can't find the command trb, we assume it has already been
++ * executed.
++ */
++static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
++{
++ struct xhci_segment *cur_seg;
++ union xhci_trb *cmd_trb;
++ u32 cycle_state;
++
++ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
++ return;
++
++ /* find the current segment of command ring */
++ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
++ xhci->cmd_ring->dequeue, &cycle_state);
++
++ /* find the command trb matched by cd from command ring */
++ for (cmd_trb = xhci->cmd_ring->dequeue;
++ cmd_trb != xhci->cmd_ring->enqueue;
++ next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
++ /* If the trb is link trb, continue */
++ if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
++ continue;
++
++ if (cur_cd->cmd_trb == cmd_trb) {
++
++			/* If the command is in the device's command list, we
++			 * should
++ * finish it and free the command structure.
++ */
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++
++			/* get cycle state from the original command trb */
++ cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
++ & TRB_CYCLE;
++
++ /* modify the command trb to NO OP command */
++ cmd_trb->generic.field[0] = 0;
++ cmd_trb->generic.field[1] = 0;
++ cmd_trb->generic.field[2] = 0;
++ cmd_trb->generic.field[3] = cpu_to_le32(
++ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
++ break;
++ }
++ }
++}
++
++static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ xhci_cmd_to_noop(xhci, cur_cd);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
++}
++
++/*
++ * Traverse the cancel_cmd_list. If a command descriptor matching
++ * cmd_trb is found, the function frees it and returns 1; otherwise
++ * it returns 0.
++ */
++static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return 0;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ if (cur_cd->cmd_trb == cmd_trb) {
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * If cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
++ * trb pointed to by the command ring dequeue pointer is the trb we
++ * want to cancel. If cmd_trb_comp_code is COMP_CMD_STOP, we traverse
++ * the cancel_cmd_list and turn every command that has a command
++ * descriptor into a NO-OP trb.
++ */
++static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
++ int cmd_trb_comp_code)
++{
++ int cur_trb_is_good = 0;
++
++	/* Search for the cmd trb pointed to by the command ring dequeue
++	 * pointer in the command descriptor list. If it is found, free it.
++ */
++ cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
++ xhci->cmd_ring->dequeue);
++
++ if (cmd_trb_comp_code == COMP_CMD_ABORT)
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ else if (cmd_trb_comp_code == COMP_CMD_STOP) {
++		/* traverse the cancel_cmd_list and cancel each
++		 * command according to its command descriptor
++ */
++ xhci_cancel_cmd_in_cd_list(xhci);
++
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
++ /*
++ * ring command ring doorbell again to restart the
++ * command ring
++ */
++ if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
++ xhci_ring_cmd_db(xhci);
++ }
++ return cur_trb_is_good;
++}
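++
++/* Editor's summary (not upstream text) of the cmd_ring_state machine
++ * implemented above and in the xhci.c hunks below:
++ *
++ *	RUNNING -> ABORTED    xhci_abort_cmd_ring() writes CMD_RING_ABORT
++ *	ABORTED -> STOPPED    a COMP_CMD_ABORT completion arrives, or
++ *	                      xhci_halt() succeeds
++ *	ABORTED/STOPPED ->    a COMP_CMD_STOP completion arrives; cancelled
++ *	        RUNNING       commands are no-op'ed and the doorbell is
++ *	                      rung again (xhci_run_finished() also sets
++ *	                      RUNNING when the host starts)
++ */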
++
+ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+ {
+@@ -1095,6 +1349,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
++
++ if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
++ (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
++		/* If the return value is 0, we assume the trb pointed to by
++		 * the command ring dequeue pointer is a good trb. A good
++		 * trb means we don't want to cancel it, but it has
++		 * been stopped by the host. So we should handle it normally.
++		 * Otherwise, the driver should invoke inc_deq() and return.
++ */
++ if (handle_stopped_cmd_ring(xhci,
++ GET_COMP_CODE(le32_to_cpu(event->status)))) {
++ inc_deq(xhci, xhci->cmd_ring, false);
++ return;
++ }
++ }
++
+ switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
+ & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+@@ -3356,7 +3626,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+- if (xhci->hci_version == 0x100) {
++ if (xhci->hci_version == 0x100 &&
++ !(xhci->quirks &
++ XHCI_AVOID_BEI)) {
+ /* Set BEI bit except for the last td */
+ if (i < num_tds - 1)
+ field |= TRB_BEI;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 09872ee..f5c0f38 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+ {
+ u32 result;
+@@ -105,8 +105,12 @@ int xhci_halt(struct xhci_hcd *xhci)
+
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+- if (!ret)
++ if (!ret) {
+ xhci->xhc_state |= XHCI_STATE_HALTED;
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ } else
++ xhci_warn(xhci, "Host not halted after %u microseconds.\n",
++ XHCI_MAX_HALT_USEC);
+ return ret;
+ }
+
+@@ -459,6 +463,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+
+ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
++ if (!dmi_product_name || !dmi_sys_vendor)
++ return false;
+
+ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+ return false;
+@@ -570,6 +576,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
+ return -ENODEV;
+ }
+ xhci->shared_hcd->state = HC_STATE_RUNNING;
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
+@@ -874,7 +881,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, 100*100)) {
++ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+@@ -2506,6 +2513,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ struct completion *cmd_completion;
+ u32 *cmd_status;
+ struct xhci_virt_device *virt_dev;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[udev->slot_id];
+@@ -2551,6 +2559,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ }
+ init_completion(cmd_completion);
+
++ cmd_trb = xhci->cmd_ring->dequeue;
+ if (!ctx_change)
+ ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ udev->slot_id, must_succeed);
+@@ -2572,14 +2581,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ cmd_completion,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for %s command\n",
+ timeleft == 0 ? "Timeout" : "Signal",
+ ctx_change == 0 ?
+ "configure endpoint" :
+ "evaluate context");
+- /* FIXME cancel the configure endpoint command */
++ /* cancel the configure endpoint command */
++ ret = xhci_cancel_cmd(xhci, command, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+@@ -3528,8 +3540,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ unsigned long flags;
+ int timeleft;
+ int ret;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -3541,12 +3555,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* XXX: how much time for xHC slot assignment? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the enable slot request */
+- return 0;
++ /* cancel the enable slot request */
++ return xhci_cancel_cmd(xhci, NULL, cmd_trb);
+ }
+
+ if (!xhci->slot_id) {
+@@ -3607,6 +3621,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u64 temp_64;
++ union xhci_trb *cmd_trb;
+
+ if (!udev->slot_id) {
+ xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+@@ -3645,6 +3660,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+ udev->slot_id);
+ if (ret) {
+@@ -3657,7 +3673,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ /* FIXME: From section 4.3.4: "Software shall be responsible for timing
+ * the SetAddress() "recovery interval" required by USB and aborting the
+ * command on a timeout.
+@@ -3665,7 +3681,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for address device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the address device command */
++ /* cancel the address device command */
++ ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 44d518a..cc368c2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1255,6 +1255,16 @@ struct xhci_td {
+ union xhci_trb *last_trb;
+ };
+
++/* xHCI command default timeout value */
++#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
++
++/* command descriptor */
++struct xhci_cd {
++ struct list_head cancel_cmd_list;
++ struct xhci_command *command;
++ union xhci_trb *cmd_trb;
++};
++
+ struct xhci_dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+@@ -1402,6 +1412,11 @@ struct xhci_hcd {
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
++ unsigned int cmd_ring_state;
++#define CMD_RING_STATE_RUNNING (1 << 0)
++#define CMD_RING_STATE_ABORTED (1 << 1)
++#define CMD_RING_STATE_STOPPED (1 << 2)
++ struct list_head cancel_cmd_list;
+ unsigned int cmd_ring_reserved_trbs;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+@@ -1473,6 +1488,7 @@ struct xhci_hcd {
+ #define XHCI_TRUST_TX_LENGTH (1 << 10)
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
++#define XHCI_AVOID_BEI (1 << 15)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+@@ -1666,6 +1682,8 @@ static inline void xhci_unregister_pci(void) {}
+
+ /* xHCI host controller glue */
+ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++ u32 mask, u32 done, int usec);
+ void xhci_quiesce(struct xhci_hcd *xhci);
+ int xhci_halt(struct xhci_hcd *xhci);
+ int xhci_reset(struct xhci_hcd *xhci);
+@@ -1756,6 +1774,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
+ void xhci_stop_endpoint_command_watchdog(unsigned long arg);
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb);
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, unsigned int stream_id);
+
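++/* Editor's sketch (illustrative, not part of this hunk): a quirk bit
++ * like the new XHCI_AVOID_BEI is normally set from a driver's
++ * xhci_get_quirks_t callback.  SOME_VENDOR_ID below is a placeholder;
++ * the real device match presumably lives in the xhci-pci.c part of
++ * this patch.
++ *
++ *	static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
++ *	{
++ *		struct pci_dev *pdev = to_pci_dev(dev);
++ *
++ *		if (pdev->vendor == SOME_VENDOR_ID)
++ *			xhci->quirks |= XHCI_AVOID_BEI;
++ *	}
++ */
+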
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 7324bea..e29a664 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -584,6 +584,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /*
+ * ELV devices:
+ */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 06f6fd2..7b5eb74 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -517,6 +517,11 @@
+ */
+ #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
+
++/*
++ * TIAO product ids (FTDI_VID)
++ * http://www.tiaowiki.com/w/Main_Page
++ */
++#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+
+
+ /********************************/
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c068b4d..3fd4e6f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -870,7 +870,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index a348198..87271e3 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -36,8 +36,6 @@
+ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+ #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+ #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+-#define PANTECH_PRODUCT_UML190_VZW 0x3716
+-#define PANTECH_PRODUCT_UML290_VZW 0x3718
+
+ /* CMOTECH devices */
+ #define CMOTECH_VENDOR_ID 0x16d8
+@@ -68,11 +66,9 @@ static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index f55ae23..790fa63 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_mount_wait(dentry);
+- if (status)
+- return ERR_PTR(status);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
++ if (status) {
++ spin_unlock(&sbi->fs_lock);
++ return ERR_PTR(status);
++ }
+ }
+ done:
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 6ff96c6..8dd615c 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1668,30 +1668,19 @@ static int elf_note_info_init(struct elf_note_info *info)
+ return 0;
+ info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
+ if (!info->psinfo)
+- goto notes_free;
++ return 0;
+ info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
+ if (!info->prstatus)
+- goto psinfo_free;
++ return 0;
+ info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
+ if (!info->fpu)
+- goto prstatus_free;
++ return 0;
+ #ifdef ELF_CORE_COPY_XFPREGS
+ info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
+ if (!info->xfpu)
+- goto fpu_free;
++ return 0;
+ #endif
+ return 1;
+-#ifdef ELF_CORE_COPY_XFPREGS
+- fpu_free:
+- kfree(info->fpu);
+-#endif
+- prstatus_free:
+- kfree(info->prstatus);
+- psinfo_free:
+- kfree(info->psinfo);
+- notes_free:
+- kfree(info->notes);
+- return 0;
+ }
+
+ static int fill_note_info(struct elfhdr *elf, int phdrs,
+diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
+index a9f29b1..2262a77 100644
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -559,6 +559,8 @@ struct ecryptfs_open_req {
+ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb);
+ void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode);
+ int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
+ size_t *decrypted_name_size,
+ struct dentry *ecryptfs_dentry,
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index d3f95f9..841f24f 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -139,29 +139,50 @@ out:
+ return rc;
+ }
+
+-static void ecryptfs_vma_close(struct vm_area_struct *vma)
+-{
+- filemap_write_and_wait(vma->vm_file->f_mapping);
+-}
+-
+-static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+- .close = ecryptfs_vma_close,
+- .fault = filemap_fault,
+-};
++struct kmem_cache *ecryptfs_file_info_cache;
+
+-static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
++static int read_or_initialize_metadata(struct dentry *dentry)
+ {
++ struct inode *inode = dentry->d_inode;
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
++ struct ecryptfs_crypt_stat *crypt_stat;
+ int rc;
+
+- rc = generic_file_mmap(file, vma);
++ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
++ mount_crypt_stat = &ecryptfs_superblock_to_private(
++ inode->i_sb)->mount_crypt_stat;
++ mutex_lock(&crypt_stat->cs_mutex);
++
++ if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
++ crypt_stat->flags & ECRYPTFS_KEY_VALID) {
++ rc = 0;
++ goto out;
++ }
++
++ rc = ecryptfs_read_metadata(dentry);
+ if (!rc)
+- vma->vm_ops = &ecryptfs_file_vm_ops;
++ goto out;
++
++ if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
++ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
++ | ECRYPTFS_ENCRYPTED);
++ rc = 0;
++ goto out;
++ }
++
++ if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
++ !i_size_read(ecryptfs_inode_to_lower(inode))) {
++ rc = ecryptfs_initialize_file(dentry, inode);
++ if (!rc)
++ goto out;
++ }
+
++ rc = -EIO;
++out:
++ mutex_unlock(&crypt_stat->cs_mutex);
+ return rc;
+ }
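++
++/* Editor's summary (not upstream text) of the decision flow above:
++ *
++ *	metadata already valid             -> 0
++ *	ecryptfs_read_metadata() succeeds  -> 0
++ *	plaintext passthrough enabled      -> 0, file treated as unencrypted
++ *	empty lower file, no xattr header  -> ecryptfs_initialize_file()
++ *	anything else                      -> -EIO
++ */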
+
+-struct kmem_cache *ecryptfs_file_info_cache;
+-
+ /**
+ * ecryptfs_open
+ * @inode: inode speciying file to open
+@@ -237,32 +258,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ rc = 0;
+ goto out;
+ }
+- mutex_lock(&crypt_stat->cs_mutex);
+- if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
+- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
+- rc = ecryptfs_read_metadata(ecryptfs_dentry);
+- if (rc) {
+- ecryptfs_printk(KERN_DEBUG,
+- "Valid headers not found\n");
+- if (!(mount_crypt_stat->flags
+- & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
+- rc = -EIO;
+- printk(KERN_WARNING "Either the lower file "
+- "is not in a valid eCryptfs format, "
+- "or the key could not be retrieved. "
+- "Plaintext passthrough mode is not "
+- "enabled; returning -EIO\n");
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out_put;
+- }
+- rc = 0;
+- crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+- | ECRYPTFS_ENCRYPTED);
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out;
+- }
+- }
+- mutex_unlock(&crypt_stat->cs_mutex);
++ rc = read_or_initialize_metadata(ecryptfs_dentry);
++ if (rc)
++ goto out_put;
+ ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
+ "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
+ (unsigned long long)i_size_read(inode));
+@@ -278,8 +276,14 @@ out:
+
+ static int ecryptfs_flush(struct file *file, fl_owner_t td)
+ {
+- return file->f_mode & FMODE_WRITE
+- ? filemap_write_and_wait(file->f_mapping) : 0;
++ struct file *lower_file = ecryptfs_file_to_lower(file);
++
++ if (lower_file->f_op && lower_file->f_op->flush) {
++ filemap_write_and_wait(file->f_mapping);
++ return lower_file->f_op->flush(lower_file, td);
++ }
++
++ return 0;
+ }
+
+ static int ecryptfs_release(struct inode *inode, struct file *file)
+@@ -293,15 +297,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
+ static int
+ ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+- int rc = 0;
+-
+- rc = generic_file_fsync(file, start, end, datasync);
+- if (rc)
+- goto out;
+- rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
+- datasync);
+-out:
+- return rc;
++ return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+ }
+
+ static int ecryptfs_fasync(int fd, struct file *file, int flag)
+@@ -370,7 +366,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+- .mmap = ecryptfs_file_mmap,
++ .mmap = generic_file_mmap,
+ .open = ecryptfs_open,
+ .flush = ecryptfs_flush,
+ .release = ecryptfs_release,
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 7c7556b..a9be90d 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -161,6 +161,31 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
+ return vfs_create(lower_dir_inode, lower_dentry, mode, NULL);
+ }
+
++static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
++ struct dentry *lower_dir_dentry;
++ int rc;
++
++ dget(lower_dentry);
++ lower_dir_dentry = lock_parent(lower_dentry);
++ rc = vfs_unlink(lower_dir_inode, lower_dentry);
++ if (rc) {
++ printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
++ goto out_unlock;
++ }
++ fsstack_copy_attr_times(dir, lower_dir_inode);
++ set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
++ inode->i_ctime = dir->i_ctime;
++ d_drop(dentry);
++out_unlock:
++ unlock_dir(lower_dir_dentry);
++ dput(lower_dentry);
++ return rc;
++}
++
+ /**
+ * ecryptfs_do_create
+ * @directory_inode: inode of the new file's dentry's parent in ecryptfs
+@@ -201,8 +226,10 @@ ecryptfs_do_create(struct inode *directory_inode,
+ }
+ inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ directory_inode->i_sb);
+- if (IS_ERR(inode))
++ if (IS_ERR(inode)) {
++ vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
+ goto out_lock;
++ }
+ fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
+ fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
+ out_lock:
+@@ -219,8 +246,8 @@ out:
+ *
+ * Returns zero on success
+ */
+-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+- struct inode *ecryptfs_inode)
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode)
+ {
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+@@ -284,7 +311,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
+ * that this on disk file is prepared to be an ecryptfs file */
+ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+ if (rc) {
+- drop_nlink(ecryptfs_inode);
++ ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
++ ecryptfs_inode);
++ make_bad_inode(ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
+ iput(ecryptfs_inode);
+ goto out;
+@@ -496,27 +525,7 @@ out_lock:
+
+ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- int rc = 0;
+- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
+- struct dentry *lower_dir_dentry;
+-
+- dget(lower_dentry);
+- lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_unlink(lower_dir_inode, lower_dentry);
+- if (rc) {
+- printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
+- goto out_unlock;
+- }
+- fsstack_copy_attr_times(dir, lower_dir_inode);
+- set_nlink(dentry->d_inode,
+- ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink);
+- dentry->d_inode->i_ctime = dir->i_ctime;
+- d_drop(dentry);
+-out_unlock:
+- unlock_dir(lower_dir_dentry);
+- dput(lower_dentry);
+- return rc;
++ return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
+ }
+
+ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
+@@ -1026,12 +1035,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ goto out;
+ }
+
+- if (S_ISREG(inode->i_mode)) {
+- rc = filemap_write_and_wait(inode->i_mapping);
+- if (rc)
+- goto out;
+- fsstack_copy_attr_all(inode, lower_inode);
+- }
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index b4a6bef..1cfef9f 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
+ inode_info = ecryptfs_inode_to_private(inode);
+ if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
+ &inode_info->lower_file_mutex)) {
++ filemap_write_and_wait(inode->i_mapping);
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+ mutex_unlock(&inode_info->lower_file_mutex);
+diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
+index 6a44148..93a998a 100644
+--- a/fs/ecryptfs/mmap.c
++++ b/fs/ecryptfs/mmap.c
+@@ -62,18 +62,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ int rc;
+
+- /*
+- * Refuse to write the page out if we are called from reclaim context
+- * since our writepage() path may potentially allocate memory when
+- * calling into the lower fs vfs_write() which may in turn invoke
+- * us again.
+- */
+- if (current->flags & PF_MEMALLOC) {
+- redirty_page_for_writepage(wbc, page);
+- rc = 0;
+- goto out;
+- }
+-
+ rc = ecryptfs_encrypt_page(page);
+ if (rc) {
+ ecryptfs_printk(KERN_WARNING, "Error encrypting "
+@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+ int rc;
+- int need_unlock_page = 1;
+
+ ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
+ "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
+@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
+ "zeros in page with index = [0x%.16lx]\n", index);
+ goto out;
+ }
+- set_page_dirty(page);
+- unlock_page(page);
+- need_unlock_page = 0;
++ rc = ecryptfs_encrypt_page(page);
++ if (rc) {
++ ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
++ "index [0x%.16lx])\n", index);
++ goto out;
++ }
+ if (pos + copied > i_size_read(ecryptfs_inode)) {
+ i_size_write(ecryptfs_inode, pos + copied);
+ ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
+ "[0x%.16llx]\n",
+ (unsigned long long)i_size_read(ecryptfs_inode));
+- balance_dirty_pages_ratelimited(mapping);
+- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+- if (rc) {
+- printk(KERN_ERR "Error writing inode size to metadata; "
+- "rc = [%d]\n", rc);
+- goto out;
+- }
+ }
+- rc = copied;
++ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
++ if (rc)
++ printk(KERN_ERR "Error writing inode size to metadata; "
++ "rc = [%d]\n", rc);
++ else
++ rc = copied;
+ out:
+- if (need_unlock_page)
+- unlock_page(page);
++ unlock_page(page);
+ page_cache_release(page);
+ return rc;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8b01f9f..bac2330 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2382,6 +2382,16 @@ static int ext4_nonda_switch(struct super_block *sb)
+ free_blocks = EXT4_C2B(sbi,
+ percpu_counter_read_positive(&sbi->s_freeclusters_counter));
+ dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
++ /*
++ * Start pushing delalloc when 1/2 of free blocks are dirty.
++ */
++ if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
++ !writeback_in_progress(sb->s_bdi) &&
++ down_read_trylock(&sb->s_umount)) {
++ writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
++ up_read(&sb->s_umount);
++ }
++
+ if (2 * free_blocks < 3 * dirty_blocks ||
+ free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
+ /*
+@@ -2390,13 +2400,6 @@ static int ext4_nonda_switch(struct super_block *sb)
+ */
+ return 1;
+ }
+- /*
+- * Even if we don't switch but are nearing capacity,
+- * start pushing delalloc when 1/2 of free blocks are dirty.
+- */
+- if (free_blocks < 2 * dirty_blocks)
+- writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
+-
+ return 0;
+ }
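++
++/* Editor's note (worked example, not upstream text): with
++ * free_blocks = 100 and dirty_blocks = 60, the writeback trigger
++ * added above fires, since 100 < 2 * 60; ext4_nonda_switch() still
++ * returns 0 unless 2 * 100 < 3 * 60 (false here) or free_blocks
++ * drops under dirty_blocks + EXT4_FREECLUSTERS_WATERMARK.
++ */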
+
+@@ -4004,6 +4007,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
++ int need_datasync = 0;
+
+ /* For fields not not tracking in the in-memory inode,
+ * initialise them to zero for new inodes. */
+@@ -4052,7 +4056,10 @@ static int ext4_do_update_inode(handle_t *handle,
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- ext4_isize_set(raw_inode, ei->i_disksize);
++ if (ei->i_disksize != ext4_isize(raw_inode)) {
++ ext4_isize_set(raw_inode, ei->i_disksize);
++ need_datasync = 1;
++ }
+ if (ei->i_disksize > 0x7fffffffULL) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+@@ -4105,7 +4112,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ err = rc;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+
+- ext4_update_inode_fsync_trans(handle, inode, 0);
++ ext4_update_inode_fsync_trans(handle, inode, need_datasync);
+ out_brelse:
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index c5826c6..e2016f3 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
+ }
+
+ /**
+- * mext_check_null_inode - NULL check for two inodes
+- *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+- */
+-static int
+-mext_check_null_inode(struct inode *inode1, struct inode *inode2,
+- const char *function, unsigned int line)
+-{
+- int ret = 0;
+-
+- if (inode1 == NULL) {
+- __ext4_error(inode2->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 NULL inode2 %lu", inode2->i_ino);
+- ret = -EIO;
+- } else if (inode2 == NULL) {
+- __ext4_error(inode1->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 %lu inode2 NULL", inode1->i_ino);
+- ret = -EIO;
+- }
+- return ret;
+-}
+-
+-/**
+ * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
+ *
+- * @orig_inode: original inode structure
+- * @donor_inode: donor inode structure
+- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
+- * i_ino order.
++ * Acquire write lock of i_data_sem of the two inodes
+ */
+ static void
+-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
++double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+- struct inode *first = orig_inode, *second = donor_inode;
++ if (first < second) {
++ down_write(&EXT4_I(first)->i_data_sem);
++ down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++ } else {
++ down_write(&EXT4_I(second)->i_data_sem);
++ down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+
+- /*
+- * Use the inode number to provide the stable locking order instead
+- * of its address, because the C language doesn't guarantee you can
+- * compare pointers that don't come from the same array.
+- */
+- if (donor_inode->i_ino < orig_inode->i_ino) {
+- first = donor_inode;
+- second = orig_inode;
+ }
+-
+- down_write(&EXT4_I(first)->i_data_sem);
+- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ }
+
+ /**
+@@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
+ return -EINVAL;
+ }
+
+- /* Files should be in the same ext4 FS */
+- if (orig_inode->i_sb != donor_inode->i_sb) {
+- ext4_debug("ext4 move extent: The argument files "
+- "should be in same FS [ino:orig %lu, donor %lu]\n",
+- orig_inode->i_ino, donor_inode->i_ino);
+- return -EINVAL;
+- }
+-
+ /* Ext4 move extent supports only extent based file */
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ ext4_debug("ext4 move extent: orig file is not extents "
+@@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
+ * @inode1: the inode structure
+ * @inode2: the inode structure
+ *
+- * Lock two inodes' i_mutex by i_ino order.
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
++ * Lock two inodes' i_mutex
+ */
+-static int
++static void
+ mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1 == inode2) {
+- mutex_lock(&inode1->i_mutex);
+- goto out;
+- }
+-
+- if (inode1->i_ino < inode2->i_ino) {
++ BUG_ON(inode1 == inode2);
++ if (inode1 < inode2) {
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+ } else {
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
+ }
+-
+-out:
+- return ret;
+ }
+
+ /**
+@@ -1109,28 +1051,13 @@ out:
+ * @inode1: the inode that is released first
+ * @inode2: the inode that is released second
+ *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+ */
+
+-static int
++static void
+ mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1)
+- mutex_unlock(&inode1->i_mutex);
+-
+- if (inode2 && inode2 != inode1)
+- mutex_unlock(&inode2->i_mutex);
+-
+-out:
+- return ret;
++ mutex_unlock(&inode1->i_mutex);
++ mutex_unlock(&inode2->i_mutex);
+ }
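++
++/* Editor's note (not upstream text): both mext_inode_double_lock()
++ * above and double_down_write_data_sem() now order the two locks by
++ * inode address, so every task locking the same pair takes them in
++ * the same order and an ABBA deadlock cannot form:
++ *
++ *	task A: lock(lower address), then lock(higher address)
++ *	task B: blocks on the lower-addressed lock until A releases it
++ */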
+
+ /**
+@@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
+ ext4_lblk_t rest_blocks;
+ pgoff_t orig_page_offset = 0, seq_end_page;
+- int ret1, ret2, depth, last_extent = 0;
++ int ret, depth, last_extent = 0;
+ int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ int data_offset_in_page;
+ int block_len_in_page;
+ int uninit;
+
+- /* orig and donor should be different file */
+- if (orig_inode->i_ino == donor_inode->i_ino) {
++ if (orig_inode->i_sb != donor_inode->i_sb) {
++ ext4_debug("ext4 move extent: The argument files "
++ "should be in same FS [ino:orig %lu, donor %lu]\n",
++ orig_inode->i_ino, donor_inode->i_ino);
++ return -EINVAL;
++ }
++
++ /* orig and donor should be different inodes */
++ if (orig_inode == donor_inode) {
+ ext4_debug("ext4 move extent: The argument files should not "
+- "be same file [ino:orig %lu, donor %lu]\n",
++ "be same inode [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+@@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+-
++	/* TODO: Swapping blocks for inodes with full journaling
++	   enabled is a non-obvious task */
++ if (ext4_should_journal_data(orig_inode) ||
++ ext4_should_journal_data(donor_inode)) {
++ return -EINVAL;
++ }
+ /* Protect orig and donor inodes against a truncate */
+- ret1 = mext_inode_double_lock(orig_inode, donor_inode);
+- if (ret1 < 0)
+- return ret1;
++ mext_inode_double_lock(orig_inode, donor_inode);
+
+ /* Protect extent tree against block allocations via delalloc */
+ double_down_write_data_sem(orig_inode, donor_inode);
+ /* Check the filesystem environment whether move_extent can be done */
+- ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
++ ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
+ donor_start, &len);
+- if (ret1)
++ if (ret)
+ goto out;
+
+ file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
+@@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (file_end < block_end)
+ len -= block_end - file_end;
+
+- ret1 = get_ext_path(orig_inode, block_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &orig_path);
++ if (ret)
+ goto out;
+
+ /* Get path structure to check the hole */
+- ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &holecheck_path);
++ if (ret)
+ goto out;
+
+ depth = ext_depth(orig_inode);
+@@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode,
+ holecheck_path, &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ last_extent = mext_next_extent(orig_inode, orig_path,
+ &ext_dummy);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ seq_start = le32_to_cpu(ext_cur->ee_block);
+@@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (le32_to_cpu(ext_cur->ee_block) > block_end) {
+ ext4_debug("ext4 move extent: The specified range of file "
+ "may be the hole\n");
+- ret1 = -EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode, holecheck_path,
+ &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ break;
+ }
+ add_blocks = ext4_ext_get_actual_len(ext_cur);
+@@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_page_offset,
+ data_offset_in_page,
+ block_len_in_page, uninit,
+- &ret1);
++ &ret);
+
+ /* Count how many blocks we have exchanged */
+ *moved_len += block_len_in_page;
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+ if (*moved_len > len) {
+ EXT4_ERROR_INODE(orig_inode,
+ "We replaced blocks too much! "
+ "sum of replaced: %llu requested: %llu",
+ *moved_len, len);
+- ret1 = -EIO;
++ ret = -EIO;
+ break;
+ }
+
+@@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ }
+
+ double_down_write_data_sem(orig_inode, donor_inode);
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+
+ /* Decrease buffer counter */
+ if (holecheck_path)
+ ext4_ext_drop_refs(holecheck_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
++ if (ret)
+ break;
+ depth = holecheck_path->p_depth;
+
+ /* Decrease buffer counter */
+ if (orig_path)
+ ext4_ext_drop_refs(orig_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &orig_path);
++ if (ret)
+ break;
+
+ ext_cur = holecheck_path[depth].p_ext;
+@@ -1412,12 +1349,7 @@ out:
+ kfree(holecheck_path);
+ }
+ double_up_write_data_sem(orig_inode, donor_inode);
+- ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
+-
+- if (ret1)
+- return ret1;
+- else if (ret2)
+- return ret2;
++ mext_inode_double_unlock(orig_inode, donor_inode);
+
+- return 0;
++ return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4dd0890..88f97e5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1801,9 +1801,7 @@ retry:
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, inode->i_mode, rdev);
+-#ifdef CONFIG_EXT4_FS_XATTR
+ inode->i_op = &ext4_special_inode_operations;
+-#endif
+ err = ext4_add_nondir(handle, dentry, inode);
+ }
+ ext4_journal_stop(handle);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 54f5786..13bfa07 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -63,6 +63,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
+ {
+ return test_bit(BDI_writeback_running, &bdi->state);
+ }
++EXPORT_SYMBOL(writeback_in_progress);
+
+ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+ {
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index b09e51d..464cd76 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1032,11 +1032,11 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+@@ -1075,11 +1075,11 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 23d7451..df753a1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -40,6 +40,7 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
++ char *nodename;
+ };
+
+ struct nsm_res {
+@@ -93,6 +94,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
++ .nodename = utsname()->nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+@@ -429,7 +431,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, utsname()->nodename);
++ encode_nsm_string(xdr, argp->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index d774309..1aaa0ee 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -164,25 +164,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ return bio;
+ }
+
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+- struct parallel_io *par)
++ struct parallel_io *par,
++ unsigned int offset, int len)
+ {
++ isect = isect + (offset >> SECTOR_SHIFT);
++ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++ npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++ if (bio_add_page(bio, page, len, offset) < len) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+ }
+
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++ sector_t isect, struct page *page,
++ struct pnfs_block_extent *be,
++ void (*end_io)(struct bio *, int err),
++ struct parallel_io *par)
++{
++ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++ end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -446,6 +460,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ return;
+ }
+
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++ struct page *page = bvec->bv_page;
++
++ /* Only one page in bvec */
++ unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int offset, unsigned int len)
++{
++ struct bio *bio;
++ struct page *shadow_page;
++ sector_t isect;
++ char *kaddr, *kshadow_addr;
++ int ret = 0;
++
++ dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (shadow_page == NULL)
++ return -ENOMEM;
++
++ bio = bio_alloc(GFP_NOIO, 1);
++ if (bio == NULL)
++ return -ENOMEM;
++
++ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++ (offset / SECTOR_SIZE);
++
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = bl_read_single_end_io;
++
++ lock_page(shadow_page);
++ if (bio_add_page(bio, shadow_page,
++ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++ unlock_page(shadow_page);
++ bio_put(bio);
++ return -EIO;
++ }
++
++ submit_bio(READ, bio);
++ wait_on_page_locked(shadow_page);
++ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++ ret = -EIO;
++ } else {
++ kaddr = kmap_atomic(page);
++ kshadow_addr = kmap_atomic(shadow_page);
++ memcpy(kaddr + offset, kshadow_addr + offset, len);
++ kunmap_atomic(kshadow_addr);
++ kunmap_atomic(kaddr);
++ }
++ __free_page(shadow_page);
++ bio_put(bio);
++
++ return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int dirty_offset, unsigned int dirty_len,
++ bool full_page)
++{
++ int ret = 0;
++ unsigned int start, end;
++
++ if (full_page) {
++ start = 0;
++ end = PAGE_CACHE_SIZE;
++ } else {
++ start = round_down(dirty_offset, SECTOR_SIZE);
++ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++ }
++
++ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++ if (!be) {
++ zero_user_segments(page, start, dirty_offset,
++ dirty_offset + dirty_len, end);
++ if (start == 0 && end == PAGE_CACHE_SIZE &&
++ trylock_page(page)) {
++ SetPageUptodate(page);
++ unlock_page(page);
++ }
++ return ret;
++ }
++
++ if (start != dirty_offset)
++ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++ if (!ret && (dirty_offset + dirty_len < end))
++ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++ end - dirty_offset - dirty_len);
++
++ return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+@@ -479,7 +593,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ SetPageUptodate(page);
+
+ cleanup:
+- bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+@@ -501,6 +614,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ struct parallel_io *par;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
++ unsigned int pg_offset, pg_len, saved_len;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+@@ -615,10 +729,11 @@ next_page:
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+- isect, NULL);
++ isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+@@ -626,7 +741,26 @@ next_page:
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++ pg_offset = offset & ~PAGE_CACHE_MASK;
++ if (pg_offset + count > PAGE_CACHE_SIZE)
++ pg_len = PAGE_CACHE_SIZE - pg_offset;
++ else
++ pg_len = count;
++
++ saved_len = pg_len;
++ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++ !bl_is_sector_init(be->be_inval, isect)) {
++ ret = bl_read_partial_page_sync(pages[i], cow_read,
++ pg_offset, pg_len, true);
++ if (ret) {
++ dprintk("%s bl_read_partial_page_sync fail %d\n",
++ __func__, ret);
++ wdata->pnfs_error = ret;
++ goto out;
++ }
++
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS,
+ NULL);
+@@ -636,15 +770,35 @@ next_page:
+ wdata->pnfs_error = ret;
+ goto out;
+ }
++
++ /* Expand to full page write */
++ pg_offset = 0;
++ pg_len = PAGE_CACHE_SIZE;
++ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
++ (pg_len & (SECTOR_SIZE - 1))){
++ /* ahh, nasty case. We have to do sync full sector
++ * read-modify-write cycles.
++ */
++ unsigned int saved_offset = pg_offset;
++ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++ pg_len, false);
++ pg_offset = round_down(pg_offset, SECTOR_SIZE);
++ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++ - pg_offset;
+ }
+- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
++
++ bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ isect, pages[i], be,
+- bl_end_io_write, par);
++ bl_end_io_write, par,
++ pg_offset, pg_len);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
++ offset += saved_len;
++ count -= saved_len;
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+@@ -662,12 +816,10 @@ next_page:
+ }
+
+ write_done:
+- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+- if (count < wdata->res.count) {
+- wdata->res.count = count;
+- }
++ wdata->res.count = wdata->args.count;
+ out:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 42acf7e..519a9de 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -40,6 +40,7 @@
+
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+ struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 516b7f0..f66439e 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1289,6 +1289,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
++ ret = 1;
+ goto out_bh;
+ }
+
+@@ -1333,8 +1334,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ UDF_ID_SPARABLE,
+ strlen(UDF_ID_SPARABLE))) {
+ if (udf_load_sparable_map(sb, map,
+- (struct sparablePartitionMap *)gpm) < 0)
++ (struct sparablePartitionMap *)gpm) < 0) {
++ ret = 1;
+ goto out_bh;
++ }
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 7978eec..3e8f2f7 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -188,7 +188,7 @@ struct sp_node {
+
+ struct shared_policy {
+ struct rb_root root;
+- spinlock_t lock;
++ struct mutex mutex;
+ };
+
+ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 67cc215..1874c5e 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1823,7 +1823,6 @@
+ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+ #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+ #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
+-#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530
+
+ #define PCI_VENDOR_ID_RADISYS 0x1331
+
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index e5a7b9a..416dcb0 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1353,7 +1353,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ if (!ct || !nf_ct_is_untracked(ct)) {
+- nf_reset(skb);
++ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
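++
++/* Editor's note (not upstream text): the one-line fix above follows
++ * the usual refcounted-pointer swap pattern: drop the reference held
++ * through the old pointer before overwriting it, then pin the new
++ * object.  nf_reset(), which it replaces, also cleared the skb's
++ * bridge netfilter state on this kernel, which is not wanted here:
++ *
++ *	nf_conntrack_put(skb->nfct);
++ *	skb->nfct = &nf_ct_untracked_get()->ct_general;
++ *	skb->nfctinfo = IP_CT_NEW;
++ *	nf_conntrack_get(skb->nfct);
++ */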
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 6b76d81..a122196 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -292,7 +292,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+ static int
+ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+- return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
++ return *rdp->nxttail[RCU_DONE_TAIL +
++ ACCESS_ONCE(rsp->completed) != rdp->completed] &&
++ !rcu_gp_in_progress(rsp);
+ }
+
+ /*
+diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
+index 8b44e7f..85e9da2 100644
+--- a/kernel/sched_stoptask.c
++++ b/kernel/sched_stoptask.c
+@@ -25,8 +25,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
+ {
+ struct task_struct *stop = rq->stop;
+
+- if (stop && stop->on_rq)
++ if (stop && stop->on_rq) {
++ stop->se.exec_start = rq->clock_task;
+ return stop;
++ }
+
+ return NULL;
+ }
+@@ -50,6 +52,21 @@ static void yield_task_stop(struct rq *rq)
+
+ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+ {
++ struct task_struct *curr = rq->curr;
++ u64 delta_exec;
++
++ delta_exec = rq->clock_task - curr->se.exec_start;
++ if (unlikely((s64)delta_exec < 0))
++ delta_exec = 0;
++
++ schedstat_set(curr->se.statistics.exec_max,
++ max(curr->se.statistics.exec_max, delta_exec));
++
++ curr->se.sum_exec_runtime += delta_exec;
++ account_group_exec_runtime(curr, delta_exec);
++
++ curr->se.exec_start = rq->clock_task;
++ cpuacct_charge(curr, delta_exec);
+ }
+
+ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+@@ -58,6 +75,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+
+ static void set_curr_task_stop(struct rq *rq)
+ {
++ struct task_struct *stop = rq->stop;
++
++ stop->se.exec_start = rq->clock_task;
+ }
+
+ static void switched_to_stop(struct rq *rq, struct task_struct *p)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 481611f..c504302 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
++ disable_nonboot_cpus();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b413138..43a19c5 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1726,10 +1726,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ *nextp = n;
+ }
+
+-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++static void cwq_activate_delayed_work(struct work_struct *work)
+ {
+- struct work_struct *work = list_first_entry(&cwq->delayed_works,
+- struct work_struct, entry);
++ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+ struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
+
+ trace_workqueue_activate_work(work);
+@@ -1738,6 +1737,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+ cwq->nr_active++;
+ }
+
++static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++{
++ struct work_struct *work = list_first_entry(&cwq->delayed_works,
++ struct work_struct, entry);
++
++ cwq_activate_delayed_work(work);
++}
++
+ /**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+@@ -1869,7 +1876,9 @@ __acquires(&gcwq->lock)
+
+ spin_unlock_irq(&gcwq->lock);
+
++ smp_wmb(); /* paired with test_and_set_bit(PENDING) */
+ work_clear_pending(work);
++
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+@@ -2626,6 +2635,18 @@ static int try_to_grab_pending(struct work_struct *work)
+ smp_rmb();
+ if (gcwq == get_work_gcwq(work)) {
+ debug_work_deactivate(work);
++
++ /*
++ * A delayed work item cannot be grabbed directly
++ * because it might have linked NO_COLOR work items
++ * which, if left on the delayed_list, will confuse
++ * cwq->nr_active management later on and cause a
++ * stall. Make sure the work item is activated
++ * before grabbing.
++ */
++ if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
++ cwq_activate_delayed_work(work);
++
+ list_del_init(&work->entry);
+ cwq_dec_nr_in_flight(get_work_cwq(work),
+ get_work_color(work),
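+
+The smp_wmb() added before work_clear_pending() pairs with the
+test_and_set_bit(PENDING) performed by the next caller that queues the work:
+everything written while PENDING was held must be visible before the bit is
+cleared. A loose userspace analogy using C11 release/acquire ordering (these
+are not the kernel primitives, just the same publication pattern):
+
+	#include <stdatomic.h>
+	#include <stdio.h>
+
+	static _Atomic int pending;
+	static int payload;
+
+	static void finish_work(void)
+	{
+		payload = 42;	/* work done while "owning" the flag */
+		/* release store plays the role of smp_wmb() + clear */
+		atomic_store_explicit(&pending, 0, memory_order_release);
+	}
+
+	static int try_queue(void)
+	{
+		/* acquire side of the pairing (test_and_set analogue) */
+		if (atomic_exchange_explicit(&pending, 1,
+					     memory_order_acquire))
+			return 0;	/* already pending */
+		return payload == 42;	/* sees all earlier stores */
+	}
+
+	int main(void)
+	{
+		finish_work();
+		printf("%d\n", try_queue());	/* prints 1 */
+		return 0;
+	}
+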
+diff --git a/lib/gcd.c b/lib/gcd.c
+index f879033..433d89b 100644
+--- a/lib/gcd.c
++++ b/lib/gcd.c
+@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
+
+ if (a < b)
+ swap(a, b);
++
++ if (!b)
++ return a;
+ while ((r = a % b) != 0) {
+ a = b;
+ b = r;
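+
+The two added lines close a divide-by-zero hole: after the swap, b holds the
+smaller argument, so gcd(x, 0) used to reach "a % b" with a zero divisor. A
+self-contained copy of the patched routine, for illustration:
+
+	#include <stdio.h>
+
+	static unsigned long gcd(unsigned long a, unsigned long b)
+	{
+		unsigned long r, t;
+
+		if (a < b) {		/* open-coded swap() */
+			t = a; a = b; b = t;
+		}
+		if (!b)			/* the fix: gcd(x, 0) == x */
+			return a;
+		while ((r = a % b) != 0) {
+			a = b;
+			b = r;
+		}
+		return b;
+	}
+
+	int main(void)
+	{
+		printf("%lu %lu\n", gcd(12, 0), gcd(12, 18)); /* 12 6 */
+		return 0;
+	}
+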
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0f897b8..d6c0fdf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2429,8 +2429,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ * from page cache lookup which is in HPAGE_SIZE units.
+ */
+ address = address & huge_page_mask(h);
+- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+- + (vma->vm_pgoff >> PAGE_SHIFT);
++ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ vma->vm_pgoff;
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
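+
+The pgoff change above is a units fix: vma->vm_pgoff is already expressed in
+PAGE_SIZE units, so the old ">> PAGE_SHIFT" divided it a second time and
+produced a bogus file offset. A tiny arithmetic demo (PAGE_SHIFT of 12 and
+all addresses assumed for illustration):
+
+	#include <stdio.h>
+
+	#define PAGE_SHIFT 12	/* 4 KiB pages assumed */
+
+	int main(void)
+	{
+		unsigned long vm_start = 0x70000000UL;
+		unsigned long vm_pgoff = 512;	/* file offset, in pages */
+		unsigned long address  = vm_start + (3UL << PAGE_SHIFT);
+
+		/* vm_pgoff is already page-granular: add it as-is */
+		unsigned long good = ((address - vm_start) >> PAGE_SHIFT)
+					+ vm_pgoff;
+		/* the pre-patch code divided it by the page size again */
+		unsigned long bad  = ((address - vm_start) >> PAGE_SHIFT)
+					+ (vm_pgoff >> PAGE_SHIFT);
+
+		printf("fixed=%lu pre-patch=%lu\n", good, bad); /* 515 3 */
+		return 0;
+	}
+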
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 11b8d47..4c82c21 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -607,24 +607,39 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ return first;
+ }
+
+-/* Apply policy to a single VMA */
+-static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
++/*
++ * Apply policy to a single VMA
++ * This must be called with the mmap_sem held for writing.
++ */
++static int vma_replace_policy(struct vm_area_struct *vma,
++ struct mempolicy *pol)
+ {
+- int err = 0;
+- struct mempolicy *old = vma->vm_policy;
++ int err;
++ struct mempolicy *old;
++ struct mempolicy *new;
+
+ pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_ops, vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+- if (vma->vm_ops && vma->vm_ops->set_policy)
++ new = mpol_dup(pol);
++ if (IS_ERR(new))
++ return PTR_ERR(new);
++
++ if (vma->vm_ops && vma->vm_ops->set_policy) {
+ err = vma->vm_ops->set_policy(vma, new);
+- if (!err) {
+- mpol_get(new);
+- vma->vm_policy = new;
+- mpol_put(old);
++ if (err)
++ goto err_out;
+ }
++
++ old = vma->vm_policy;
++ vma->vm_policy = new; /* protected by mmap_sem */
++ mpol_put(old);
++
++ return 0;
++ err_out:
++ mpol_put(new);
+ return err;
+ }
+
+@@ -675,7 +690,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ if (err)
+ goto out;
+ }
+- err = policy_vma(vma, new_pol);
++ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
+ }
+@@ -1507,8 +1522,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
+ addr);
+ if (vpol)
+ pol = vpol;
+- } else if (vma->vm_policy)
++ } else if (vma->vm_policy) {
+ pol = vma->vm_policy;
++
++ /*
++ * shmem_alloc_page() passes MPOL_F_SHARED policy with
++ * a pseudo vma whose vma->vm_ops=NULL. Take a reference
++ * count on these policies which will be dropped by
++ * mpol_cond_put() later
++ */
++ if (mpol_needs_cond_ref(pol))
++ mpol_get(pol);
++ }
+ }
+ if (!pol)
+ pol = &default_policy;
+@@ -2032,7 +2057,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ */
+
+ /* lookup first element intersecting start-end */
+-/* Caller holds sp->lock */
++/* Caller holds sp->mutex */
+ static struct sp_node *
+ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
+ {
+@@ -2096,36 +2121,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+
+ if (!sp->root.rb_node)
+ return NULL;
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ sn = sp_lookup(sp, idx, idx+1);
+ if (sn) {
+ mpol_get(sn->policy);
+ pol = sn->policy;
+ }
+- spin_unlock(&sp->lock);
++ mutex_unlock(&sp->mutex);
+ return pol;
+ }
+
++static void sp_free(struct sp_node *n)
++{
++ mpol_put(n->policy);
++ kmem_cache_free(sn_cache, n);
++}
++
+ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
+ {
+ pr_debug("deleting %lx-l%lx\n", n->start, n->end);
+ rb_erase(&n->nd, &sp->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_free(n);
+ }
+
+ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ struct mempolicy *pol)
+ {
+- struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
++ struct sp_node *n;
++ struct mempolicy *newpol;
+
++ n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ if (!n)
+ return NULL;
++
++ newpol = mpol_dup(pol);
++ if (IS_ERR(newpol)) {
++ kmem_cache_free(sn_cache, n);
++ return NULL;
++ }
++ newpol->flags |= MPOL_F_SHARED;
++
+ n->start = start;
+ n->end = end;
+- mpol_get(pol);
+- pol->flags |= MPOL_F_SHARED; /* for unref */
+- n->policy = pol;
++ n->policy = newpol;
++
+ return n;
+ }
+
+@@ -2133,10 +2172,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
+ unsigned long end, struct sp_node *new)
+ {
+- struct sp_node *n, *new2 = NULL;
++ struct sp_node *n;
++ int ret = 0;
+
+-restart:
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ n = sp_lookup(sp, start, end);
+ /* Take care of old policies in the same range. */
+ while (n && n->start < end) {
+@@ -2149,16 +2188,14 @@ restart:
+ } else {
+ /* Old policy spanning whole new range. */
+ if (n->end > end) {
++ struct sp_node *new2;
++ new2 = sp_alloc(end, n->end, n->policy);
+ if (!new2) {
+- spin_unlock(&sp->lock);
+- new2 = sp_alloc(end, n->end, n->policy);
+- if (!new2)
+- return -ENOMEM;
+- goto restart;
++ ret = -ENOMEM;
++ goto out;
+ }
+ n->end = start;
+ sp_insert(sp, new2);
+- new2 = NULL;
+ break;
+ } else
+ n->end = start;
+@@ -2169,12 +2206,9 @@ restart:
+ }
+ if (new)
+ sp_insert(sp, new);
+- spin_unlock(&sp->lock);
+- if (new2) {
+- mpol_put(new2->policy);
+- kmem_cache_free(sn_cache, new2);
+- }
+- return 0;
++out:
++ mutex_unlock(&sp->mutex);
++ return ret;
+ }
+
+ /**
+@@ -2192,7 +2226,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
+ int ret;
+
+ sp->root = RB_ROOT; /* empty tree == default mempolicy */
+- spin_lock_init(&sp->lock);
++ mutex_init(&sp->mutex);
+
+ if (mpol) {
+ struct vm_area_struct pvma;
+@@ -2246,7 +2280,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
+ }
+ err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+ if (err && new)
+- kmem_cache_free(sn_cache, new);
++ sp_free(new);
+ return err;
+ }
+
+@@ -2258,16 +2292,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
+
+ if (!p->root.rb_node)
+ return;
+- spin_lock(&p->lock);
++ mutex_lock(&p->mutex);
+ next = rb_first(&p->root);
+ while (next) {
+ n = rb_entry(next, struct sp_node, nd);
+ next = rb_next(&n->nd);
+- rb_erase(&n->nd, &p->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_delete(p, n);
+ }
+- spin_unlock(&p->lock);
++ mutex_unlock(&p->mutex);
+ }
+
+ /* assumes fs == KERNEL_DS */
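+
+Across these mempolicy hunks, the shared_policy spinlock becomes a mutex so
+that shared_policy_replace() can allocate its split node (sp_alloc, which
+uses GFP_KERNEL) while still holding the lock, removing the old
+unlock/allocate/goto-restart window. A loose pthread sketch of the same
+restructuring (function and field names here are hypothetical):
+
+	#include <pthread.h>
+	#include <stdlib.h>
+
+	static pthread_mutex_t sp_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+	struct sp_node { long start, end; };
+
+	/* With a sleeping lock, the allocation can simply happen under
+	 * the lock; no drop-lock-and-restart dance is needed. */
+	static int replace_range(long start, long end)
+	{
+		int ret = 0;
+
+		pthread_mutex_lock(&sp_mutex);
+		/* ... found an old node spanning past `end` ... */
+		struct sp_node *new2 = malloc(sizeof(*new2));
+		if (!new2) {
+			ret = -1;	/* -ENOMEM in the kernel */
+			goto out;
+		}
+		new2->start = end;
+		/* ... trim the old node, insert new2 into the tree ... */
+		free(new2);		/* demo only: no real tree here */
+	out:
+		pthread_mutex_unlock(&sp_mutex);
+		return ret;
+	}
+
+	int main(void) { return replace_range(0, 10); }
+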
+diff --git a/mm/slab.c b/mm/slab.c
+index cd3ab93..4c3b671 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1669,9 +1669,6 @@ void __init kmem_cache_init_late(void)
+
+ g_cpucache_up = LATE;
+
+- /* Annotate slab for lockdep -- annotate the malloc caches */
+- init_lock_keys();
+-
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+@@ -1679,6 +1676,9 @@ void __init kmem_cache_init_late(void)
+ BUG();
+ mutex_unlock(&cache_chain_mutex);
+
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
+ /* Done! */
+ g_cpucache_up = FULL;
+
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 632b15e..00fb58a 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
+ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
+ return 0;
+
++ clear_page_mlock(page);
++
+ spin_lock_irq(&mapping->tree_lock);
+ if (PageDirty(page))
+ goto failed;
+
+- clear_page_mlock(page);
+ BUG_ON(page_has_private(page));
+ __delete_from_page_cache(page);
+ spin_unlock_irq(&mapping->tree_lock);
+diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+index de9da21..d7d63f4 100644
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+ *dataoff = nhoff + (iph->ihl << 2);
+ *protonum = iph->protocol;
+
++ /* Check bogus IP headers */
++ if (*dataoff > skb->len) {
++ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
++ "nhoff %u, ihl %u, skblen %u\n",
++ nhoff, iph->ihl << 2, skb->len);
++ return -NF_ACCEPT;
++ }
++
+ return NF_ACCEPT;
+ }
+
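+
+The added sanity check rejects packets whose IHL-derived transport-header
+offset points past the end of the buffer. The same bounds test in a
+standalone form (toy packet sizes):
+
+	#include <stdio.h>
+
+	/* dataoff = nhoff + IHL*4 must still lie inside the packet */
+	static int l4_offset_ok(unsigned int nhoff, unsigned int ihl_words,
+				unsigned int skb_len)
+	{
+		unsigned int dataoff = nhoff + (ihl_words << 2);
+
+		return dataoff <= skb_len;
+	}
+
+	int main(void)
+	{
+		printf("%d\n", l4_offset_ok(0, 5, 40));	 /* 1: 20 <= 40 */
+		printf("%d\n", l4_offset_ok(0, 15, 20)); /* 0: 60 > 20 */
+		return 0;
+	}
+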
+diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
+index 78844d9..6609a84 100644
+--- a/net/ipv4/netfilter/nf_nat_sip.c
++++ b/net/ipv4/netfilter/nf_nat_sip.c
+@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+ hdr, NULL, &matchoff, &matchlen,
+ &addr, &port) > 0) {
+- unsigned int matchend, poff, plen, buflen, n;
++ unsigned int olen, matchend, poff, plen, buflen, n;
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+
+ /* We're only interested in headers related to this
+@@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ goto next;
+ }
+
++ olen = *datalen;
+ if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
+ &addr, port))
+ return NF_DROP;
+
+- matchend = matchoff + matchlen;
++ matchend = matchoff + matchlen + *datalen - olen;
+
+ /* The maddr= parameter (RFC 2361) specifies where to send
+ * the reply. */
+@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
+ ret = nf_ct_expect_related(rtcp_exp);
+ if (ret == 0)
+ break;
+- else if (ret != -EBUSY) {
++ else if (ret == -EBUSY) {
++ nf_ct_unexpect_related(rtp_exp);
++ continue;
++ } else if (ret < 0) {
+ nf_ct_unexpect_related(rtp_exp);
+ port = 0;
+ break;
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 340c80d..7918eb7 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -366,23 +366,6 @@ static void evict_oldest_expect(struct nf_conn *master,
+ }
+ }
+
+-static inline int refresh_timer(struct nf_conntrack_expect *i)
+-{
+- struct nf_conn_help *master_help = nfct_help(i->master);
+- const struct nf_conntrack_expect_policy *p;
+-
+- if (!del_timer(&i->timeout))
+- return 0;
+-
+- p = &rcu_dereference_protected(
+- master_help->helper,
+- lockdep_is_held(&nf_conntrack_lock)
+- )->expect_policy[i->class];
+- i->timeout.expires = jiffies + p->timeout * HZ;
+- add_timer(&i->timeout);
+- return 1;
+-}
+-
+ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ {
+ const struct nf_conntrack_expect_policy *p;
+@@ -390,7 +373,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ struct nf_conn *master = expect->master;
+ struct nf_conn_help *master_help = nfct_help(master);
+ struct net *net = nf_ct_exp_net(expect);
+- struct hlist_node *n;
++ struct hlist_node *n, *next;
+ unsigned int h;
+ int ret = 1;
+
+@@ -401,12 +384,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ goto out;
+ }
+ h = nf_ct_expect_dst_hash(&expect->tuple);
+- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
++ hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+ if (expect_matches(i, expect)) {
+- /* Refresh timer: if it's dying, ignore.. */
+- if (refresh_timer(i)) {
+- ret = 0;
+- goto out;
++ if (del_timer(&i->timeout)) {
++ nf_ct_unlink_expect(i);
++ nf_ct_expect_put(i);
++ break;
+ }
+ } else if (expect_clash(i, expect)) {
+ ret = -EBUSY;
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index dfd52ba..8f3f280 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+
+ /* Precision saver. */
+-static inline u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -400,7 +399,7 @@ user2credits(u_int32_t user)
+ return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
+ }
+
+-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
++static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+ {
+ dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
+ if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
+@@ -531,8 +530,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ dh->rateinfo.prev = jiffies;
+ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+ hinfo->cfg.burst);
+- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
+- hinfo->cfg.burst);
++ dh->rateinfo.credit_cap = dh->rateinfo.credit;
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ } else {
+ /* update expiration timeout */
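+
+user2credits(), visible in context above, guards its multiplication with a
+divide-before-multiply idiom: if user * HZ * CREDITS_PER_JIFFY would
+overflow 32 bits, it divides by the scale factor first. A runnable rendering
+of the idiom (HZ and the scale constant are assumed demo values):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define HZ 100			/* assumed for the demo */
+	#define CREDITS_PER_JIFFY 128
+	#define SCALE 10000		/* stands in for XT_HASHLIMIT_SCALE */
+
+	static uint32_t user2credits(uint32_t user)
+	{
+		/* If multiplying would overflow, divide first so the
+		 * intermediate stays within 32 bits. */
+		if (user > 0xFFFFFFFFu / (HZ * CREDITS_PER_JIFFY))
+			return (user / SCALE) * HZ * CREDITS_PER_JIFFY;
+		return (user * HZ * CREDITS_PER_JIFFY) / SCALE;
+	}
+
+	int main(void)
+	{
+		printf("%u\n", user2credits(2));	  /* multiply first */
+		printf("%u\n", user2credits(400000000u)); /* divide first */
+		return 0;
+	}
+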
+diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
+index 32b7a57..a4c1e45 100644
+--- a/net/netfilter/xt_limit.c
++++ b/net/netfilter/xt_limit.c
+@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ }
+
+ /* Precision saver. */
+-static u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
+
+ /* For SMP, we only want to use one set of state. */
+ r->master = priv;
++ /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
++ 128. */
++ priv->prev = jiffies;
++ priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+ if (r->cost == 0) {
+- /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+- 128. */
+- priv->prev = jiffies;
+- priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
++ r->credit_cap = priv->credit; /* Credits full. */
+ r->cost = user2credits(r->avg);
+ }
+ return 0;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index c5391af..10a385b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1028,6 +1028,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++ xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1054,7 +1064,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ /* Sanity check of the record length */
+ if (unlikely(transport->tcp_reclen < 8)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+@@ -1135,7 +1145,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+- xprt_force_disconnect(&transport->xprt);
++ xs_tcp_force_close(&transport->xprt);
+ }
+ xs_tcp_check_fraghdr(transport);
+ }
+@@ -1458,6 +1468,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1515,8 +1527,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- xprt_force_disconnect(xprt);
+ xprt->connect_cookie++;
++ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+ * If the server closed down the connection, make sure that
+@@ -2159,8 +2171,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index d897278..978416d 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -98,24 +98,24 @@ try-run = $(shell set -e; \
+ # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
+
+ as-option = $(call try-run,\
+- $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
+
+ # as-instr
+ # Usage: cflags-y += $(call as-instr,instr,option1,option2)
+
+ as-instr = $(call try-run,\
+- /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
++ printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+
+ # cc-option
+ # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+
+ cc-option = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # cc-option-yn
+ # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
+ cc-option-yn = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+
+ # cc-option-align
+ # Prefix align with either -falign or -malign
+@@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
+ # cc-disable-warning
+ # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
+ cc-disable-warning = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+
+ # cc-version
+ # Usage gcc-ver := $(call cc-version)
+@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+ # cc-ldoption
+ # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ cc-ldoption = $(call try-run,\
+- $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # ld-option
+ # Usage: LDFLAGS += $(call ld-option, -X)
+@@ -209,7 +209,7 @@ endif
+ # >$< substitution to preserve $ when reloading .cmd file
+ # note: when using inline perl scripts [perl -e '...$$t=1;...']
+ # in $(cmd_xxx) double $$ your perl vars
+-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
++make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
+
+ # Find any prerequisites that is newer than target or that does not exist.
+ # PHONY targets skipped in both cases.
+diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
+index debecb5..7f2126d 100644
+--- a/scripts/gcc-version.sh
++++ b/scripts/gcc-version.sh
+@@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
+ exit 1
+ fi
+
+-MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
+-MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
++MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
++MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
+ if [ "x$with_patchlevel" != "x" ] ; then
+- PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
++ PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
+ printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
+ else
+ printf "%02d%02d\\n" $MAJOR $MINOR
+diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
+index 29493dc..12dbd0b 100644
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index afaec61..973e8c1 100644
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
+index fa59cbf..854d9c7 100755
+--- a/scripts/kconfig/check.sh
++++ b/scripts/kconfig/check.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ # Needed for systems without gettext
+-$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
++$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
+ #include <libintl.h>
+ int main()
+ {
+diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
+index 82cc3a8..50df490 100644
+--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
++++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
+@@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
+
+ # Check if we can link to ncurses
+ check() {
+- $cc -xc - -o $tmp 2>/dev/null <<'EOF'
++ $cc -x c - -o $tmp 2>/dev/null <<'EOF'
+ #include CURSES_LOC
+ main() {}
+ EOF
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index bccf07dd..3346f42 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -463,6 +463,8 @@ while(<CIN>) {
+ if (defined($configs{$1})) {
+ if ($localyesconfig) {
+ $setconfigs{$1} = 'y';
++ print "$1=y\n";
++ next;
+ } else {
+ $setconfigs{$1} = $2;
+ }
+diff --git a/scripts/package/buildtar b/scripts/package/buildtar
+index 8a7b155..d0d748e 100644
+--- a/scripts/package/buildtar
++++ b/scripts/package/buildtar
+@@ -109,7 +109,7 @@ esac
+ if tar --owner=root --group=root --help >/dev/null 2>&1; then
+ opts="--owner=root --group=root"
+ fi
+- tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
++ tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
+ )
+
+ echo "Tarball successfully created in ${tarball}${file_ext}"
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index d83bafc..193ce81 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -119,6 +119,7 @@ struct loopback_pcm {
+ unsigned int period_size_frac;
+ unsigned long last_jiffies;
+ struct timer_list timer;
++ spinlock_t timer_lock;
+ };
+
+ static struct platform_device *devices[SNDRV_CARDS];
+@@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ unsigned long tick;
+ unsigned int rate_shift = get_rate_shift(dpcm);
+
++ spin_lock(&dpcm->timer_lock);
+ if (rate_shift != dpcm->pcm_rate_shift) {
+ dpcm->pcm_rate_shift = rate_shift;
+ dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
+@@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
+ dpcm->timer.expires = jiffies + tick;
+ add_timer(&dpcm->timer);
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+ {
++ spin_lock(&dpcm->timer_lock);
+ del_timer(&dpcm->timer);
+ dpcm->timer.expires = 0;
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
+@@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ dpcm->substream = substream;
+ setup_timer(&dpcm->timer, loopback_timer_function,
+ (unsigned long)dpcm);
++ spin_lock_init(&dpcm->timer_lock);
+
+ cable = loopback->cables[substream->number][dev];
+ if (!cable) {
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 402f330..94f0c4a 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -139,6 +139,7 @@ struct conexant_spec {
+ unsigned int asus:1;
+ unsigned int pin_eapd_ctrls:1;
+ unsigned int single_adc_amp:1;
++ unsigned int fixup_stereo_dmic:1;
+
+ unsigned int adc_switching:1;
+
+@@ -4113,9 +4114,9 @@ static int cx_auto_init(struct hda_codec *codec)
+
+ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ const char *dir, int cidx,
+- hda_nid_t nid, int hda_dir, int amp_idx)
++ hda_nid_t nid, int hda_dir, int amp_idx, int chs)
+ {
+- static char name[32];
++ static char name[44];
+ static struct snd_kcontrol_new knew[] = {
+ HDA_CODEC_VOLUME(name, 0, 0, 0),
+ HDA_CODEC_MUTE(name, 0, 0, 0),
+@@ -4125,7 +4126,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+
+ for (i = 0; i < 2; i++) {
+ struct snd_kcontrol *kctl;
+- knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
++ knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, chs, amp_idx,
+ hda_dir);
+ knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
+ knew[i].index = cidx;
+@@ -4144,7 +4145,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ }
+
+ #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
+- cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
++ cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0, 3)
+
+ #define cx_auto_add_pb_volume(codec, nid, str, idx) \
+ cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
+@@ -4214,6 +4215,36 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
+ return 0;
+ }
+
++/* Returns zero if this is a normal stereo channel, and non-zero if it should
++ be split into two independent channels.
++ dest_label must be at least 44 characters. */
++static int cx_auto_get_rightch_label(struct hda_codec *codec, const char *label,
++ char *dest_label, int nid)
++{
++ struct conexant_spec *spec = codec->spec;
++ int i;
++
++ if (!spec->fixup_stereo_dmic)
++ return 0;
++
++ for (i = 0; i < AUTO_CFG_MAX_INS; i++) {
++ int def_conf;
++ if (spec->autocfg.inputs[i].pin != nid)
++ continue;
++
++ if (spec->autocfg.inputs[i].type != AUTO_PIN_MIC)
++ return 0;
++ def_conf = snd_hda_codec_get_pincfg(codec, nid);
++ if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT)
++ return 0;
++
++ /* Finally found the inverted internal mic! */
++ snprintf(dest_label, 44, "Inverted %s", label);
++ return 1;
++ }
++ return 0;
++}
++
+ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ const char *label, const char *pfx,
+ int cidx)
+@@ -4222,14 +4253,25 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ int i;
+
+ for (i = 0; i < spec->num_adc_nids; i++) {
++ char rightch_label[44];
+ hda_nid_t adc_nid = spec->adc_nids[i];
+ int idx = get_input_connection(codec, adc_nid, nid);
+ if (idx < 0)
+ continue;
+ if (spec->single_adc_amp)
+ idx = 0;
++
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ /* Make two independent kcontrols for left and right */
++ int err = cx_auto_add_volume_idx(codec, label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 2);
++ }
+ return cx_auto_add_volume_idx(codec, label, pfx,
+- cidx, adc_nid, HDA_INPUT, idx);
++ cidx, adc_nid, HDA_INPUT, idx, 3);
+ }
+ return 0;
+ }
+@@ -4242,9 +4284,19 @@ static int cx_auto_add_boost_volume(struct hda_codec *codec, int idx,
+ int i, con;
+
+ nid = spec->imux_info[idx].pin;
+- if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
++ if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
++ char rightch_label[44];
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ int err = cx_auto_add_volume_idx(codec, label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 2);
++ }
+ return cx_auto_add_volume(codec, label, " Boost", cidx,
+ nid, HDA_INPUT);
++ }
+ con = __select_input_connection(codec, spec->imux_info[idx].adc, nid,
+ &mux, false, 0);
+ if (con < 0)
+@@ -4398,23 +4450,31 @@ static void apply_pincfg(struct hda_codec *codec, const struct cxt_pincfg *cfg)
+
+ }
+
+-static void apply_pin_fixup(struct hda_codec *codec,
++enum {
++ CXT_PINCFG_LENOVO_X200,
++ CXT_PINCFG_LENOVO_TP410,
++ CXT_FIXUP_STEREO_DMIC,
++};
++
++static void apply_fixup(struct hda_codec *codec,
+ const struct snd_pci_quirk *quirk,
+ const struct cxt_pincfg **table)
+ {
++ struct conexant_spec *spec = codec->spec;
++
+ quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+- if (quirk) {
++ if (quirk && table[quirk->value]) {
+ snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
+ quirk->name);
+ apply_pincfg(codec, table[quirk->value]);
+ }
++ if (quirk && quirk->value == CXT_FIXUP_STEREO_DMIC) {
++ snd_printdd(KERN_INFO "hda_codec: applying internal mic workaround for %s\n",
++ quirk->name);
++ spec->fixup_stereo_dmic = 1;
++ }
+ }
+
+-enum {
+- CXT_PINCFG_LENOVO_X200,
+- CXT_PINCFG_LENOVO_TP410,
+-};
+-
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
+ { 0x16, 0x042140ff }, /* HP (seq# overridden) */
+@@ -4434,6 +4494,7 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
+ static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
+ [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
+ [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
++ [CXT_FIXUP_STEREO_DMIC] = NULL,
+ };
+
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
+@@ -4447,6 +4508,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ {}
+ };
+
+@@ -4486,10 +4550,10 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ break;
+ case 0x14f15051:
+ add_cx5051_fake_mutes(codec);
+- apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
+ break;
+ default:
+- apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
+ break;
+ }
+
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 323d4d9..0961d88 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -348,7 +348,7 @@ int main(void)
+ fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+ if (fd < 0) {
+ syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ addr.nl_family = AF_NETLINK;
+ addr.nl_pad = 0;
+@@ -360,7 +360,7 @@ int main(void)
+ if (error < 0) {
+ syslog(LOG_ERR, "bind failed; error:%d", error);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ sock_opt = addr.nl_groups;
+ setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
+@@ -378,7 +378,7 @@ int main(void)
+ if (len < 0) {
+ syslog(LOG_ERR, "netlink_send failed; error:%d", len);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+
+ pfd.fd = fd;
+@@ -497,7 +497,7 @@ int main(void)
+ len = netlink_send(fd, incoming_cn_msg);
+ if (len < 0) {
+ syslog(LOG_ERR, "net_link send failed; error:%d", len);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ }
+
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index b98e307..e45d2b1 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -56,7 +56,7 @@ ifeq ($(ARCH),x86_64)
+ ARCH := x86
+ IS_X86_64 := 0
+ ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
+- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
++ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
+ endif
+ ifeq (${IS_X86_64}, 1)
+ RAW_ARCH := x86_64
+diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
+index e8a03ac..7db8da5 100644
+--- a/tools/power/cpupower/Makefile
++++ b/tools/power/cpupower/Makefile
+@@ -100,7 +100,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;}
+ export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
+
+ # check if compiler option is supported
+-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
++cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+ # use '-Os' optimization if available, else use -O2
+ OPTIMIZATION := $(call cc-supports,-Os,-O2)
diff --git a/3.2.54/1032_linux-3.2.33.patch b/3.2.54/1032_linux-3.2.33.patch
new file mode 100644
index 0000000..c32fb75
--- /dev/null
+++ b/3.2.54/1032_linux-3.2.33.patch
@@ -0,0 +1,3450 @@
+diff --git a/Makefile b/Makefile
+index b6d8282..63ca1ea2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 3d5fc41..bf53047 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -28,7 +28,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+@@ -52,7 +52,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 1d1710e..bfa0eeb 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -295,18 +295,24 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+ asmlinkage void __cpuinit secondary_start_kernel(void)
+ {
+ struct mm_struct *mm = &init_mm;
+- unsigned int cpu = smp_processor_id();
++ unsigned int cpu;
++
++ /*
++ * The identity mapping is uncached (strongly ordered), so
++ * switch away from it before attempting any exclusive accesses.
++ */
++ cpu_switch_mm(mm->pgd, mm);
++ enter_lazy_tlb(mm, current);
++ local_flush_tlb_all();
+
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
++ cpu = smp_processor_id();
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+- cpu_switch_mm(mm->pgd, mm);
+- enter_lazy_tlb(mm, current);
+- local_flush_tlb_all();
+
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index f4546e9..23817a6 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+
++#ifdef CONFIG_KPROBES
++ /*
++ * Return immediately if the kprobes fault notifier has set
++ * DIE_PAGE_FAULT.
++ */
++ if (cmd == DIE_PAGE_FAULT)
++ return NOTIFY_DONE;
++#endif /* CONFIG_KPROBES */
++
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
+index d80f79d..8e1fb82 100644
+--- a/arch/s390/boot/compressed/vmlinux.lds.S
++++ b/arch/s390/boot/compressed/vmlinux.lds.S
+@@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+ OUTPUT_ARCH(s390:64-bit)
+ #else
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ #endif
+
+ ENTRY(startup)
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index e4c79eb..e43d21e 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -8,7 +8,7 @@
+
+ #ifndef CONFIG_64BIT
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ ENTRY(_start)
+ jiffies = jiffies_64 + 4;
+ #else
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 614da62..3c8f220 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -555,11 +555,13 @@ static u64 nop_for_index(int idx)
+
+ static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
+ {
+- u64 val, mask = mask_for_index(idx);
++ u64 enc, val, mask = mask_for_index(idx);
++
++ enc = perf_event_get_enc(cpuc->events[idx]);
+
+ val = cpuc->pcr;
+ val &= ~mask;
+- val |= hwc->config;
++ val |= event_encoding(enc, idx);
+ cpuc->pcr = val;
+
+ pcr_ops->write(cpuc->pcr);
+@@ -1422,8 +1424,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+ do {
+ struct sparc_stackf *usf, sf;
+@@ -1444,8 +1444,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+ do {
+ struct sparc_stackf32 *usf, sf;
+@@ -1464,6 +1462,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ {
++ perf_callchain_store(entry, regs->tpc);
++
++ if (!current->mm)
++ return;
++
+ flushw_user();
+ if (test_thread_flag(TIF_32BIT))
+ perf_callchain_user_32(entry, regs);
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 441521a..5e4252b 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+ int ret;
+
+- if (current->personality == PER_LINUX32 &&
+- personality == PER_LINUX)
+- personality = PER_LINUX32;
++ if (personality(current->personality) == PER_LINUX32 &&
++ personality(personality) == PER_LINUX)
++ personality |= PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret &= ~PER_LINUX32;
+
+ return ret;
+ }
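+
+The personality fix compares base personalities through the personality()
+mask instead of raw values, and ORs PER_LINUX32 back in rather than
+assigning it, so flag bits such as ADDR_LIMIT_3GB survive the syscall. A
+small demo using the usual <linux/personality.h> constants (reproduced here
+by assumption):
+
+	#include <stdio.h>
+
+	#define PER_MASK	0x00ff
+	#define PER_LINUX	0x0000
+	#define PER_LINUX32	0x0008
+	#define ADDR_LIMIT_3GB	0x8000000
+	#define personality(p)	((p) & PER_MASK)
+
+	int main(void)
+	{
+		unsigned long cur = PER_LINUX32 | ADDR_LIMIT_3GB;
+		unsigned long req = PER_LINUX;
+
+		/* Pre-patch code compared raw values, so the flag bits
+		 * in `cur` made the PER_LINUX32 test fail. */
+		if (personality(cur) == PER_LINUX32 &&
+		    personality(req) == PER_LINUX)
+			req |= PER_LINUX32;	/* preserve, don't clobber */
+
+		printf("%#lx\n", req);		/* prints 0x8 */
+		return 0;
+	}
+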
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 1d7e274..7f5f65d 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -212,24 +212,20 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+- /* Check if force_successful_syscall_return()
+- * was invoked.
+- */
+- ldub [%g6 + TI_SYS_NOERROR], %l2
+- brnz,a,pn %l2, 80f
+- stb %g0, [%g6 + TI_SYS_NOERROR]
+-
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+-80:
++ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
++
++2:
++ stb %g0, [%g6 + TI_SYS_NOERROR]
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
++3:
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ add %l1, 0x4, %l2 ! npc = npc+4
+@@ -238,20 +234,20 @@ ret_sys_call:
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+ 1:
++ /* Check if force_successful_syscall_return()
++ * was invoked.
++ */
++ ldub [%g6 + TI_SYS_NOERROR], %l2
++ brnz,pn %l2, 2b
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+ sub %g0, %o0, %o0
+- or %g3, %g2, %g3
+ stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+- bne,pn %icc, linux_syscall_trace2
+- add %l1, 0x4, %l2 ! npc = npc+4
+- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
++ ba,pt %xcc, 3b
++ or %g3, %g2, %g3
+
+- b,pt %xcc, rtrap
+- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+ linux_syscall_trace2:
+ call syscall_trace_leave
+ add %sp, PTREGS_OFF, %o0
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 8e073d8..6ff4d78 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ unsigned long vmemmap_table[VMEMMAP_SIZE];
+
++static long __meminitdata addr_start, addr_end;
++static int __meminitdata node_start;
++
+ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+ {
+ unsigned long vstart = (unsigned long) start;
+@@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+
+ *vmem_pp = pte_base | __pa(block);
+
+- printk(KERN_INFO "[%p-%p] page_structs=%lu "
+- "node=%d entry=%lu/%lu\n", start, block, nr,
+- node,
+- addr >> VMEMMAP_CHUNK_SHIFT,
+- VMEMMAP_SIZE);
++ /* check to see if we have contiguous blocks */
++ if (addr_end != addr || node_start != node) {
++ if (addr_start)
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = addr;
++ node_start = node;
++ }
++ addr_end = addr + VMEMMAP_CHUNK;
+ }
+ }
+ return 0;
+ }
++
++void __meminit vmemmap_populate_print_last(void)
++{
++ if (addr_start) {
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = 0;
++ addr_end = 0;
++ node_start = 0;
++ }
++}
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ static void prot_init_common(unsigned long page_none,
+diff --git a/arch/tile/Makefile b/arch/tile/Makefile
+index 17acce7..04c637c 100644
+--- a/arch/tile/Makefile
++++ b/arch/tile/Makefile
+@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
+ endif
+ endif
+
++# The tile compiler may emit .eh_frame information for backtracing.
++# In kernel modules, this causes load failures due to unsupported relocations.
++KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++
+ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
+ KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
+ endif
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index bcda816..4893d58 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1025,7 +1025,7 @@ ENTRY(xen_sysenter_target)
+
+ ENTRY(xen_hypervisor_callback)
+ CFI_STARTPROC
+- pushl_cfi $0
++ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ TRACE_IRQS_OFF
+
+@@ -1067,14 +1067,16 @@ ENTRY(xen_failsafe_callback)
+ 2: mov 8(%esp),%es
+ 3: mov 12(%esp),%fs
+ 4: mov 16(%esp),%gs
++ /* EAX == 0 => Category 1 (Bad segment)
++ EAX != 0 => Category 2 (Bad IRET) */
+ testl %eax,%eax
+ popl_cfi %eax
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+ addl $16,%esp
+- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
+-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
++ jmp iret_exc
++5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ jmp ret_from_exception
+ CFI_ENDPROC
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index faf8d5e..6274f5f 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1303,7 +1303,7 @@ ENTRY(xen_failsafe_callback)
+ CFI_RESTORE r11
+ addq $0x30,%rsp
+ CFI_ADJUST_CFA_OFFSET -0x30
+- pushq_cfi $0
++ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 75f9528..6bc0899 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ val |= counter_config->extra;
+ event &= model->event_mask ? model->event_mask : 0xFF;
+ val |= event & 0xFF;
+- val |= (event & 0x0F00) << 24;
++ val |= (u64)(event & 0x0F00) << 24;
+
+ return val;
+ }
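+
+The (u64) cast matters because (event & 0x0F00) << 24 is evaluated in 32-bit
+arithmetic: the four high event-select bits, destined for bits 32-35 of the
+64-bit control value, were shifted out before the OR. A quick demonstration
+(the unsigned shift below stands in for the pre-patch truncation):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int event = 0x13c;	/* 12-bit event, example */
+
+		/* widen first, as the patch does */
+		uint64_t good = (uint64_t)(event & 0x0F00) << 24;
+		/* shift in 32 bits, then widen: high bits are gone */
+		uint64_t bad  = (uint64_t)((event & 0x0F00u) << 24);
+
+		printf("good=%#llx bad=%#llx\n",
+		       (unsigned long long)good, (unsigned long long)bad);
+		return 0;	/* good=0x100000000 bad=0 */
+	}
+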
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a1e21ae..69b9ef6 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -818,7 +818,16 @@ static void xen_write_cr4(unsigned long cr4)
+
+ native_write_cr4(cr4);
+ }
+-
++#ifdef CONFIG_X86_64
++static inline unsigned long xen_read_cr8(void)
++{
++ return 0;
++}
++static inline void xen_write_cr8(unsigned long val)
++{
++ BUG_ON(val);
++}
++#endif
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
+@@ -987,6 +996,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = xen_write_cr4,
+
++#ifdef CONFIG_X86_64
++ .read_cr8 = xen_read_cr8,
++ .write_cr8 = xen_write_cr8,
++#endif
++
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
+@@ -997,6 +1011,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
++ .read_tscp = native_read_tscp,
++
+ .iret = xen_iret,
+ .irq_enable_sysexit = xen_sysexit,
+ #ifdef CONFIG_X86_64
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index b19a18d..d2519b2 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -71,9 +71,6 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+
+-#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
+- per one transaction */
+-
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
++/*
++ * If the number of false interrupts in one transaction exceeds
++ * this threshold, the driver assumes a GPE storm has happened and
++ * disables the GPE for normal transactions.
++ */
++static unsigned int ec_storm_threshold __read_mostly = 8;
++module_param(ec_storm_threshold, uint, 0644);
++MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not treated as a GPE storm");
++
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+ /* External interfaces use first EC only, so remember */
+ typedef int (*acpi_ec_query_func) (void *data);
+@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ msleep(1);
+ /* It is safe to enable the GPE outside of the transaction. */
+ acpi_enable_gpe(NULL, ec->gpe);
+- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++ } else if (t->irq_count > ec_storm_threshold) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+@@ -914,6 +920,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * The Clevo M720 notebook actually works fine in IRQ mode if we lift
++ * the GPE storm threshold back to 20
++ */
++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
++{
++ pr_debug("Setting the EC GPE storm threshold to 20\n");
++ ec_storm_threshold = 20;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -945,10 +962,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
++ {
++ ec_enlarge_storm_threshold, "CLEVO hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ {},
+ };
+
+-
+ int __init acpi_ec_ecdt_probe(void)
+ {
+ acpi_status status;
+diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
+index 10f92b3..7a987a7 100644
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -124,9 +124,10 @@ static int bcma_register_cores(struct bcma_bus *bus)
+
+ static void bcma_unregister_cores(struct bcma_bus *bus)
+ {
+- struct bcma_device *core;
++ struct bcma_device *core, *tmp;
+
+- list_for_each_entry(core, &bus->cores, list) {
++ list_for_each_entry_safe(core, tmp, &bus->cores, list) {
++ list_del(&core->list);
+ if (core->dev_registered)
+ device_unregister(&core->dev);
+ }
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index b366b34..0d91655 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1072,17 +1072,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+ {
+ struct tpm_chip *chip = file->private_data;
+- size_t in_size = size, out_size;
++ size_t in_size = size;
++ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout */
+- while (atomic_read(&chip->data_pending) != 0)
+- msleep(TPM_TIMEOUT);
+-
+- mutex_lock(&chip->buffer_mutex);
++ either via tpm_read or a user_read_timer timeout.
++ This also prevents split buffered writes from blocking here.
++ */
++ if (atomic_read(&chip->data_pending) != 0)
++ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+- in_size = TPM_BUFSIZE;
++ return -E2BIG;
++
++ mutex_lock(&chip->buffer_mutex);
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1092,6 +1095,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++ if (out_size < 0) {
++ mutex_unlock(&chip->buffer_mutex);
++ return out_size;
++ }
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index b7fe343..f6cd315 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1216,14 +1216,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+ .relation = relation };
+
+- /*
+- * Must run on @pol->cpu. cpufreq core is responsible for ensuring
+- * that we're bound to the current CPU and pol->cpu stays online.
+- */
+- if (smp_processor_id() == pol->cpu)
+- return powernowk8_target_fn(&pta);
+- else
+- return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
++ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ }
+
+ /* Driver entry point to verify the policy and range of frequencies */
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index c9eee6d..a9d5482 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+ * memory controller and apply to register. Search for the first
+ * bandwidth entry that is greater or equal than the setting requested
+ * and program that. If at last entry, turn off DRAM scrubbing.
++ *
++ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
++ * by falling back to the last element in scrubrates[].
+ */
+- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
++ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
+ /*
+ * skip scrub rates which aren't recommended
+ * (see F10 BKDG, F3x58)
+@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+
+ if (scrubrates[i].bandwidth <= new_bw)
+ break;
+-
+- /*
+- * if no suitable bandwidth found, turn off DRAM scrubbing
+- * entirely by falling back to the last element in the
+- * scrubrates array.
+- */
+ }
+
+ scrubval = scrubrates[i].scrubval;
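+
+Bounding the loop at ARRAY_SIZE(scrubrates) - 1 fixes two things at once:
+scrubrates[i] can no longer be indexed one past the end when nothing
+matches, and the table's final entry becomes the implicit "scrubbing off"
+fallback. The same sentinel-last-element search in miniature:
+
+	#include <stdio.h>
+
+	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+	struct rate { unsigned int bw, val; };
+
+	/* last entry doubles as the "off" fallback */
+	static const struct rate rates[] = {
+		{ 1600, 1 }, { 800, 2 }, { 400, 3 }, { 0, 0 /* off */ },
+	};
+
+	static unsigned int pick(unsigned int new_bw)
+	{
+		unsigned int i;
+
+		for (i = 0; i < ARRAY_SIZE(rates) - 1; i++)
+			if (rates[i].bw <= new_bw)
+				break;
+		return rates[i].val;	/* in bounds even on no match */
+	}
+
+	int main(void)
+	{
+		printf("%u %u\n", pick(900), pick(0));	/* 2 0 */
+		return 0;
+	}
+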
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 33e1555..dbe4dbe 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -999,6 +999,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ if (obj->phys_obj)
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ else if (obj->gtt_space &&
++ obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index f07bde2..57152a7 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -771,6 +771,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "ZOTAC ZBOXSD-ID12/ID13",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
++ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 2f46e0c..3ad3cc6 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+ {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+- if (tmds) {
+- if (tmds->i2c_bus)
+- radeon_i2c_destroy(tmds->i2c_bus);
+- }
++ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 4065374..f4c3d28 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -146,14 +146,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+ ret = hv_ringbuffer_init(
+ &newchannel->inbound, in, recv_ringbuffer_size);
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+
+@@ -168,7 +168,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+ /* Create and init the channel open message */
+@@ -177,7 +177,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto errorout;
++ goto error0;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -193,7 +193,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto errorout;
++ goto error0;
+ }
+
+ if (userdatalen)
+@@ -208,19 +208,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ sizeof(struct vmbus_channel_open_channel));
+
+ if (ret != 0)
+- goto cleanup;
++ goto error1;
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+ err = -ETIMEDOUT;
+- goto errorout;
++ goto error1;
+ }
+
+
+ if (open_info->response.open_result.status)
+ err = open_info->response.open_result.status;
+
+-cleanup:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -228,9 +227,12 @@ cleanup:
+ kfree(open_info);
+ return err;
+
+-errorout:
+- hv_ringbuffer_cleanup(&newchannel->outbound);
+- hv_ringbuffer_cleanup(&newchannel->inbound);
++error1:
++ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
++ list_del(&open_info->msglistentry);
++ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
++
++error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+ kfree(open_info);
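The vmbus_open() hunk above replaces one catch-all errorout label with staged labels: error1 runs only after open_info has been queued on the channel-message list, so it unlinks the entry under the lock before falling through to error0, which releases the ring-buffer pages common to every failure. A compilable sketch of the staged-unwind idiom with stand-in step functions:

    static int step_a(void) { return 0; }    /* e.g. allocate rings  */
    static int step_b(void) { return -1; }   /* e.g. queue on a list */
    static void undo_b(void) { /* unlink under the proper lock */ }
    static void undo_a(void) { /* free what step_a allocated   */ }

    int setup(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto err0;
            err = step_b();
            if (err)
                    goto err1;
            return 0;

    err1:
            undo_b();       /* undo only what step_b published */
    err0:
            undo_a();       /* always undo step_a's allocation */
            return err;
    }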
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0634ee5..8f67c4d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2641,7 +2641,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+- max_sync = max_sync;
++ max_sync = bad_sectors;
+ continue;
+ }
+ }
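The raid10 hunk above fixes a self-assignment typo: "max_sync = max_sync;" compiled silently but never clamped the resync window to stop short of a known-bad sector range (clang's -Wself-assign flags this class of bug; GCC of that era did not). A runnable illustration of the intended clamp:

    #include <assert.h>

    /* Clamp the resync window so it stops before the bad range. */
    static unsigned long clamp_window(unsigned long max_sync,
                                      unsigned long bad_sectors)
    {
            return bad_sectors < max_sync ? bad_sectors : max_sync;
    }

    int main(void)
    {
            assert(clamp_window(1024, 8) == 8);   /* clamped       */
            assert(clamp_window(4, 8) == 4);      /* already small */
            return 0;
    }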
+diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
+index 0b3e481..eab0641 100644
+--- a/drivers/media/video/au0828/au0828-video.c
++++ b/drivers/media/video/au0828/au0828-video.c
+@@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ (AUVI_INPUT(i).audio_setup)(dev, 0);
+ }
+
+- videobuf_streamoff(&fh->vb_vidq);
+- res_free(fh, AU0828_RESOURCE_VIDEO);
++ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
++ videobuf_streamoff(&fh->vb_vidq);
++ res_free(fh, AU0828_RESOURCE_VIDEO);
++ }
+ } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ dev->vbi_timeout_running = 0;
+ del_timer_sync(&dev->vbi_timeout);
+
+- videobuf_streamoff(&fh->vb_vbiq);
+- res_free(fh, AU0828_RESOURCE_VBI);
++ if (res_check(fh, AU0828_RESOURCE_VBI)) {
++ videobuf_streamoff(&fh->vb_vbiq);
++ res_free(fh, AU0828_RESOURCE_VBI);
++ }
+ }
+
+ return 0;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 3ed9c5e..daed698 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2903,9 +2903,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= (NAND_NO_READRDY |
+- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
++ chip->options |= NAND_NO_READRDY | NAND_NO_AUTOINCR;
+
+ return 1;
+ }
+@@ -3069,9 +3067,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+ }
+- /* Get chip options, preserve non chip based options */
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
++ /* Get chip options */
++ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index c5f6b0e..6546191 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -168,6 +168,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+
+ static bool e1000_vlan_used(struct e1000_adapter *adapter);
+ static void e1000_vlan_mode(struct net_device *netdev, u32 features);
++static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
++ bool filter_on);
+ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+ static void e1000_restore_vlan(struct e1000_adapter *adapter);
+@@ -1219,7 +1221,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
+ if (err)
+ goto err_register;
+
+- e1000_vlan_mode(netdev, netdev->features);
++ e1000_vlan_filter_on_off(adapter, false);
+
+ /* print bus type/speed/width info */
+ e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
+@@ -4553,6 +4555,21 @@ static bool e1000_vlan_used(struct e1000_adapter *adapter)
+ return false;
+ }
+
++static void __e1000_vlan_mode(struct e1000_adapter *adapter, u32 features)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl;
++
++ ctrl = er32(CTRL);
++ if (features & NETIF_F_HW_VLAN_RX) {
++ /* enable VLAN tag insert/strip */
++ ctrl |= E1000_CTRL_VME;
++ } else {
++ /* disable VLAN tag insert/strip */
++ ctrl &= ~E1000_CTRL_VME;
++ }
++ ew32(CTRL, ctrl);
++}
+ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ bool filter_on)
+ {
+@@ -4562,6 +4579,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
++ __e1000_vlan_mode(adapter, adapter->netdev->features);
+ if (filter_on) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+@@ -4584,21 +4602,11 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+- u32 ctrl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
+- ctrl = er32(CTRL);
+- if (features & NETIF_F_HW_VLAN_RX) {
+- /* enable VLAN tag insert/strip */
+- ctrl |= E1000_CTRL_VME;
+- } else {
+- /* disable VLAN tag insert/strip */
+- ctrl &= ~E1000_CTRL_VME;
+- }
+- ew32(CTRL, ctrl);
++ __e1000_vlan_mode(adapter, features);
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index dea0cb4..57be855 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -4143,6 +4143,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
++ {
++ .ident = "ASUS P5NSLI",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+index 026f9de..cc54153 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+@@ -835,107 +835,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
+
+ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+- {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+- {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+- {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+- {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+- {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+- {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+- {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+- {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+- {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+- {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+- {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+- {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+- {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+- {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+- {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+- {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+- {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83},
+- {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84},
+- {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+- {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+- {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+- {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+- {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
++ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
++ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
++ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
++ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
++ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
++ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
++ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
++ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
++ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
++ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
++ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
++ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
++ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
++ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
++ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
++ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
++ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
++ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
++ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
++ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
++ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
++ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+- {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+- {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+- {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+- {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+- {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+- {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+- {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+- {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+- {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+- {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+- {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+- {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+- {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+- {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+- {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+- {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+- {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+- {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+- {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+- {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+- {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+- {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
++ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
++ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
++ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
++ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
++ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
++ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
++ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
++ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
++ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
++ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
++ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
++ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
++ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
++ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
++ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
++ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
++ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
++ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
++ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
++ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
++ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
++ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+- {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+- {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+- {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+- {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+- {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
++ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
++ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
++ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
++ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
++ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
++ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ };
+
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 56bd370..da567f0 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -10463,7 +10463,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
+ } else
+ len = src->len;
+
+- dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
++ dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 9b6b010..4ac4ef0 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -193,7 +193,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ * See iwlagn_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+- struct iwl6000_channel_switch_cmd cmd;
++ struct iwl6000_channel_switch_cmd *cmd;
+ const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+@@ -203,18 +203,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_CHANNEL_SWITCH,
+- .len = { sizeof(cmd), },
++ .len = { sizeof(*cmd), },
+ .flags = CMD_SYNC,
+- .data = { &cmd, },
++ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
++ int err;
+
+- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ hcmd.data[0] = cmd;
++
++ cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+ ch = ch_switch->channel->hw_value;
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+- cmd.channel = cpu_to_le16(ch);
+- cmd.rxon_flags = ctx->staging.flags;
+- cmd.rxon_filter_flags = ctx->staging.filter_flags;
++ cmd->channel = cpu_to_le16(ch);
++ cmd->rxon_flags = ctx->staging.flags;
++ cmd->rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+@@ -230,30 +237,32 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
++ cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+- cmd.switch_time = iwl_add_beacon_time(priv,
+- priv->ucode_beacon_time,
+- ucode_switch_time,
+- beacon_interval);
++ cmd->switch_time = iwl_add_beacon_time(priv,
++ priv->ucode_beacon_time,
++ ucode_switch_time,
++ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+- cmd.switch_time);
++ cmd->switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ if (ch_info)
+- cmd.expect_beacon = is_channel_radar(ch_info);
++ cmd->expect_beacon = is_channel_radar(ch_info);
+ else {
+ IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+- return iwl_trans_send_cmd(trans(priv), &hcmd);
++ err = iwl_trans_send_cmd(trans(priv), &hcmd);
++ kfree(cmd);
++ return err;
+ }
+
+ static struct iwl_lib_ops iwl6000_lib = {
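The iwl-6000 hunk above moves the channel-switch command off the stack because IWL_HCMD_DFL_NOCOPY tells the transport to reference (and DMA-map) the caller's buffer rather than copy it, so the memory must be heap-allocated and stay valid until the synchronous send completes (as shown, the -EFAULT branch still returns without freeing cmd). A generic kernel-style sketch, not tied to the iwlwifi API:

    #include <linux/slab.h>

    struct big_cmd { u32 payload[16]; };

    /* submit() stands in for a transport that references buf directly. */
    static int send_big_cmd(int (*submit)(void *buf, size_t len))
    {
            struct big_cmd *cmd;
            int err;

            cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);  /* DMA-safe, stable */
            if (!cmd)
                    return -ENOMEM;
            cmd->payload[0] = 42;
            err = submit(cmd, sizeof(*cmd));
            kfree(cmd);             /* only after the send has completed */
            return err;
    }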
+diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
+index 69ae2fd..b938163 100644
+--- a/drivers/pcmcia/pxa2xx_sharpsl.c
++++ b/drivers/pcmcia/pxa2xx_sharpsl.c
+@@ -219,7 +219,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+ sharpsl_pcmcia_init_reset(skt);
+ }
+
+-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
++static struct pcmcia_low_level sharpsl_pcmcia_ops = {
+ .owner = THIS_MODULE,
+ .hw_init = sharpsl_pcmcia_hw_init,
+ .hw_shutdown = sharpsl_pcmcia_hw_shutdown,
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index af1e296..21bc1a7 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -21,6 +21,7 @@
+ #include <linux/dmi.h>
+ #include <linux/platform_device.h>
+ #include <linux/rfkill.h>
++#include <linux/acpi.h>
+
+ /*
+ * This driver is needed because a number of Samsung laptops do not hook
+@@ -226,6 +227,7 @@ static struct backlight_device *backlight_device;
+ static struct mutex sabi_mutex;
+ static struct platform_device *sdev;
+ static struct rfkill *rfk;
++static bool handle_backlight;
+ static bool has_stepping_quirk;
+
+ static int force;
+@@ -602,6 +604,13 @@ static int __init samsung_init(void)
+ int retval;
+
+ mutex_init(&sabi_mutex);
++ handle_backlight = true;
++
++#ifdef CONFIG_ACPI
++ /* Don't handle backlight here if the acpi video already handle it */
++ if (acpi_video_backlight_support())
++ handle_backlight = false;
++#endif
+
+ if (!force && !dmi_check_system(samsung_dmi_table))
+ return -ENODEV;
+@@ -661,7 +670,8 @@ static int __init samsung_init(void)
+ printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
+ printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
+
+- test_backlight();
++ if (handle_backlight)
++ test_backlight();
+ test_wireless();
+
+ retval = sabi_get_command(sabi_config->commands.get_brightness,
+@@ -680,13 +690,23 @@ static int __init samsung_init(void)
+ }
+
+ /* Check for stepping quirk */
+- check_for_stepping_quirk();
++ if (handle_backlight)
++ check_for_stepping_quirk();
++
++#ifdef CONFIG_ACPI
++	/* Only log this if we are really on a sabi platform */
++ if (acpi_video_backlight_support())
++ pr_info("Backlight controlled by ACPI video driver\n");
++#endif
+
+ /* knock up a platform device to hang stuff off of */
+ sdev = platform_device_register_simple("samsung", -1, NULL, 0);
+ if (IS_ERR(sdev))
+ goto error_no_platform;
+
++ if (!handle_backlight)
++ goto skip_backlight;
++
+ /* create a backlight device to talk to this one */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+@@ -702,6 +722,7 @@ static int __init samsung_init(void)
+ backlight_device->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(backlight_device);
+
++skip_backlight:
+ retval = init_wireless(sdev);
+ if (retval)
+ goto error_no_rfk;
+diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
+index d93a960..bc744b4 100644
+--- a/drivers/rtc/rtc-imxdi.c
++++ b/drivers/rtc/rtc-imxdi.c
+@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
+ if (imxdi->ioaddr == NULL)
+ return -ENOMEM;
+
++ spin_lock_init(&imxdi->irq_lock);
++
+ imxdi->irq = platform_get_irq(pdev, 0);
+ if (imxdi->irq < 0)
+ return imxdi->irq;
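The rtc-imxdi hunk above initializes irq_lock in probe before the interrupt can be requested; any spinlock an IRQ handler takes must be initialized before request_irq() arms the handler, or the first interrupt races an uninitialized lock. A minimal sketch with generic names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct dev_priv {
            spinlock_t lock;
            unsigned int irq;
    };

    static irqreturn_t dev_isr(int irq, void *data)
    {
            struct dev_priv *p = data;

            spin_lock(&p->lock);      /* safe: initialized in probe */
            spin_unlock(&p->lock);
            return IRQ_HANDLED;
    }

    static int dev_probe(struct dev_priv *p)
    {
            spin_lock_init(&p->lock); /* must precede request_irq() */
            return request_irq(p->irq, dev_isr, 0, "dev-sketch", p);
    }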
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 6888b2c..b3a729c 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2045,8 +2045,7 @@ static void unmap_region(sector_t lba, unsigned int len)
+ block = lba + alignment;
+ rem = do_div(block, granularity);
+
+- if (rem == 0 && lba + granularity <= end &&
+- block < map_size)
++ if (rem == 0 && lba + granularity < end && block < map_size)
+ clear_bit(block, map_storep);
+
+ lba += granularity - rem;
+diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
+index 48246cd..b4311bf 100644
+--- a/drivers/staging/comedi/drivers/amplc_pc236.c
++++ b/drivers/staging/comedi/drivers/amplc_pc236.c
+@@ -470,7 +470,7 @@ static int pc236_detach(struct comedi_device *dev)
+ {
+ printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor,
+ PC236_DRIVER_NAME);
+- if (devpriv)
++ if (dev->iobase)
+ pc236_intr_disable(dev);
+
+ if (dev->irq)
+diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
+index ae8c33e..abc5ac5 100644
+--- a/drivers/staging/hv/storvsc_drv.c
++++ b/drivers/staging/hv/storvsc_drv.c
+@@ -1043,7 +1043,12 @@ static int storvsc_host_reset(struct hv_device *device)
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and return to us
++	 * There is a potential race here: the host may still be in the
++	 * process of responding when we return. Wait for all
++	 * in-transit packets to be accounted for before returning
++	 * from here.
+ */
++ storvsc_wait_to_drain(stor_device);
+
+ cleanup:
+ return ret;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index dbf7d20..df7f15d 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -760,10 +760,6 @@ static const __u32 acm_tty_speed[] = {
+ 2500000, 3000000, 3500000, 4000000
+ };
+
+-static const __u8 acm_tty_size[] = {
+- 5, 6, 7, 8
+-};
+-
+ static void acm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *termios_old)
+ {
+@@ -780,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ newline.bParityType = termios->c_cflag & PARENB ?
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+- newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
++ switch (termios->c_cflag & CSIZE) {
++ case CS5:
++ newline.bDataBits = 5;
++ break;
++ case CS6:
++ newline.bDataBits = 6;
++ break;
++ case CS7:
++ newline.bDataBits = 7;
++ break;
++ case CS8:
++ default:
++ newline.bDataBits = 8;
++ break;
++ }
+ /* FIXME: Needs to clear unsupported bits in the termios */
+ acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+@@ -1172,7 +1182,7 @@ made_compressed_probe:
+
+ if (usb_endpoint_xfer_int(epwrite))
+ usb_fill_int_urb(snd->urb, usb_dev,
+- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
++ usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+ NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
+ else
+ usb_fill_bulk_urb(snd->urb, usb_dev,
+@@ -1496,6 +1506,9 @@ static const struct usb_device_id acm_ids[] = {
+ Maybe we should define a new
+ quirk for this. */
+ },
++ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
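The cdc-acm hunk above replaces a table indexed by "(c_cflag & CSIZE) >> 4" with an explicit switch, because the CSIZE bit positions differ between architectures and the shift assumed one particular layout; the same section also fills interrupt write endpoints with usb_sndintpipe() instead of a bulk pipe. The portable decode compiles as plain userspace C:

    #include <termios.h>

    /* CS5..CS8 are symbolic constants; never shift CSIZE bits. */
    static unsigned char data_bits(tcflag_t cflag)
    {
            switch (cflag & CSIZE) {
            case CS5: return 5;
            case CS6: return 6;
            case CS7: return 7;
            case CS8:
            default:  return 8;
            }
    }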
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 52340cc..a9a74d2 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -482,13 +482,16 @@ static void hub_tt_work(struct work_struct *work)
+ int limit = 100;
+
+ spin_lock_irqsave (&hub->tt.lock, flags);
+- while (--limit && !list_empty (&hub->tt.clear_list)) {
++ while (!list_empty(&hub->tt.clear_list)) {
+ struct list_head *next;
+ struct usb_tt_clear *clear;
+ struct usb_device *hdev = hub->hdev;
+ const struct hc_driver *drv;
+ int status;
+
++ if (!hub->quiescing && --limit < 0)
++ break;
++
+ next = hub->tt.clear_list.next;
+ clear = list_entry (next, struct usb_tt_clear, clear_list);
+ list_del (&clear->clear_list);
+@@ -952,7 +955,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+ if (hub->has_indicators)
+ cancel_delayed_work_sync(&hub->leds);
+ if (hub->tt.hub)
+- cancel_work_sync(&hub->tt.clear_work);
++ flush_work_sync(&hub->tt.clear_work);
+ }
+
+ /* caller has locked the hub device */
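The hub.c hunk above changes teardown semantics: cancel_work_sync() may discard TT-clear work that has not yet started, whereas flush_work_sync() (the 3.2-era name; plain flush_work() in later kernels) waits for it to run, and the loop limit is skipped while quiescing so the clear list drains fully. A sketch of the distinction:

    #include <linux/workqueue.h>

    static void stop_worker(struct work_struct *w, bool must_drain)
    {
            if (must_drain)
                    flush_work_sync(w);   /* wait for pending work  */
            else
                    cancel_work_sync(w);  /* drop it if not running */
    }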
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index d0ec2f0..c2815a5 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -545,7 +545,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
+ /* Pegatron Lucid (Ordissimo AIRIS) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
+- DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
++ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
++ },
++ },
++ {
++ /* Pegatron Lucid (Ordissimo) */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
++ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
+ { }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 950aef8..0c6fb19 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1212,6 +1212,17 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
+ xhci->cmd_ring->dequeue, &cycle_state);
+
++ if (!cur_seg) {
++ xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
++ xhci->cmd_ring->dequeue,
++ (unsigned long long)
++ xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
++ xhci->cmd_ring->dequeue));
++ xhci_debug_ring(xhci, xhci->cmd_ring);
++ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
++ return;
++ }
++
+ /* find the command trb matched by cd from command ring */
+ for (cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb != xhci->cmd_ring->enqueue;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f5c0f38..5a23f4d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -471,7 +471,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+- strstr(dmi_product_name, "Z820"))
++ strstr(dmi_product_name, "Z820") ||
++ strstr(dmi_product_name, "Z1"))
+ return true;
+
+ return false;
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index 42de17b..d3addb2 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -577,12 +577,14 @@ static void mct_u232_close(struct usb_serial_port *port)
+ {
+ dbg("%s port %d", __func__, port->number);
+
+- if (port->serial->dev) {
+- /* shutdown our urbs */
+- usb_kill_urb(port->write_urb);
+- usb_kill_urb(port->read_urb);
+- usb_kill_urb(port->interrupt_in_urb);
+- }
++ /*
++	 * Must kill the read urb here: it is actually an interrupt urb,
++	 * which the generic close routine therefore fails to kill.
++ */
++ usb_kill_urb(port->read_urb);
++ usb_kill_urb(port->interrupt_in_urb);
++
++ usb_serial_generic_close(port);
+ } /* mct_u232_close */
+
+
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index b150ed9..d481f80 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -235,12 +235,10 @@ struct moschip_port {
+ int port_num; /*Actual port number in the device(1,2,etc) */
+ struct urb *write_urb; /* write URB for this port */
+ struct urb *read_urb; /* read URB for this port */
+- struct urb *int_urb;
+ __u8 shadowLCR; /* last LCR value received */
+ __u8 shadowMCR; /* last MCR value received */
+ char open;
+ char open_ports;
+- char zombie;
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+ int delta_msr_cond;
+@@ -505,7 +503,6 @@ static void mos7840_control_callback(struct urb *urb)
+ unsigned char *data;
+ struct moschip_port *mos7840_port;
+ __u8 regval = 0x0;
+- int result = 0;
+ int status = urb->status;
+
+ mos7840_port = urb->context;
+@@ -524,7 +521,7 @@ static void mos7840_control_callback(struct urb *urb)
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+- goto exit;
++ return;
+ }
+
+ dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+@@ -537,17 +534,6 @@ static void mos7840_control_callback(struct urb *urb)
+ mos7840_handle_new_msr(mos7840_port, regval);
+ else if (mos7840_port->MsrLsr == 1)
+ mos7840_handle_new_lsr(mos7840_port, regval);
+-
+-exit:
+- spin_lock(&mos7840_port->pool_lock);
+- if (!mos7840_port->zombie)
+- result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC);
+- spin_unlock(&mos7840_port->pool_lock);
+- if (result) {
+- dev_err(&urb->dev->dev,
+- "%s - Error %d submitting interrupt urb\n",
+- __func__, result);
+- }
+ }
+
+ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+@@ -655,14 +641,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
+ wreg = MODEM_STATUS_REGISTER;
+ break;
+ }
+- spin_lock(&mos7840_port->pool_lock);
+- if (!mos7840_port->zombie) {
+- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
+- } else {
+- spin_unlock(&mos7840_port->pool_lock);
+- return;
+- }
+- spin_unlock(&mos7840_port->pool_lock);
++ rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
+ }
+ }
+ }
+@@ -2594,7 +2573,6 @@ error:
+ kfree(mos7840_port->ctrl_buf);
+ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port);
+- serial->port[i] = NULL;
+ }
+ return status;
+ }
+@@ -2625,9 +2603,6 @@ static void mos7840_disconnect(struct usb_serial *serial)
+ mos7840_port = mos7840_get_port_private(serial->port[i]);
+ dbg ("mos7840_port %d = %p", i, mos7840_port);
+ if (mos7840_port) {
+- spin_lock_irqsave(&mos7840_port->pool_lock, flags);
+- mos7840_port->zombie = 1;
+- spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
+ usb_kill_urb(mos7840_port->control_urb);
+ }
+ }
+@@ -2661,6 +2636,7 @@ static void mos7840_release(struct usb_serial *serial)
+ mos7840_port = mos7840_get_port_private(serial->port[i]);
+ dbg("mos7840_port %d = %p", i, mos7840_port);
+ if (mos7840_port) {
++ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port->ctrl_buf);
+ kfree(mos7840_port->dr);
+ kfree(mos7840_port);
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index c248a91..d6c5ed6 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -160,7 +160,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
+ {
+ struct usb_serial *serial = port->serial;
+ int retval;
+- u8 buffer[2];
++ u8 *buffer;
++
++ buffer = kzalloc(1, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ buffer[0] = val;
+ /* Send the message to the vendor control endpoint
+@@ -169,6 +173,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
+ requesttype,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 0, 0, buffer, 1, 0);
++ kfree(buffer);
+
+ return retval;
+ }
+@@ -292,7 +297,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (!dr) {
+ dev_err(&port->dev, "out of memory\n");
+ count = -ENOMEM;
+- goto error;
++ goto error_no_dr;
+ }
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
+@@ -322,6 +327,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ return count;
+ error:
++ kfree(dr);
++error_no_dr:
+ usb_free_urb(urb);
+ error_no_urb:
+ kfree(buffer);
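The opticon hunk above swaps a two-byte stack array for a kzalloc() buffer: usb_control_msg() hands the transfer buffer to the host controller for DMA, so it must be heap memory, never the stack. The reordered error labels also stop leaking the ctrl request on the submit path. A sketch with placeholder request values:

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int send_one_byte(struct usb_device *udev, u8 val)
    {
            u8 *buf;
            int ret;

            buf = kzalloc(1, GFP_KERNEL);   /* heap, never the stack */
            if (!buf)
                    return -ENOMEM;
            *buf = val;
            ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                  0x01,     /* placeholder bRequest */
                                  USB_DIR_OUT | USB_TYPE_VENDOR |
                                  USB_RECIP_INTERFACE,
                                  0, 0, buf, 1, 1000 /* ms timeout */);
            kfree(buf);
            return ret;
    }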
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 3fd4e6f..c334670 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
+ .reserved = BIT(5),
+ };
+
++static const struct option_blacklist_info net_intf6_blacklist = {
++ .reserved = BIT(6),
++};
++
+ static const struct option_blacklist_info zte_mf626_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
+ .reserved = BIT(4),
+ };
+
++static const struct option_blacklist_info zte_1255_blacklist = {
++ .reserved = BIT(3) | BIT(4),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+@@ -880,13 +895,22 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+@@ -1002,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+@@ -1058,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1071,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 535d087..e1f1ebd 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -171,7 +171,6 @@ static int sierra_probe(struct usb_serial *serial,
+ {
+ int result = 0;
+ struct usb_device *udev;
+- struct sierra_intf_private *data;
+ u8 ifnum;
+
+ udev = serial->dev;
+@@ -199,11 +198,6 @@ static int sierra_probe(struct usb_serial *serial,
+ return -ENODEV;
+ }
+
+- data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
+- spin_lock_init(&data->susp_lock);
+-
+ return result;
+ }
+
+@@ -915,6 +909,7 @@ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ static int sierra_startup(struct usb_serial *serial)
+ {
+ struct usb_serial_port *port;
++ struct sierra_intf_private *intfdata;
+ struct sierra_port_private *portdata;
+ struct sierra_iface_info *himemoryp = NULL;
+ int i;
+@@ -922,6 +917,14 @@ static int sierra_startup(struct usb_serial *serial)
+
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
+
++ intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL);
++ if (!intfdata)
++ return -ENOMEM;
++
++ spin_lock_init(&intfdata->susp_lock);
++
++ usb_set_serial_data(serial, intfdata);
++
+ /* Set Device mode to D0 */
+ sierra_set_power_state(serial->dev, 0x0000);
+
+@@ -937,7 +940,7 @@ static int sierra_startup(struct usb_serial *serial)
+ dev_dbg(&port->dev, "%s: kmalloc for "
+ "sierra_port_private (%d) failed!\n",
+ __func__, i);
+- return -ENOMEM;
++ goto err;
+ }
+ spin_lock_init(&portdata->lock);
+ init_usb_anchor(&portdata->active);
+@@ -974,6 +977,14 @@ static int sierra_startup(struct usb_serial *serial)
+ }
+
+ return 0;
++err:
++ for (--i; i >= 0; --i) {
++ portdata = usb_get_serial_port_data(serial->port[i]);
++ kfree(portdata);
++ }
++ kfree(intfdata);
++
++ return -ENOMEM;
+ }
+
+ static void sierra_release(struct usb_serial *serial)
+@@ -993,6 +1004,7 @@ static void sierra_release(struct usb_serial *serial)
+ continue;
+ kfree(portdata);
+ }
++ kfree(serial->private);
+ }
+
+ #ifdef CONFIG_PM
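The sierra hunk above moves the interface-private allocation from probe() into attach (sierra_startup), where a per-port allocation failure can now unwind: the backward loop frees only the portdata that were successfully allocated before freeing intfdata. The unwind idiom, compilable as plain C with malloc standing in:

    #include <stdlib.h>

    int alloc_ports(void *ports[], int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    ports[i] = malloc(64);
                    if (!ports[i])
                            goto err;
            }
            return 0;
    err:
            for (--i; i >= 0; --i) {  /* free only what succeeded */
                    free(ports[i]);
                    ports[i] = NULL;
            }
            return -1;
    }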
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 5b073bc..59d646d 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -576,6 +576,7 @@ no_firmware:
+ "%s: please contact support@connecttech.com\n",
+ serial->type->description);
+ kfree(result);
++ kfree(command);
+ return -ENODEV;
+
+ no_command_private:
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 591f57f..fa8a1b2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1004,6 +1004,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999,
+ USB_SC_8070, USB_PR_CB, NULL,
+ US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ),
+
++/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */
++UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100,
++ "Casio",
++ "EX-N1 DigitalCamera",
++ USB_SC_8070, USB_PR_DEVICE, NULL, 0),
++
+ /* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/
+ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
+ "Samsung",
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 882a51f..b76071e 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -371,7 +371,8 @@ static void handle_rx(struct vhost_net *net)
+ .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+ size_t total_len = 0;
+- int err, headcount, mergeable;
++ int err, mergeable;
++ s16 headcount;
+ size_t vhost_hlen, sock_hlen;
+ size_t vhost_len, sock_len;
+ /* TODO: check that we are running from vhost_worker? */
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 41746bb..cb5988f 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -646,7 +646,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
+index af8f26b..db1e392 100644
+--- a/drivers/video/via/via_clock.c
++++ b/drivers/video/via/via_clock.c
+@@ -25,6 +25,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/via-core.h>
++#include <asm/olpc.h>
+ #include "via_clock.h"
+ #include "global.h"
+ #include "debug.h"
+@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
+ printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+ }
+
++static void noop_set_clock_state(u8 state)
++{
++}
++
+ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ {
+ switch (gfx_chip) {
+@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ break;
+
+ }
++
++ if (machine_is_olpc()) {
++ /* The OLPC XO-1.5 cannot suspend/resume reliably if the
++ * IGA1/IGA2 clocks are set as on or off (memory rot
++ * occasionally happens during suspend under such
++ * configurations).
++ *
++	 * The only known stable scenario is to leave these bits as-is,
++ * which in their default states are documented to enable the
++ * clock only when it is needed.
++ */
++ clock->set_primary_clock_state = noop_set_clock_state;
++ clock->set_secondary_clock_state = noop_set_clock_state;
++ }
+ }
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 9fbcdec..b001030 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -91,7 +91,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
+ * FIXME: we should try harder by querying the mds for the ino.
+ */
+ static struct dentry *__fh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_fh *fh)
++ struct ceph_nfs_fh *fh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -99,6 +99,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*fh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__fh_to_dentry %llx\n", fh->ino);
+ vino.ino = fh->ino;
+ vino.snap = CEPH_NOSNAP;
+@@ -142,7 +145,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ * convert connectable fh to dentry
+ */
+ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_confh *cfh)
++ struct ceph_nfs_confh *cfh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -150,6 +153,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+
+@@ -199,9 +205,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
+ if (fh_type == 1)
+- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
++ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
++ fh_len);
+ else
+- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
++ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
++ fh_len);
+ }
+
+ /*
+@@ -222,6 +230,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+
+ if (fh_type == 1)
+ return ERR_PTR(-ESTALE);
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
+
+ pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ cfh->parent_name_hash);
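The ceph hunk above validates fh_len before trusting the handle: exportfs reports file-handle length in 4-byte words, and a handle shorter than the expected structure must be rejected with -ESTALE rather than dereferenced. A kernel-style sketch with a hypothetical handle layout:

    #include <linux/err.h>
    #include <linux/types.h>

    struct dentry;                          /* opaque here */
    struct my_fh { u64 ino; u32 gen; };     /* hypothetical layout */

    static struct dentry *decode_fh(void *raw, int fh_len)
    {
            struct my_fh *fh = raw;

            if (fh_len < sizeof(*fh) / 4)   /* fh_len is in words */
                    return ERR_PTR(-ESTALE);
            /* ... safe to read fh->ino and fh->gen now ... */
            return NULL;                    /* lookup elided */
    }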
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 51352de..f854cf9 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
+
+ err = get_user(palp, &up->palette);
+ err |= get_user(length, &up->length);
++ if (err)
++ return -EFAULT;
+
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
+ err = put_user(compat_ptr(palp), &up_native->palette);
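
The do_video_set_spu_palette() hunk fixes an unchecked-copy bug: the two get_user() results were OR-ed into err but never tested, so a faulting userspace pointer left palp and length holding stack garbage that was then handed on. The accumulate-then-check idiom it restores, as a self-contained sketch (fake_get_user() is a stand-in for the real uaccess helper):

#include <stdio.h>

/* Stand-in for get_user(): 0 on success, nonzero on a faulting address. */
static int fake_get_user(int *dst, const int *src, int fault)
{
	if (fault)
		return -1;
	*dst = *src;
	return 0;
}

int main(void)
{
	int src_a = 1, src_b = 2, a = 0, b = 0, err;

	err  = fake_get_user(&a, &src_a, 0);
	err |= fake_get_user(&b, &src_b, 1);	/* simulate a fault */
	if (err) {				/* the added check */
		fprintf(stderr, "copy-in failed, not using a/b\n");
		return 1;
	}
	printf("%d %d\n", a, b);
	return 0;
}
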
+diff --git a/fs/exec.c b/fs/exec.c
+index 160cd2f..121ccae 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1095,7 +1095,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+ bprm->mm = NULL; /* We're using it now */
+
+ set_fs(USER_DS);
+- current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
++ current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE);
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 54f2bdc..191580a 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2715,6 +2715,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
+ #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+
++#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++
+ /*
+ * ext4_split_extent_at() splits an extent at given block.
+ *
+@@ -2750,6 +2753,9 @@ static int ext4_split_extent_at(handle_t *handle,
+ unsigned int ee_len, depth;
+ int err = 0;
+
++ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
++ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
++
+ ext_debug("ext4_split_extents_at: inode %lu, logical"
+ "block %llu\n", inode->i_ino, (unsigned long long)split);
+
+@@ -2808,7 +2814,14 @@ static int ext4_split_extent_at(handle_t *handle,
+
+ err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+- err = ext4_ext_zeroout(inode, &orig_ex);
++ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
++ if (split_flag & EXT4_EXT_DATA_VALID1)
++ err = ext4_ext_zeroout(inode, ex2);
++ else
++ err = ext4_ext_zeroout(inode, ex);
++ } else
++ err = ext4_ext_zeroout(inode, &orig_ex);
++
+ if (err)
+ goto fix_extent_len;
+ /* update the extent length and mark as initialized */
+@@ -2861,12 +2874,13 @@ static int ext4_split_extent(handle_t *handle,
+ uninitialized = ext4_ext_is_uninitialized(ex);
+
+ if (map->m_lblk + map->m_len < ee_block + ee_len) {
+- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+- EXT4_EXT_MAY_ZEROOUT : 0;
++ split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
+ flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+ if (uninitialized)
+ split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
+ EXT4_EXT_MARK_UNINIT2;
++ if (split_flag & EXT4_EXT_DATA_VALID2)
++ split_flag1 |= EXT4_EXT_DATA_VALID1;
+ err = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (err)
+@@ -2879,8 +2893,8 @@ static int ext4_split_extent(handle_t *handle,
+ return PTR_ERR(path);
+
+ if (map->m_lblk >= ee_block) {
+- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+- EXT4_EXT_MAY_ZEROOUT : 0;
++ split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
++ EXT4_EXT_DATA_VALID2);
+ if (uninitialized)
+ split_flag1 |= EXT4_EXT_MARK_UNINIT1;
+ if (split_flag & EXT4_EXT_MARK_UNINIT2)
+@@ -3158,26 +3172,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
+
+ split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag |= EXT4_EXT_MARK_UNINIT2;
+-
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags);
+ }
+
+ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path *path)
++ struct inode *inode,
++ struct ext4_map_blocks *map,
++ struct ext4_ext_path *path)
+ {
+ struct ext4_extent *ex;
++ ext4_lblk_t ee_block;
++ unsigned int ee_len;
+ int depth;
+ int err = 0;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
++ ee_block = le32_to_cpu(ex->ee_block);
++ ee_len = ext4_ext_get_actual_len(ex);
+
+ ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+- (unsigned long long)le32_to_cpu(ex->ee_block),
+- ext4_ext_get_actual_len(ex));
++ (unsigned long long)ee_block, ee_len);
++
++ /* If extent is larger than requested then split is required */
++ if (ee_block != map->m_lblk || ee_len > map->m_len) {
++ err = ext4_split_unwritten_extents(handle, inode, map, path,
++ EXT4_GET_BLOCKS_CONVERT);
++ if (err < 0)
++ goto out;
++ ext4_ext_drop_refs(path);
++ path = ext4_ext_find_extent(inode, map->m_lblk, path);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ goto out;
++ }
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++ }
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+@@ -3479,7 +3514,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+ }
+ /* IO end_io complete, convert the filled extent to written */
+ if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
+- ret = ext4_convert_unwritten_extents_endio(handle, inode,
++ ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
+ path);
+ if (ret >= 0) {
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
+index fe9945f..5235d6e 100644
+--- a/fs/gfs2/export.c
++++ b/fs/gfs2/export.c
+@@ -167,6 +167,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ case GFS2_SMALL_FH_SIZE:
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_SMALL_FH_SIZE)
++ return NULL;
+ this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
+ this.no_formal_ino |= be32_to_cpu(fh[1]);
+ this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
+@@ -186,6 +188,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
+ switch (fh_type) {
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_LARGE_FH_SIZE)
++ return NULL;
+ parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
+ parent.no_formal_ino |= be32_to_cpu(fh[5]);
+ parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index dd4687f..516eb21 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ {
+ struct isofs_fid *ifid = (struct isofs_fid *)fid;
+
+- if (fh_type != 2)
++ if (fh_len < 2 || fh_type != 2)
+ return NULL;
+
+ return isofs_export_iget(sb,
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 8799207..931bf95 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -86,7 +86,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ if (buffer_freed(bh)) {
++ WARN_ON_ONCE(buffer_dirty(bh));
+ clear_buffer_freed(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
+ release_buffer_page(bh);
+ } else
+ put_bh(bh);
+@@ -847,17 +852,35 @@ restart_loop:
+ * there's no point in keeping a checkpoint record for
+ * it. */
+
+- /* A buffer which has been freed while still being
+- * journaled by a previous transaction may end up still
+- * being dirty here, but we want to avoid writing back
+- * that buffer in the future after the "add to orphan"
+- * operation been committed, That's not only a performance
+- * gain, it also stops aliasing problems if the buffer is
+- * left behind for writeback and gets reallocated for another
+- * use in a different page. */
+- if (buffer_freed(bh) && !jh->b_next_transaction) {
+- clear_buffer_freed(bh);
+- clear_buffer_jbddirty(bh);
++ /*
++ * A buffer which has been freed while still being journaled by
++ * a previous transaction.
++ */
++ if (buffer_freed(bh)) {
++ /*
++ * If the running transaction is the one containing
++ * "add to orphan" operation (b_next_transaction !=
++ * NULL), we have to wait for that transaction to
++ * commit before we can really get rid of the buffer.
++ * So just clear b_modified to not confuse transaction
++ * credit accounting and refile the buffer to
++ * BJ_Forget of the running transaction. If the just
++ * committed transaction contains "add to orphan"
++ * operation, we can completely invalidate the buffer
++ * now. We are rather thorough in that since the
++ * buffer may still be accessible when blocksize <
++ * pagesize and it is attached to the last partial
++ * page.
++ */
++ jh->b_modified = 0;
++ if (!jh->b_next_transaction) {
++ clear_buffer_freed(bh);
++ clear_buffer_jbddirty(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
++ }
+ }
+
+ if (buffer_jbddirty(bh)) {
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index 7e59c6e..edac004 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1839,15 +1839,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+ * We're outside-transaction here. Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++ int partial_page)
+ {
+ transaction_t *transaction;
+ struct journal_head *jh;
+ int may_free = 1;
+- int ret;
+
+ BUFFER_TRACE(bh, "entry");
+
++retry:
+ /*
+ * It is safe to proceed here without the j_list_lock because the
+ * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1875,10 +1876,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * clear the buffer dirty bit at latest at the moment when the
+ * transaction marking the buffer as freed in the filesystem
+ * structures is committed because from that moment on the
+- * buffer can be reallocated and used by a different page.
++ * block can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has
+ * already been added to orphan list, it is safe for us to add
+ * the buffer to BJ_Forget list of the newest transaction.
++ *
++ * Also we have to clear buffer_mapped flag of a truncated buffer
++ * because the buffer_head may be attached to the page straddling
++ * i_size (can happen only when blocksize < pagesize) and thus the
++ * buffer_head can be reused when the file is extended again. So we end
++ * up keeping around invalidated buffers attached to transactions'
++ * BJ_Forget list just to stop checkpointing code from cleaning up
++ * the transaction this buffer was modified in.
+ */
+ transaction = jh->b_transaction;
+ if (transaction == NULL) {
+@@ -1905,13 +1914,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * committed, the buffer won't be needed any
+ * longer. */
+ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_running_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* There is no currently-running transaction. So the
+ * orphan record which we wrote for this file must have
+@@ -1919,13 +1924,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * the committing transaction, if it exists. */
+ if (journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "give to committing trans");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_committing_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* The orphan record's transaction has
+ * committed. We can cleanse this buffer */
+@@ -1946,10 +1947,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+ /*
+ * The buffer is committing, we simply cannot touch
+- * it. So we just set j_next_transaction to the
+- * running transaction (if there is one) and mark
+- * buffer as freed so that commit code knows it should
+- * clear dirty bits when it is done with the buffer.
++ * it. If the page is straddling i_size we have to wait
++ * for commit and try again.
++ */
++ if (partial_page) {
++ tid_t tid = journal->j_committing_transaction->t_tid;
++
++ journal_put_journal_head(jh);
++ spin_unlock(&journal->j_list_lock);
++ jbd_unlock_bh_state(bh);
++ spin_unlock(&journal->j_state_lock);
++ log_wait_commit(journal, tid);
++ goto retry;
++ }
++ /*
++ * OK, buffer won't be reachable after truncate. We just set
++ * j_next_transaction to the running transaction (if there is
++ * one) and mark buffer as freed so that commit code knows it
++ * should clear dirty bits when it is done with the buffer.
+ */
+ set_buffer_freed(bh);
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1972,6 +1987,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+
+ zap_buffer:
++ /*
++ * This is tricky. Although the buffer is truncated, it may be reused
++ * if blocksize < pagesize and it is attached to the page straddling
++ * EOF. Since the buffer might have been added to BJ_Forget list of the
++ * running transaction, journal_get_write_access() won't clear
++ * b_modified and credit accounting gets confused. So clear b_modified
++ * here. */
++ jh->b_modified = 0;
+ journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ spin_unlock(&journal->j_list_lock);
+@@ -2020,7 +2043,8 @@ void journal_invalidatepage(journal_t *journal,
+ if (offset <= curr_off) {
+ /* This block is wholly outside the truncation point */
+ lock_buffer(bh);
+- may_free &= journal_unmap_buffer(journal, bh);
++ may_free &= journal_unmap_buffer(journal, bh,
++ offset > 0);
+ unlock_buffer(bh);
+ }
+ curr_off = next_off;
+diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
+index 36057ce..6e2a2d5 100644
+--- a/fs/lockd/clntxdr.c
++++ b/fs/lockd/clntxdr.c
+@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
+ {
+ __be32 *p;
+
+- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
++ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ p = xdr_reserve_space(xdr, 4);
+ *p = stat;
+ }
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index df753a1..23d7451 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -40,7 +40,6 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
+- char *nodename;
+ };
+
+ struct nsm_res {
+@@ -94,7 +93,6 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
+- .nodename = utsname()->nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+@@ -431,7 +429,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, argp->nodename);
++ encode_nsm_string(xdr, utsname()->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index d27aab1..d413af3 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+
+ /* Obtain file pointer. Not used by FREE_ALL call. */
+ if (filp != NULL) {
+- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
++ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
++ if (error != 0)
+ goto no_locks;
+ *filp = file;
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4cfe260..d225b51 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3673,6 +3673,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+
+ nfsd4_close_open_stateid(stp);
++ release_last_closed_stateid(oo);
+ oo->oo_last_closed_stid = stp;
+
+ /* place unused nfs4_stateowners on so_close_lru list to be
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 0855e6f..4c9a859 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -24,11 +24,14 @@
+
+ static cputime64_t get_idle_time(int cpu)
+ {
+- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
++ u64 idle_time = -1ULL;
+ cputime64_t idle;
+
++ if (cpu_online(cpu))
++ idle_time = get_cpu_idle_time_us(cpu, NULL);
++
+ if (idle_time == -1ULL) {
+- /* !NO_HZ so we can rely on cpustat.idle */
++ /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
+ idle = kstat_cpu(cpu).cpustat.idle;
+ idle = cputime64_add(idle, arch_idle_time(cpu));
+ } else
+@@ -39,11 +42,14 @@ static cputime64_t get_idle_time(int cpu)
+
+ static cputime64_t get_iowait_time(int cpu)
+ {
+- u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
++ u64 iowait_time = -1ULL;
+ cputime64_t iowait;
+
++ if (cpu_online(cpu))
++ iowait_time = get_cpu_iowait_time_us(cpu, NULL);
++
+ if (iowait_time == -1ULL)
+- /* !NO_HZ so we can rely on cpustat.iowait */
++ /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
+ iowait = kstat_cpu(cpu).cpustat.iowait;
+ else
+ iowait = usecs_to_cputime64(iowait_time);
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 950f13a..5809abb 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ reiserfs_warning(sb, "reiserfs-13077",
+ "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ fh_type, fh_len);
+- fh_type = 5;
++ fh_type = fh_len;
+ }
++ if (fh_len < 2)
++ return NULL;
+
+ return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
+ (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
+@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
++ if (fh_type > fh_len)
++ fh_type = fh_len;
+ if (fh_type < 4)
+ return NULL;
+
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 7fdf6a7..fabbb81 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -430,20 +430,18 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+ /**
+ * sysfs_pathname - return full path to sysfs dirent
+ * @sd: sysfs_dirent whose path we want
+- * @path: caller allocated buffer
++ * @path: caller allocated buffer of size PATH_MAX
+ *
+ * Gives the name "/" to the sysfs_root entry; any path returned
+ * is relative to wherever sysfs is mounted.
+- *
+- * XXX: does no error checking on @path size
+ */
+ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
+ {
+ if (sd->s_parent) {
+ sysfs_pathname(sd->s_parent, path);
+- strcat(path, "/");
++ strlcat(path, "/", PATH_MAX);
+ }
+- strcat(path, sd->s_name);
++ strlcat(path, sd->s_name, PATH_MAX);
+ return path;
+ }
+
+@@ -476,9 +474,11 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+ char *path = kzalloc(PATH_MAX, GFP_KERNEL);
+ WARN(1, KERN_WARNING
+ "sysfs: cannot create duplicate filename '%s'\n",
+- (path == NULL) ? sd->s_name :
+- strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"),
+- sd->s_name));
++ (path == NULL) ? sd->s_name
++ : (sysfs_pathname(acxt->parent_sd, path),
++ strlcat(path, "/", PATH_MAX),
++ strlcat(path, sd->s_name, PATH_MAX),
++ path));
+ kfree(path);
+ }
+
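
sysfs_pathname() concatenated with strcat() into a PATH_MAX buffer with no bound at all (the old comment even admitted it), so a deep enough sysfs hierarchy could overflow the allocation; the fix switches to strlcat(), which takes the total destination size and truncates. A self-contained demo of the bounded append (strlcat is a BSD routine only added to glibc in 2.38, so a simplified local copy is included; its return value is not the full BSD semantic):

#include <stdio.h>
#include <string.h>

/* Simplified strlcat: dsize is the TOTAL buffer size, output always
 * NUL-terminated. (BSD strlcat's return value differs slightly.) */
static size_t my_strlcat(char *dst, const char *src, size_t dsize)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);

	if (dlen >= dsize)
		return dsize;			/* already full */
	if (slen >= dsize - dlen)
		slen = dsize - dlen - 1;	/* truncate to fit */
	memcpy(dst + dlen, src, slen);
	dst[dlen + slen] = '\0';
	return dlen + slen;
}

int main(void)
{
	char path[16] = "/sys";

	/* strcat() here could write past path[15]; the bounded form cannot */
	my_strlcat(path, "/devices", sizeof(path));
	my_strlcat(path, "/very-long-component", sizeof(path));
	printf("%s\n", path);	/* truncated, but in bounds */
	return 0;
}
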
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 558910f..5703fb8 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
+ struct inode *inode = NULL;
+
++ if (fh_len < xfs_fileid_length(fileid_type))
++ return NULL;
++
+ switch (fileid_type) {
+ case FILEID_INO32_GEN_PARENT:
+ inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 12d5543..c944c4f 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -97,6 +97,8 @@ static inline int is_vlan_dev(struct net_device *dev)
+ }
+
+ #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
++#define vlan_tx_nonzero_tag_present(__skb) \
++ (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
+ #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+
+ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+@@ -106,7 +108,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+
+-extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
++extern bool vlan_do_receive(struct sk_buff **skb);
+ extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+
+ #else
+@@ -128,10 +130,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+ return 0;
+ }
+
+-static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
++static inline bool vlan_do_receive(struct sk_buff **skb)
+ {
+- if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
+- (*skb)->pkt_type = PACKET_OTHERHOST;
+ return false;
+ }
+
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 904131b..b25b09b 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -215,9 +215,6 @@ typedef enum {
+ #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ && (chip->page_shift > 9))
+
+-/* Mask to zero out the chip options, which come from the id table */
+-#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
+-
+ /* Non chip related options */
+ /* This option skips the bbt scan during initialization. */
+ #define NAND_SKIP_BBTSCAN 0x00010000
+diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
+index c0405ac..e3a9978 100644
+--- a/include/linux/netfilter/xt_set.h
++++ b/include/linux/netfilter/xt_set.h
+@@ -58,8 +58,8 @@ struct xt_set_info_target_v1 {
+ struct xt_set_info_target_v2 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+- u32 flags;
+- u32 timeout;
++ __u32 flags;
++ __u32 timeout;
+ };
+
+ #endif /*_XT_SET_H*/
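
The xt_set hunk swaps u32 for __u32 because this header is exported to userspace: the plain kernel-internal typedefs do not exist outside the kernel, while the double-underscore types from <linux/types.h> do, so an iptables build including the old header failed to compile. A quick check that the __u32 spelling resolves outside the kernel (demo struct only, not the real xt_set_info_target_v2; requires Linux's exported headers):

#include <linux/types.h>
#include <stdio.h>

struct demo_target_info {	/* mirrors the flags/timeout tail of the fix */
	__u32 flags;
	__u32 timeout;
};

int main(void)
{
	struct demo_target_info i = { .flags = 1, .timeout = 3600 };
	printf("%u %u (sizeof=%zu)\n", i.flags, i.timeout, sizeof(i));
	return 0;
}
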
+diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
+index a88fb69..ea6f8a4 100644
+--- a/include/net/netfilter/nf_conntrack_ecache.h
++++ b/include/net/netfilter/nf_conntrack_ecache.h
+@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 pid; /* netlink pid of destroyer */
++ struct timer_list timeout;
+ };
+
+ static inline struct nf_conntrack_ecache *
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index cdc0354..6337535 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1803,9 +1803,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+ * trading it for newcg is protected by cgroup_mutex, we're safe to drop
+ * it here; it will be freed under RCU.
+ */
+- put_css_set(oldcg);
+-
+ set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
++ put_css_set(oldcg);
+ return 0;
+ }
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 6969ef0..6c8fa34 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2659,6 +2659,10 @@ static int check_module_license_and_versions(struct module *mod)
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+
++ /* lve claims to be GPL but upstream won't provide source */
++ if (strcmp(mod->name, "lve") == 0)
++ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
++
+ #ifdef CONFIG_MODVERSIONS
+ if ((mod->num_syms && !mod->crcs)
+ || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index c504302..d7c4ab0 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1171,15 +1171,16 @@ DECLARE_RWSEM(uts_sem);
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+-static int override_release(char __user *release, int len)
++static int override_release(char __user *release, size_t len)
+ {
+ int ret = 0;
+- char buf[65];
+
+ if (current->personality & UNAME26) {
+- char *rest = UTS_RELEASE;
++ const char *rest = UTS_RELEASE;
++ char buf[65] = { 0 };
+ int ndots = 0;
+ unsigned v;
++ size_t copy;
+
+ while (*rest) {
+ if (*rest == '.' && ++ndots >= 3)
+@@ -1189,8 +1190,9 @@ static int override_release(char __user *release, int len)
+ rest++;
+ }
+ v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+- snprintf(buf, len, "2.6.%u%s", v, rest);
+- ret = copy_to_user(release, buf, len);
++ copy = min(sizeof(buf), max_t(size_t, 1, len));
++ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
++ ret = copy_to_user(release, buf, copy + 1);
+ }
+ return ret;
+ }
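
The override_release() rewrite closes two holes at once: the old code did snprintf(buf, len, ...) with the caller-supplied len against a fixed 65-byte stack buffer, and then copy_to_user(release, buf, len) could ship up to len bytes of stack back to userspace. The fix clamps the copy to the buffer size and uses scnprintf(), which returns the number of characters actually stored, so exactly string-plus-NUL is copied. A userspace sketch of the clamping (libc has no scnprintf, so snprintf's return value is clamped by hand; MIN/MAX from <sys/param.h> stand in for the kernel's min/max_t):

#include <stdio.h>
#include <sys/param.h>	/* MIN/MAX on glibc and the BSDs */

int main(void)
{
	char buf[65] = { 0 };
	size_t user_len = 1000;	/* hostile, oversized caller length */
	size_t copy;
	int n;

	copy = MIN(sizeof(buf), MAX((size_t)1, user_len));
	n = snprintf(buf, copy, "2.6.%u%s", 40, "-rc1");
	if (n < 0)
		return 1;
	if ((size_t)n >= copy)	/* scnprintf-style clamp */
		n = copy - 1;
	/* only n + 1 bytes (string + NUL) would reach userspace */
	printf("would copy %d+1 bytes: \"%s\"\n", n, buf);
	return 0;
}
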
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 5ee1ac0..cb7f33e 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -992,7 +992,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+ }
+
+ /* Accumulate raw time */
+- raw_nsecs = timekeeper.raw_interval << shift;
++ raw_nsecs = (u64)timekeeper.raw_interval << shift;
+ raw_nsecs += raw_time.tv_nsec;
+ if (raw_nsecs >= NSEC_PER_SEC) {
+ u64 raw_secs = raw_nsecs;
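
In this kernel's struct timekeeper, raw_interval is a 32-bit field, so the un-cast `raw_interval << shift` was evaluated in 32-bit arithmetic and silently dropped high bits once the logarithmic accumulator used a large shift, making CLOCK_MONOTONIC_RAW drift. Casting to u64 first performs the shift in 64 bits. The difference in two lines:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw_interval = 999999872;	/* illustrative per-tick ns value */
	int shift = 4;

	uint32_t bad  = raw_interval << shift;		 /* wraps modulo 2^32 */
	uint64_t good = (uint64_t)raw_interval << shift; /* the fixed form */

	printf("32-bit shift: %" PRIu32 "\n", bad);	/* 3115096064 */
	printf("64-bit shift: %" PRIu64 "\n", good);	/* 15999997952 */
	return 0;
}
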
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 9c3c62b..c219db6 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #define TVR_SIZE (1 << TVR_BITS)
+ #define TVN_MASK (TVN_SIZE - 1)
+ #define TVR_MASK (TVR_SIZE - 1)
++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+
+ struct tvec {
+ struct list_head vec[TVN_SIZE];
+@@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+ vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+ } else {
+ int i;
+- /* If the timeout is larger than 0xffffffff on 64-bit
+- * architectures then we use the maximum timeout:
++ /* If the timeout is larger than MAX_TVAL (on 64-bit
++ * architectures or with CONFIG_BASE_SMALL=1) then we
++ * use the maximum timeout.
+ */
+- if (idx > 0xffffffffUL) {
+- idx = 0xffffffffUL;
++ if (idx > MAX_TVAL) {
++ idx = MAX_TVAL;
+ expires = idx + base->timer_jiffies;
+ }
+ i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
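
MAX_TVAL derives the timer-wheel capacity from its geometry instead of hard-coding 0xffffffff. With the default TVR_BITS = 8 and TVN_BITS = 6 the wheel spans 8 + 4*6 = 32 bits, so the old constant happened to be correct; with CONFIG_BASE_SMALL (TVR_BITS = 6, TVN_BITS = 4) it spans only 22 bits, and an index capped at 0xffffffff still overran the last wheel level. Checking the arithmetic:

#include <stdio.h>

static unsigned long long max_tval(int tvr_bits, int tvn_bits)
{
	return (1ULL << (tvr_bits + 4 * tvn_bits)) - 1;
}

int main(void)
{
	/* default geometry: 2^32 - 1, matching the old hard-coded cap */
	printf("default:    %#llx\n", max_tval(8, 6));
	/* CONFIG_BASE_SMALL geometry: only 2^22 - 1 = 4194303 */
	printf("BASE_SMALL: %#llx\n", max_tval(6, 4));
	return 0;
}
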
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index f352cc4..716f947 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -176,7 +176,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+- (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
++ BITS_TO_LONGS(nbits) * sizeof(long);
+
+ chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+ if (unlikely(chunk == NULL))
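
The genalloc fix corrects the bitmap sizing in the chunk allocation above: the bitmap was sized in bytes, but the kernel's bitmap helpers read and write whole longs, so any nbits that was not a multiple of BITS_PER_LONG let bitmap operations touch memory past the allocation. BITS_TO_LONGS() rounds up to whole longs. The two sizings side by side:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int nbits = 33;
	size_t by_bytes = (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; /* old: 5 */
	size_t by_longs = BITS_TO_LONGS(nbits) * sizeof(long);         /* new: 8 */

	/* bitmap ops access sizeof(long)-sized words, so 5 bytes is short */
	printf("byte sizing: %zu, long sizing: %zu\n", by_bytes, by_longs);
	return 0;
}
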
+diff --git a/mm/rmap.c b/mm/rmap.c
+index a4fd368..8685697 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -56,6 +56,7 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/hugetlb.h>
++#include <linux/backing-dev.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -935,11 +936,8 @@ int page_mkclean(struct page *page)
+
+ if (page_mapped(page)) {
+ struct address_space *mapping = page_mapping(page);
+- if (mapping) {
++ if (mapping)
+ ret = page_mkclean_file(mapping, page);
+- if (page_test_and_clear_dirty(page_to_pfn(page), 1))
+- ret = 1;
+- }
+ }
+
+ return ret;
+@@ -1120,6 +1118,8 @@ void page_add_file_rmap(struct page *page)
+ */
+ void page_remove_rmap(struct page *page)
+ {
++ struct address_space *mapping = page_mapping(page);
++
+ /* page still mapped by someone else? */
+ if (!atomic_add_negative(-1, &page->_mapcount))
+ return;
+@@ -1130,8 +1130,19 @@ void page_remove_rmap(struct page *page)
+ * this if the page is anon, so about to be freed; but perhaps
+ * not if it's in swapcache - there might be another pte slot
+ * containing the swap entry, but page not yet written to swap.
++ *
++ * And we can skip it on file pages, so long as the filesystem
++ * participates in dirty tracking; but need to catch shm and tmpfs
++ * and ramfs pages which have been modified since creation by read
++ * fault.
++ *
++ * Note that mapping must be decided above, before decrementing
++ * mapcount (which luckily provides a barrier): once page is unmapped,
++ * it could be truncated and page->mapping reset to NULL at any moment.
++ * Note also that we are relying on page_mapping(page) to set mapping
++ * to &swapper_space when PageSwapCache(page).
+ */
+- if ((!PageAnon(page) || PageSwapCache(page)) &&
++ if (mapping && !mapping_cap_account_dirty(mapping) &&
+ page_test_and_clear_dirty(page_to_pfn(page), 1))
+ set_page_dirty(page);
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 7a82174..126ca35 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1962,12 +1962,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ {
+ struct inode *inode;
+ struct dentry *dentry = NULL;
+- u64 inum = fid->raw[2];
+- inum = (inum << 32) | fid->raw[1];
++ u64 inum;
+
+ if (fh_len < 3)
+ return NULL;
+
++ inum = fid->raw[2];
++ inum = (inum << 32) | fid->raw[1];
++
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
+ if (inode) {
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 9ddbd4e..e860a4f 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -5,7 +5,7 @@
+ #include <linux/export.h>
+ #include "vlan.h"
+
+-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
++bool vlan_do_receive(struct sk_buff **skbp)
+ {
+ struct sk_buff *skb = *skbp;
+ u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+ struct vlan_pcpu_stats *rx_stats;
+
+ vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+- if (!vlan_dev) {
+- /* Only the last call to vlan_do_receive() should change
+- * pkt_type to PACKET_OTHERHOST
+- */
+- if (vlan_id && last_handler)
+- skb->pkt_type = PACKET_OTHERHOST;
++ if (!vlan_dev)
+ return false;
+- }
+
+ skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index c27b4e3..1849ee0 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -30,6 +30,8 @@
+
+ #define SMP_TIMEOUT 30000 /* 30 seconds */
+
++#define AUTH_REQ_MASK 0x07
++
+ static inline void swap128(u8 src[16], u8 dst[16])
+ {
+ int i;
+@@ -206,7 +208,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ req->init_key_dist = dist_keys;
+ req->resp_key_dist = dist_keys;
+- req->auth_req = authreq;
++ req->auth_req = (authreq & AUTH_REQ_MASK);
+ return;
+ }
+
+@@ -215,7 +217,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ rsp->init_key_dist = req->init_key_dist & dist_keys;
+ rsp->resp_key_dist = req->resp_key_dist & dist_keys;
+- rsp->auth_req = authreq;
++ rsp->auth_req = (authreq & AUTH_REQ_MASK);
+ }
+
+ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index abe1147..f500a69 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3278,18 +3278,18 @@ another_round:
+ ncls:
+ #endif
+
+- rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+- if (vlan_do_receive(&skb, !rx_handler))
++ if (vlan_do_receive(&skb))
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ }
+
++ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+@@ -3309,6 +3309,9 @@ ncls:
+ }
+ }
+
++ if (vlan_tx_nonzero_tag_present(skb))
++ skb->pkt_type = PACKET_OTHERHOST;
++
+ /* deliver only exact match when indicated */
+ null_or_dev = deliver_exact ? skb->dev : NULL;
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7aafaed..5b9709f 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1254,8 +1254,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ if (!dst)
+ goto discard;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ if (!neigh_event_send(neigh, skb)) {
+ int err;
+ struct net_device *dev = neigh->dev;
+@@ -1265,6 +1263,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ neigh_hh_init(neigh, dst);
+
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+@@ -1295,9 +1294,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
+ unsigned int seq;
+ int err;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index df878de..7bc9991 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2935,7 +2935,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
+ pkt_dev->pkt_overhead;
+
+- if (datalen < sizeof(struct pktgen_hdr)) {
++ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
+ datalen = sizeof(struct pktgen_hdr);
+ if (net_ratelimit())
+ pr_info("increased datalen to %d\n", datalen);
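
The extra `datalen < 0` test in the pktgen hunk is not redundant: sizeof() has type size_t, so in `datalen < sizeof(struct pktgen_hdr)` the (possibly negative) int is converted to an unsigned type, compares as a huge positive value, and skips the clamp, letting a negative length flow onward. A minimal demonstration of the promotion trap (the struct is a 16-byte stand-in):

#include <stdio.h>

struct pktgen_hdr_demo { unsigned int a, b, c, d; };	/* 16 bytes */

int main(void)
{
	int datalen = -42;

	/* int vs size_t: datalen converts to unsigned, so this is false */
	if (datalen < sizeof(struct pktgen_hdr_demo))
		printf("clamped (unsigned compare)\n");
	else
		printf("NOT clamped: -42 became a huge unsigned value\n");

	/* the fixed test catches the negative case explicitly */
	if (datalen < 0 || (size_t)datalen < sizeof(struct pktgen_hdr_demo))
		printf("clamped (fixed test)\n");
	return 0;
}
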
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index de69cec..58c09a0 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -651,10 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+ arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+ /* When socket is gone, all binding information is lost.
+- * routing might fail in this case. using iif for oif to
+- * make sure we can deliver it
++ * routing might fail in this case. No choice here: if we force the
++ * input interface, we will misroute in the case of an asymmetric route.
+ */
+- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
++ if (sk)
++ arg.bound_dev_if = sk->sk_bound_dev_if;
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 4a56574..ccab3c8 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1048,7 +1048,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
+
+ fl6.flowi6_proto = IPPROTO_TCP;
+- fl6.flowi6_oif = inet6_iif(skb);
++ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
++ fl6.flowi6_oif = inet6_iif(skb);
+ fl6.fl6_dport = t1->dest;
+ fl6.fl6_sport = t1->source;
+ security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index 28a39bb..a582504 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ if (status->flag & RX_FLAG_MMIC_ERROR)
+ goto mic_fail;
+
+- if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
++ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
+ goto update_iv;
+
+ return RX_CONTINUE;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 1d15193..7489bd3 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -247,12 +247,15 @@ static void death_by_event(unsigned long ul_conntrack)
+ {
+ struct nf_conn *ct = (void *)ul_conntrack;
+ struct net *net = nf_ct_net(ct);
++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
++
++ BUG_ON(ecache == NULL);
+
+ if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
+ /* bad luck, let's retry again */
+- ct->timeout.expires = jiffies +
++ ecache->timeout.expires = jiffies +
+ (random32() % net->ct.sysctl_events_retry_timeout);
+- add_timer(&ct->timeout);
++ add_timer(&ecache->timeout);
+ return;
+ }
+ /* we've got the event delivered, now it's dying */
+@@ -266,6 +269,9 @@ static void death_by_event(unsigned long ul_conntrack)
+ void nf_ct_insert_dying_list(struct nf_conn *ct)
+ {
+ struct net *net = nf_ct_net(ct);
++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
++
++ BUG_ON(ecache == NULL);
+
+ /* add this conntrack to the dying list */
+ spin_lock_bh(&nf_conntrack_lock);
+@@ -273,10 +279,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
+ &net->ct.dying);
+ spin_unlock_bh(&nf_conntrack_lock);
+ /* set a new timer to retry event delivery */
+- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
+- ct->timeout.expires = jiffies +
++ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
++ ecache->timeout.expires = jiffies +
+ (random32() % net->ct.sysctl_events_retry_timeout);
+- add_timer(&ct->timeout);
++ add_timer(&ecache->timeout);
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
+
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 96531d4..88eace5 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
+ rds_stats_inc(s_send_pong);
+
+ if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+- rds_send_xmit(conn);
++ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+ rds_message_put(rm);
+ return 0;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 4530a91..237a2ee 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1404,11 +1404,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos,
+ struct cache_detail *cd)
+ {
+- char tbuf[20];
++ char tbuf[22];
+ unsigned long p = *ppos;
+ size_t len;
+
+- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
++ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
+ len = strlen(tbuf);
+ if (p >= len)
+ return 0;
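
The read_flush() fix is a worst-case sizing exercise: on 64-bit, "%lu" of ULONG_MAX prints 18446744073709551615, which is 20 digits, so digits plus '\n' plus the terminating NUL need 22 bytes, and the old sprintf into char tbuf[20] could overrun by two. The new code sizes for the worst case and uses snprintf as a second line of defence. Verifying the arithmetic:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char tbuf[22];
	int n = snprintf(tbuf, sizeof(tbuf), "%lu\n", ULONG_MAX);

	/* on 64-bit: 20 digits + '\n' = 21 chars, + NUL = 22 bytes used */
	printf("needs %d chars + NUL in a %zu-byte buffer\n", n, sizeof(tbuf));
	return 0;
}
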
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 10a385b..65fe23b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -254,7 +254,6 @@ struct sock_xprt {
+ void (*old_data_ready)(struct sock *, int);
+ void (*old_state_change)(struct sock *);
+ void (*old_write_space)(struct sock *);
+- void (*old_error_report)(struct sock *);
+ };
+
+ /*
+@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ case -ECONNRESET:
+- case -EPIPE:
+ xs_tcp_shutdown(xprt);
+ case -ECONNREFUSED:
+ case -ENOTCONN:
++ case -EPIPE:
+ clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ }
+
+@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
+ transport->old_data_ready = sk->sk_data_ready;
+ transport->old_state_change = sk->sk_state_change;
+ transport->old_write_space = sk->sk_write_space;
+- transport->old_error_report = sk->sk_error_report;
+ }
+
+ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
+@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
+ sk->sk_data_ready = transport->old_data_ready;
+ sk->sk_state_change = transport->old_state_change;
+ sk->sk_write_space = transport->old_write_space;
+- sk->sk_error_report = transport->old_error_report;
+ }
+
+ static void xs_reset_transport(struct sock_xprt *transport)
+@@ -1465,7 +1462,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ xprt_clear_connecting(xprt);
+ }
+
+-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
++static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+@@ -1473,6 +1470,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
++}
++
++static void xs_sock_mark_closed(struct rpc_xprt *xprt)
++{
++ xs_sock_reset_connection_flags(xprt);
+ /* Mark transport as closed and wake up all pending tasks */
+ xprt_disconnect_done(xprt);
+ }
+@@ -1528,6 +1530,7 @@ static void xs_tcp_state_change(struct sock *sk)
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+ xprt->connect_cookie++;
++ clear_bit(XPRT_CONNECTED, &xprt->state);
+ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+@@ -1552,25 +1555,6 @@ static void xs_tcp_state_change(struct sock *sk)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
+-/**
+- * xs_error_report - callback mainly for catching socket errors
+- * @sk: socket
+- */
+-static void xs_error_report(struct sock *sk)
+-{
+- struct rpc_xprt *xprt;
+-
+- read_lock_bh(&sk->sk_callback_lock);
+- if (!(xprt = xprt_from_sock(sk)))
+- goto out;
+- dprintk("RPC: %s client %p...\n"
+- "RPC: error %d\n",
+- __func__, xprt, sk->sk_err);
+- xprt_wake_pending_tasks(xprt, -EAGAIN);
+-out:
+- read_unlock_bh(&sk->sk_callback_lock);
+-}
+-
+ static void xs_write_space(struct sock *sk)
+ {
+ struct socket *sock;
+@@ -1870,7 +1854,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_local_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ xprt_clear_connected(xprt);
+@@ -1959,7 +1942,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_udp_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_no_check = UDP_CSUM_NORCV;
+ sk->sk_allocation = GFP_ATOMIC;
+
+@@ -2027,10 +2009,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
+ any.sa_family = AF_UNSPEC;
+ result = kernel_connect(transport->sock, &any, sizeof(any), 0);
+ if (!result)
+- xs_sock_mark_closed(&transport->xprt);
+- else
+- dprintk("RPC: AF_UNSPEC connect return code %d\n",
+- result);
++ xs_sock_reset_connection_flags(&transport->xprt);
++ dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
+ }
+
+ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
+@@ -2075,7 +2055,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ sk->sk_data_ready = xs_tcp_data_ready;
+ sk->sk_state_change = xs_tcp_state_change;
+ sk->sk_write_space = xs_tcp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* socket options */
+@@ -2488,6 +2467,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
+ static struct rpc_xprt_ops bc_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xprt_release_xprt,
++ .alloc_slot = xprt_alloc_slot,
+ .buf_alloc = bc_malloc,
+ .buf_free = bc_free,
+ .send_request = bc_send_request,
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index fac51ee..1e7cfba 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
+ tmp.index = ac97->num;
+ kctl = snd_ctl_new1(&tmp, ac97);
+ }
++ if (!kctl)
++ return -ENOMEM;
+ if (reg >= AC97_PHONE && reg <= AC97_PCM)
+ set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
+ else
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index 6a3e567..d37b946 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
++ /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
++ /* This is MAEM8986, 0202 is MAEM8980 */
++ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
++ .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
++ .id = "EMU1010",
++ .emu10k2_chip = 1,
++ .ca0108_chip = 1,
++ .spk71 = 1,
++ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
+ /* Tested by James@superbug.co.uk 8th July 2005. */
+ /* This is MAEM8810, 0202 is MAEM8820 */
+ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index ec0518e..e449278 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1404,7 +1404,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1949,7 +1949,7 @@ static int patch_cs421x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 94f0c4a..58c287b 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4463,7 +4463,9 @@ static void apply_fixup(struct hda_codec *codec,
+ struct conexant_spec *spec = codec->spec;
+
+ quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+- if (quirk && table[quirk->value]) {
++ if (!quirk)
++ return;
++ if (table[quirk->value]) {
+ snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
+ quirk->name);
+ apply_pincfg(codec, table[quirk->value]);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 32c8169..c2c7f90 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -620,6 +620,8 @@ static void alc_line_automute(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+
++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
++ return;
+ /* check LO jack only when it's different from HP */
+ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ return;
+@@ -2663,8 +2665,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
+ return "PCM";
+ break;
+ }
+- if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
++ if (ch >= ARRAY_SIZE(channel_name)) {
++ snd_BUG();
+ return "PCM";
++ }
+
+ return channel_name[ch];
+ }
+@@ -5080,6 +5084,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
+index af0f22f..aca6edc 100644
+--- a/usr/gen_init_cpio.c
++++ b/usr/gen_init_cpio.c
+@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
+ int retval;
+ int rc = -1;
+ int namesize;
+- int i;
++ unsigned int i;
+
+ mode |= S_IFREG;
+
+@@ -381,25 +381,28 @@ error:
+
+ static char *cpio_replace_env(char *new_location)
+ {
+- char expanded[PATH_MAX + 1];
+- char env_var[PATH_MAX + 1];
+- char *start;
+- char *end;
+-
+- for (start = NULL; (start = strstr(new_location, "${")); ) {
+- end = strchr(start, '}');
+- if (start < end) {
+- *env_var = *expanded = '\0';
+- strncat(env_var, start + 2, end - start - 2);
+- strncat(expanded, new_location, start - new_location);
+- strncat(expanded, getenv(env_var), PATH_MAX);
+- strncat(expanded, end + 1, PATH_MAX);
+- strncpy(new_location, expanded, PATH_MAX);
+- } else
+- break;
+- }
+-
+- return new_location;
++ char expanded[PATH_MAX + 1];
++ char env_var[PATH_MAX + 1];
++ char *start;
++ char *end;
++
++ for (start = NULL; (start = strstr(new_location, "${")); ) {
++ end = strchr(start, '}');
++ if (start < end) {
++ *env_var = *expanded = '\0';
++ strncat(env_var, start + 2, end - start - 2);
++ strncat(expanded, new_location, start - new_location);
++ strncat(expanded, getenv(env_var),
++ PATH_MAX - strlen(expanded));
++ strncat(expanded, end + 1,
++ PATH_MAX - strlen(expanded));
++ strncpy(new_location, expanded, PATH_MAX);
++ new_location[PATH_MAX] = 0;
++ } else
++ break;
++ }
++
++ return new_location;
+ }
+
+
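
Beyond the cosmetic reindent, the cpio_replace_env() change fixes the strncat() bounds: the size argument of strncat() is the maximum number of characters to append (a NUL is always written after them), not the total buffer size, so the old flat PATH_MAX bound could still grow `expanded` past its end once something was already in it. The fix passes the remaining space, PATH_MAX - strlen(expanded), and NUL-terminates after the strncpy. A small demo of the distinction (CAP stands in for PATH_MAX):

#include <stdio.h>
#include <string.h>

#define CAP 16	/* stands in for PATH_MAX */

int main(void)
{
	char expanded[CAP + 1] = "/usr/local";	/* 10 chars already present */
	const char *tail = "/bin/gen_init_cpio";

	/* WRONG: strncat(expanded, tail, CAP) may append up to CAP chars,
	 * overflowing the buffer. RIGHT: bound by the space that is left. */
	strncat(expanded, tail, CAP - strlen(expanded));
	expanded[CAP] = '\0';	/* belt and braces, as in the fix */

	printf("%s (len %zu, cap %d)\n", expanded, strlen(expanded), CAP);
	return 0;
}
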
diff --git a/3.2.54/1033_linux-3.2.34.patch b/3.2.54/1033_linux-3.2.34.patch
new file mode 100644
index 0000000..d647b38
--- /dev/null
+++ b/3.2.54/1033_linux-3.2.34.patch
@@ -0,0 +1,3678 @@
+diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
+index 3d84912..47c4ec2 100644
+--- a/Documentation/feature-removal-schedule.txt
++++ b/Documentation/feature-removal-schedule.txt
+@@ -6,14 +6,6 @@ be removed from this file.
+
+ ---------------------------
+
+-What: x86 floppy disable_hlt
+-When: 2012
+-Why: ancient workaround of dubious utility clutters the
+- code used by everybody else.
+-Who: Len Brown <len.brown@intel.com>
+-
+----------------------------
+-
+ What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
+ When: 2012
+ Why: This optional sub-feature of APM is of dubious reliability,
+diff --git a/Makefile b/Makefile
+index 63ca1ea2..14ebacf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
+index 143eebb..929fd91 100644
+--- a/arch/arm/mach-at91/at91rm9200_devices.c
++++ b/arch/arm/mach-at91/at91rm9200_devices.c
+@@ -462,7 +462,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91rm9200_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
+index 2590988..465e026 100644
+--- a/arch/arm/mach-at91/at91sam9260_devices.c
++++ b/arch/arm/mach-at91/at91sam9260_devices.c
+@@ -467,7 +467,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9260_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
+index daf3e66..d6d1e76 100644
+--- a/arch/arm/mach-at91/at91sam9261_devices.c
++++ b/arch/arm/mach-at91/at91sam9261_devices.c
+@@ -284,7 +284,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9261_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
+index 32a7e43..e051376e 100644
+--- a/arch/arm/mach-at91/at91sam9263_devices.c
++++ b/arch/arm/mach-at91/at91sam9263_devices.c
+@@ -540,7 +540,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9263_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
+index 628eb56..4862b23 100644
+--- a/arch/arm/mach-at91/at91sam9rl_devices.c
++++ b/arch/arm/mach-at91/at91sam9rl_devices.c
+@@ -319,7 +319,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9rl_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
+index f5bbe0ef..0d264bf 100644
+--- a/arch/arm/mach-at91/setup.c
++++ b/arch/arm/mach-at91/setup.c
+@@ -163,7 +163,7 @@ static void __init soc_detect(u32 dbgu_base)
+ }
+
+ /* at91sam9g10 */
+- if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
++ if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
+ at91_soc_initdata.type = AT91_SOC_SAM9G10;
+ at91_boot_soc = at91sam9261_soc;
+ }
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index 2d2f01c..d75adff 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -93,10 +93,6 @@ do { \
+ "memory"); \
+ } while (0)
+
+-/*
+- * disable hlt during certain critical i/o operations
+- */
+-#define HAVE_DISABLE_HLT
+ #else
+
+ /* frame pointer must be last for get_wchan */
+@@ -392,9 +388,6 @@ static inline void clflush(volatile void *__p)
+
+ #define nop() asm volatile ("nop")
+
+-void disable_hlt(void);
+-void enable_hlt(void);
+-
+ void cpu_idle_wait(void);
+
+ extern unsigned long arch_align_stack(unsigned long sp);
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index ee5d4fb..59b9b37 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -341,34 +341,10 @@ void (*pm_idle)(void);
+ EXPORT_SYMBOL(pm_idle);
+ #endif
+
+-#ifdef CONFIG_X86_32
+-/*
+- * This halt magic was a workaround for ancient floppy DMA
+- * wreckage. It should be safe to remove.
+- */
+-static int hlt_counter;
+-void disable_hlt(void)
+-{
+- hlt_counter++;
+-}
+-EXPORT_SYMBOL(disable_hlt);
+-
+-void enable_hlt(void)
+-{
+- hlt_counter--;
+-}
+-EXPORT_SYMBOL(enable_hlt);
+-
+-static inline int hlt_use_halt(void)
+-{
+- return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+-}
+-#else
+ static inline int hlt_use_halt(void)
+ {
+ return 1;
+ }
+-#endif
+
+ /*
+ * We use this if we don't have any better
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index ec3d603..2b8b0de 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1203,6 +1203,25 @@ unsigned long xen_read_cr2_direct(void)
+ return percpu_read(xen_vcpu_info.arch.cr2);
+ }
+
++void xen_flush_tlb_all(void)
++{
++ struct mmuext_op *op;
++ struct multicall_space mcs;
++
++ trace_xen_mmu_flush_tlb_all(0);
++
++ preempt_disable();
++
++ mcs = xen_mc_entry(sizeof(*op));
++
++ op = mcs.args;
++ op->cmd = MMUEXT_TLB_FLUSH_ALL;
++ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
++
++ xen_mc_issue(PARAVIRT_LAZY_MMU);
++
++ preempt_enable();
++}
+ static void xen_flush_tlb(void)
+ {
+ struct mmuext_op *op;
+@@ -2366,7 +2385,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ err = 0;
+ out:
+
+- flush_tlb_all();
++ xen_flush_tlb_all();
+
+ return err;
+ }
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 671d4d6..7bdd61b 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work)
+ struct crypto_async_request *req, *backlog;
+
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+- /* Only handle one request at a time to avoid hogging crypto
+- * workqueue. preempt_disable/enable is used to prevent
+- * being preempted by cryptd_enqueue_request() */
++ /*
++ * Only handle one request at a time to avoid hogging crypto workqueue.
++ * preempt_disable/enable is used to prevent being preempted by
++ * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
++ * cryptd_enqueue_request() from being invoked from software interrupts.
++ */
++ local_bh_disable();
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
++ local_bh_enable();
+
+ if (!req)
+ return;
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index c864add..7a90d4a 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -1032,37 +1032,6 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+ return 0;
+ }
+
+-static DEFINE_SPINLOCK(floppy_hlt_lock);
+-static int hlt_disabled;
+-static void floppy_disable_hlt(void)
+-{
+- unsigned long flags;
+-
+- WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
+- spin_lock_irqsave(&floppy_hlt_lock, flags);
+- if (!hlt_disabled) {
+- hlt_disabled = 1;
+-#ifdef HAVE_DISABLE_HLT
+- disable_hlt();
+-#endif
+- }
+- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+-}
+-
+-static void floppy_enable_hlt(void)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&floppy_hlt_lock, flags);
+- if (hlt_disabled) {
+- hlt_disabled = 0;
+-#ifdef HAVE_DISABLE_HLT
+- enable_hlt();
+-#endif
+- }
+- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+-}
+-
+ static void setup_DMA(void)
+ {
+ unsigned long f;
+@@ -1107,7 +1076,6 @@ static void setup_DMA(void)
+ fd_enable_dma();
+ release_dma_lock(f);
+ #endif
+- floppy_disable_hlt();
+ }
+
+ static void show_floppy(void);
+@@ -1709,7 +1677,6 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
+ fd_disable_dma();
+ release_dma_lock(f);
+
+- floppy_enable_hlt();
+ do_floppy = NULL;
+ if (fdc >= N_FDC || FDCS->address == -1) {
+ /* we don't even know which FDC is the culprit */
+@@ -1858,8 +1825,6 @@ static void floppy_shutdown(unsigned long data)
+ show_floppy();
+ cancel_activity();
+
+- floppy_enable_hlt();
+-
+ flags = claim_dma_lock();
+ fd_disable_dma();
+ release_dma_lock(flags);
+@@ -4198,6 +4163,7 @@ static int __init floppy_init(void)
+
+ disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!disks[dr]->queue) {
++ put_disk(disks[dr]);
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+@@ -4339,7 +4305,7 @@ static int __init floppy_init(void)
+
+ err = platform_device_register(&floppy_device[drive]);
+ if (err)
+- goto out_flush_work;
++ goto out_remove_drives;
+
+ err = device_create_file(&floppy_device[drive].dev,
+ &dev_attr_cmos);
+@@ -4357,6 +4323,15 @@ static int __init floppy_init(void)
+
+ out_unreg_platform_dev:
+ platform_device_unregister(&floppy_device[drive]);
++out_remove_drives:
++ while (drive--) {
++ if ((allowed_drive_mask & (1 << drive)) &&
++ fdc_state[FDC(drive)].version != FDC_NONE) {
++ del_gendisk(disks[drive]);
++ device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
++ platform_device_unregister(&floppy_device[drive]);
++ }
++ }
+ out_flush_work:
+ flush_work_sync(&floppy_work);
+ if (atomic_read(&usage_count))
+@@ -4510,7 +4485,6 @@ static void floppy_release_irq_and_dma(void)
+ #if N_FDC > 1
+ set_dor(1, ~8, 0);
+ #endif
+- floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors * 1024;
+diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
+index c593bd4..edff410 100644
+--- a/drivers/gpio/gpio-timberdale.c
++++ b/drivers/gpio/gpio-timberdale.c
+@@ -116,7 +116,7 @@ static void timbgpio_irq_disable(struct irq_data *d)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+- tgpio->last_ier &= ~(1 << offset);
++ tgpio->last_ier &= ~(1UL << offset);
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+ }
+@@ -128,7 +128,7 @@ static void timbgpio_irq_enable(struct irq_data *d)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+- tgpio->last_ier |= 1 << offset;
++ tgpio->last_ier |= 1UL << offset;
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+ }
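
The timberdale fix above is worth pausing on: 1 << offset is computed as a plain int, so once offset reaches 31 the result is (formally undefined, in practice) INT_MIN, and the implicit conversion to the unsigned long last_ier sign-extends it, setting or clearing 32 extra bits on a 64-bit kernel. A minimal standalone sketch of the failure mode on an LP64 toolchain (not driver code):

/* shift_width_demo.c -- "1 << n" builds an int, not an unsigned long */
#include <stdio.h>

int main(void)
{
	unsigned long ier = 0;
	int offset = 31;

	ier |= 1 << offset;	/* int shift: formally undefined at bit 31;
				 * typical compilers produce INT_MIN, which
				 * then sign-extends to 64 bits */
	printf("set   with 1   << 31: %#lx\n", ier);	/* 0xffffffff80000000 */

	ier = 0;
	ier |= 1UL << offset;	/* the shift happens in unsigned long */
	printf("set   with 1UL << 31: %#lx\n", ier);	/* 0x80000000 */

	ier = ~0UL;
	ier &= ~(1 << offset);	/* ~INT_MIN is 0x7fffffff, so the AND also
				 * clears bits 32..63 by accident */
	printf("clear with 1   << 31: %#lx\n", ier);	/* 0x7fffffff */

	ier = ~0UL;
	ier &= ~(1UL << offset);
	printf("clear with 1UL << 31: %#lx\n", ier);	/* 0xffffffff7fffffff */
	return 0;
}
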
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 828bf65..020b103 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -136,8 +136,11 @@ int drm_open(struct inode *inode, struct file *filp)
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+- if (!dev->open_count++)
++ if (!dev->open_count++) {
+ retcode = drm_setup(dev);
++ if (retcode)
++ dev->open_count--;
++ }
+ }
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
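
The drm_open() hunk fixes a one-time-setup counter that was bumped before drm_setup() could fail; without the rollback, a failed first open leaves open_count at 1 and the setup is never retried. A minimal sketch of the pattern, with a hypothetical setup() standing in for drm_setup():

/* rollback_demo.c -- undo a counter bump when the guarded setup fails */
#include <stdio.h>

static int open_count;

static int setup(void)
{
	return -1;		/* pretend one-time setup failed */
}

static int do_open(void)
{
	int ret = 0;

	if (!open_count++) {	/* first opener runs one-time setup */
		ret = setup();
		if (ret)
			open_count--;	/* roll back: next open retries setup */
	}
	return ret;
}

int main(void)
{
	do_open();
	printf("open_count after failed first open: %d\n", open_count); /* 0 */
	return 0;
}
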
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 83e820e..bcadf74 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -227,12 +227,12 @@ struct dip_infoframe {
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+- } avi;
++ } __attribute__ ((packed)) avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+- } spd;
++ } __attribute__ ((packed)) spd;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+ } __attribute__((packed));
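
The intel_drv.h change works because packing is not recursive: __attribute__((packed)) on the outer struct removes padding between the outer members, but a nested struct keeps whatever internal padding its own definition produced, so avi and spd must be packed individually for the infoframe to match the wire format. A standalone sizeof sketch:

/* packed_nesting_demo.c -- packing an outer struct does not pack its members */
#include <stdio.h>
#include <stdint.h>

struct inner        { uint8_t a; uint16_t b; };				/* a, pad, b */
struct inner_packed { uint8_t a; uint16_t b; } __attribute__((packed));

struct outer {
	uint8_t tag;
	struct inner in;	/* keeps its internal padding byte */
} __attribute__((packed));

struct outer_packed {
	uint8_t tag;
	struct inner_packed in;
} __attribute__((packed));

int main(void)
{
	printf("inner=%zu inner_packed=%zu outer=%zu outer_packed=%zu\n",
	       sizeof(struct inner), sizeof(struct inner_packed),
	       sizeof(struct outer), sizeof(struct outer_packed));
	/* typically prints: inner=4 inner_packed=3 outer=5 outer_packed=4 */
	return 0;
}
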
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index cdf17d4..478b51f 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -428,9 +428,17 @@ static int intel_overlay_off(struct intel_overlay *overlay)
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+- OUT_RING(flip_addr);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++ if (IS_I830(dev)) {
++ /* Workaround: Don't disable the overlay fully, since otherwise
++ * it dies on the next OVERLAY_ON cmd. */
++ OUT_RING(MI_NOOP);
++ OUT_RING(MI_NOOP);
++ OUT_RING(MI_NOOP);
++ } else {
++ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++ OUT_RING(flip_addr);
++ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++ }
+ ADVANCE_LP_RING();
+
+ return intel_overlay_do_wait_request(overlay, request,
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index bbf247c..3f4afba 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -868,31 +868,38 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+ }
+ #endif
+
+-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
++static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
++ unsigned if_index, uint8_t tx_rate,
++ uint8_t *data, unsigned length)
+ {
+- struct dip_infoframe avi_if = {
+- .type = DIP_TYPE_AVI,
+- .ver = DIP_VERSION_AVI,
+- .len = DIP_LEN_AVI,
+- };
+- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+- uint8_t set_buf_index[2] = { 1, 0 };
+- uint64_t *data = (uint64_t *)&avi_if;
+- unsigned i;
+-
+- intel_dip_infoframe_csum(&avi_if);
++ uint8_t set_buf_index[2] = { if_index, 0 };
++ uint8_t hbuf_size, tmp[8];
++ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+- for (i = 0; i < sizeof(avi_if); i += 8) {
++ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
++ &hbuf_size, 1))
++ return false;
++
++ /* Buffer size is 0 based, hooray! */
++ hbuf_size++;
++
++	DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %i, hbuf_size: %i\n",
++ if_index, length, hbuf_size);
++
++ for (i = 0; i < hbuf_size; i += 8) {
++ memset(tmp, 0, 8);
++ if (i < length)
++ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
++
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+- data, 8))
++ tmp, 8))
+ return false;
+- data++;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+@@ -900,6 +907,28 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+ &tx_rate, 1);
+ }
+
++static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
++{
++ struct dip_infoframe avi_if = {
++ .type = DIP_TYPE_AVI,
++ .ver = DIP_VERSION_AVI,
++ .len = DIP_LEN_AVI,
++ };
++ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
++
++ intel_dip_infoframe_csum(&avi_if);
++
++ /* sdvo spec says that the ecc is handled by the hw, and it looks like
++ * we must not send the ecc field, either. */
++ memcpy(sdvo_data, &avi_if, 3);
++ sdvo_data[3] = avi_if.checksum;
++ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
++
++ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
++ SDVO_HBUF_TX_VSYNC,
++ sdvo_data, sizeof(sdvo_data));
++}
++
+ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+ {
+ struct intel_sdvo_tv_format format;
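
The rewritten SDVO helper writes the infoframe in fixed 8-byte chunks, zeroing each chunk before copying in whatever payload remains, so the tail of the hardware buffer is cleared instead of carrying stale bytes from a previous frame. A userspace sketch of the same chunking, with a hypothetical write_chunk() standing in for the SDVO_CMD_SET_HBUF_DATA write:

/* chunked_write_demo.c -- zero-padded fixed-size chunks */
#include <stdio.h>
#include <string.h>

#define CHUNK 8

static void write_chunk(const unsigned char *buf)	/* hypothetical device write */
{
	for (int i = 0; i < CHUNK; i++)
		printf("%02x ", buf[i]);
	printf("\n");
}

static void write_padded(const unsigned char *data, size_t length,
			 size_t hbuf_size)
{
	unsigned char tmp[CHUNK];

	for (size_t i = 0; i < hbuf_size; i += CHUNK) {
		memset(tmp, 0, CHUNK);		/* clear stale bytes first */
		if (i < length) {
			size_t n = length - i;
			memcpy(tmp, data + i, n < CHUNK ? n : CHUNK);
		}
		write_chunk(tmp);		/* device always sees full chunks */
	}
}

int main(void)
{
	unsigned char payload[] = "infoframe";	/* 10 bytes incl. NUL */
	write_padded(payload, sizeof(payload), 24);
	return 0;
}
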
+diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+index 372f33b..4193c54 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+@@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
+ #define SDVO_CMD_SET_AUDIO_STAT 0x91
+ #define SDVO_CMD_GET_AUDIO_STAT 0x92
+ #define SDVO_CMD_SET_HBUF_INDEX 0x93
++ #define SDVO_HBUF_INDEX_ELD 0
++ #define SDVO_HBUF_INDEX_AVI_IF 1
+ #define SDVO_CMD_GET_HBUF_INDEX 0x94
+ #define SDVO_CMD_GET_HBUF_INFO 0x95
+ #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
+index 9791d13..8c084c0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -178,8 +178,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- NV_INFO(dev, "Disabling fbcon acceleration...\n");
+- nouveau_fbcon_save_disable_accel(dev);
++ if (dev->mode_config.num_crtc) {
++ NV_INFO(dev, "Disabling fbcon acceleration...\n");
++ nouveau_fbcon_save_disable_accel(dev);
++ }
+
+ NV_INFO(dev, "Unpinning framebuffer(s)...\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+@@ -246,10 +248,12 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+- console_lock();
+- nouveau_fbcon_set_suspend(dev, 1);
+- console_unlock();
+- nouveau_fbcon_restore_accel(dev);
++ if (dev->mode_config.num_crtc) {
++ console_lock();
++ nouveau_fbcon_set_suspend(dev, 1);
++ console_unlock();
++ nouveau_fbcon_restore_accel(dev);
++ }
+ return 0;
+
+ out_abort:
+@@ -275,7 +279,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- nouveau_fbcon_save_disable_accel(dev);
++ if (dev->mode_config.num_crtc)
++ nouveau_fbcon_save_disable_accel(dev);
+
+ NV_INFO(dev, "We're back, enabling device...\n");
+ pci_set_power_state(pdev, PCI_D0);
+@@ -376,15 +381,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ nv_crtc->lut.depth = 0;
+ }
+
+- console_lock();
+- nouveau_fbcon_set_suspend(dev, 0);
+- console_unlock();
++ if (dev->mode_config.num_crtc) {
++ console_lock();
++ nouveau_fbcon_set_suspend(dev, 0);
++ console_unlock();
+
+- nouveau_fbcon_zfill_all(dev);
++ nouveau_fbcon_zfill_all(dev);
++ }
+
+ drm_helper_resume_force_mode(dev);
+
+- nouveau_fbcon_restore_accel(dev);
++ if (dev->mode_config.num_crtc)
++ nouveau_fbcon_restore_accel(dev);
+ return 0;
+ }
+
+@@ -466,9 +474,7 @@ static int __init nouveau_init(void)
+ #ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ nouveau_modeset = 0;
+- else
+ #endif
+- nouveau_modeset = 1;
+ }
+
+ if (!nouveau_modeset)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index d8831ab..01adcfb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -46,6 +46,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
++ u32 pclass = dev->pdev->class >> 8;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+@@ -481,7 +482,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ }
+
+ /* headless mode */
+- if (nouveau_modeset == 2) {
++ if (nouveau_modeset == 2 ||
++ (nouveau_modeset < 0 && pclass != PCI_CLASS_DISPLAY_VGA)) {
+ engine->display.early_init = nouveau_stub_init;
+ engine->display.late_takedown = nouveau_stub_takedown;
+ engine->display.create = nouveau_stub_init;
+diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
+index e000455..2d6bfd0 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dac.c
++++ b/drivers/gpu/drm/nouveau/nv04_dac.c
+@@ -209,7 +209,7 @@ out:
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
+
+ if (blue == 0x18) {
+- NV_INFO(dev, "Load detected on head A\n");
++ NV_DEBUG(dev, "Load detected on head A\n");
+ return connector_status_connected;
+ }
+
+@@ -323,7 +323,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+
+ if (nv17_dac_sample_load(encoder) &
+ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+- NV_INFO(dev, "Load detected on output %c\n",
++ NV_DEBUG(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
+ return connector_status_connected;
+ } else {
+@@ -398,7 +398,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ }
+@@ -447,7 +447,7 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
+index 12098bf..752440c 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
+@@ -468,7 +468,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ }
+@@ -511,7 +511,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ if (was_powersaving && is_powersaving_dpms(mode))
+@@ -556,7 +556,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dfp_update_backlight(encoder, mode);
+diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
+index 3eb605d..4de1fbe 100644
+--- a/drivers/gpu/drm/nouveau/nv04_tv.c
++++ b/drivers/gpu/drm/nouveau/nv04_tv.c
+@@ -69,7 +69,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint8_t crtc1A;
+
+- NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+@@ -162,7 +162,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
+ '@' + ffs(nv_encoder->dcb->or));
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index b61f490..ca94e23 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1164,7 +1164,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+- if (save->crtc_enabled) {
++ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
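
The evergreen fix is a one-character bug with a large blast radius: save->crtc_enabled is an array, so testing it without an index tests the decayed (always non-NULL) pointer and the branch runs for every CRTC regardless of the saved state. In miniature:

/* array_truthiness_demo.c -- "if (arr)" is always true */
#include <stdbool.h>
#include <stdio.h>

struct save { bool crtc_enabled[6]; };

int main(void)
{
	struct save s = { .crtc_enabled = { false } };	/* nothing enabled */

	for (int i = 0; i < 6; i++) {
		if (s.crtc_enabled)	/* decays to a non-NULL pointer: always true */
			printf("crtc %d resumed (bug)\n", i);
		if (s.crtc_enabled[i])	/* the intended per-CRTC test */
			printf("crtc %d resumed\n", i);
	}
	return 0;
}
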
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 3ad3cc6..8165953 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -650,6 +650,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+ WREG32(RADEON_DAC_CNTL, tmp);
+
++ tmp = dac_macro_cntl;
+ tmp &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+index 3fa884d..27151f7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+@@ -306,7 +306,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(old_mem_type != TTM_PL_VRAM &&
+- old_mem_type != VMW_PL_FLAG_GMR);
++ old_mem_type != VMW_PL_GMR);
+
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ if (pin)
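
The vmwgfx change untangles two constant namespaces: old_mem_type holds a memory-type index, but it was compared against VMW_PL_FLAG_GMR, the corresponding placement flag (a shifted mask bit), so the sanity check tripped on perfectly valid GMR placements. A sketch with hypothetical values, not the real vmwgfx constants:

/* type_vs_flag_demo.c -- comparing a type index against its mask bit */
#include <stdio.h>

#define PL_VRAM      2			/* memory type indices */
#define PL_GMR       3
#define PL_FLAG_GMR  (1 << PL_GMR)	/* placement flag: a mask bit, value 8 */

int main(void)
{
	int old_mem_type = PL_GMR;	/* a perfectly valid placement */

	/* Buggy assertion condition: compares the index 3 against the
	 * flag 8, so valid GMR memory would trip the BUG_ON. */
	int buggy = (old_mem_type != PL_VRAM && old_mem_type != PL_FLAG_GMR);
	/* Fixed condition compares like with like. */
	int fixed = (old_mem_type != PL_VRAM && old_mem_type != PL_GMR);

	printf("buggy fires: %d, fixed fires: %d\n", buggy, fixed);	/* 1, 0 */
	return 0;
}
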
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 033fc96..b639536 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1048,6 +1048,11 @@ static void vmw_pm_complete(struct device *kdev)
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
++ mutex_lock(&dev_priv->hw_mutex);
++ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
++ (void) vmw_read(dev_priv, SVGA_REG_ID);
++ mutex_unlock(&dev_priv->hw_mutex);
++
+ /**
+ * Reclaim 3d reference held by fbdev and potentially
+ * start fifo.
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index e5c699b..3899989 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -29,22 +29,30 @@
+ #define MS_RDESC 0x08
+ #define MS_NOGET 0x10
+ #define MS_DUPLICATE_USAGES 0x20
++#define MS_RDESC_3K 0x40
+
+-/*
+- * Microsoft Wireless Desktop Receiver (Model 1028) has
+- * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+- */
+ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
++ /*
++ * Microsoft Wireless Desktop Receiver (Model 1028) has
++ * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
++ */
+ if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
+ rdesc[559] == 0x29) {
+ hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
+ rdesc[557] = 0x35;
+ rdesc[559] = 0x45;
+ }
++ /* the same as above (s/usage/physical/) */
++ if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
++ !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
++ &rdesc[94], 4)) {
++ rdesc[94] = 0x35;
++ rdesc[96] = 0x45;
++ }
+ return rdesc;
+ }
+
+@@ -193,7 +201,7 @@ static const struct hid_device_id ms_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
+ .driver_data = MS_PRESENTER },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
+- .driver_data = MS_ERGONOMY },
++ .driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
+ .driver_data = MS_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
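
The 3K fixup above matches raw descriptor bytes against a C99 compound literal, (char []){ 0x19, 0x00, 0x29, 0xff }, which hands memcmp() an anonymous, correctly sized array without declaring a named table (0x19 and 0x29 being the HID Usage Minimum/Maximum tags). The idiom in isolation:

/* compound_literal_demo.c -- memcmp against an anonymous byte pattern */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char rdesc[8] = { 0x05, 0x01, 0x19, 0x00, 0x29, 0xff, 0x81, 0x02 };

	/* Anonymous 4-byte array, sized by its initializer; no static table. */
	if (!memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, &rdesc[2], 4))
		printf("Usage Min/Max 0..255 found: apply fixup\n");
	return 0;
}
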
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index ceaec92..b6a3ce3 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -2015,6 +2015,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+ mutex_init(&data->lock);
+ mutex_init(&data->update_lock);
+ data->name = w83627ehf_device_names[sio_data->kind];
++ data->bank = 0xff; /* Force initial bank selection */
+ platform_set_drvdata(pdev, data);
+
+ /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
+diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
+index 29d5ed4..80d4610 100644
+--- a/drivers/input/touchscreen/tsc40.c
++++ b/drivers/input/touchscreen/tsc40.c
+@@ -107,7 +107,6 @@ static int tsc_connect(struct serio *serio, struct serio_driver *drv)
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+ input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
+ input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
+- input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
+
+ serio_set_drvdata(serio, ptsc);
+
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 11ddd838..69fc888 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -3060,8 +3060,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
+
+ /* Reading this mask interrupts as side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+- if (status == 0 || status == ~0)
++ if (status == 0 || status == ~0) {
++ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+ return IRQ_NONE;
++ }
+
+ prefetch(&hw->st_le[hw->st_idx]);
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 4b43bc5..b8db4cd 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -77,7 +77,7 @@ static const int multicast_filter_limit = 32;
+ #define MAC_ADDR_LEN 6
+
+ #define MAX_READ_REQUEST_SHIFT 12
+-#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
++#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
+ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
+ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+
+@@ -3521,6 +3521,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_25:
++ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32:
+@@ -6064,6 +6066,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ mc_filter[1] = swab32(data);
+ }
+
++ if (tp->mac_version == RTL_GIGA_MAC_VER_35)
++ mc_filter[1] = mc_filter[0] = 0xffffffff;
++
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index b873b5d..dc53a8f 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1156,6 +1156,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ usb_anchor_urb(urb, &dev->deferred);
+ /* no use to process more packets */
+ netif_stop_queue(net);
++ usb_put_urb(urb);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+ goto deferred;
+@@ -1297,6 +1298,8 @@ void usbnet_disconnect (struct usb_interface *intf)
+
+ cancel_work_sync(&dev->kevent);
+
++ usb_scuttle_anchored_urbs(&dev->deferred);
++
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind (dev, intf);
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index c59c592..18da100 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -288,6 +288,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+ }
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
++ bf->bf_next = NULL;
+ list_del(&bf->list);
+
+ spin_unlock_bh(&sc->tx.txbuflock);
+@@ -369,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
+- bool rc_update = true;
++ bool rc_update = true, isba;
+ struct ieee80211_tx_rate rates[4];
+ struct ath_frame_info *fi;
+ int nframes;
+@@ -407,13 +408,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ an = (struct ath_node *)sta->drv_priv;
+ tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ATH_AN_2_TID(an, tidno);
++ isba = ts->ts_flags & ATH9K_TX_BA;
+
+ /*
+ * The hardware occasionally sends a tx status for the wrong TID.
+ * In this case, the BA status cannot be considered valid and all
+ * subframes need to be retransmitted
++ *
++ * Only BlockAcks have a TID and therefore normal Acks cannot be
++ * checked
+ */
+- if (tidno != ts->tid)
++ if (isba && tidno != ts->tid)
+ txok = false;
+
+ isaggr = bf_isaggr(bf);
+@@ -1710,6 +1715,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ if (tid)
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+
++ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+ ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 1ba079d..fb19447 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -2141,7 +2141,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
+ /*
+ * Check if temperature compensation is supported.
+ */
+- if (tssi_bounds[4] == 0xff)
++ if (tssi_bounds[4] == 0xff || step == 0xff)
+ return 0;
+
+ /*
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index f35cb10..6fa7222 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3523,7 +3523,9 @@ restart:
+ */
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
++ wait_event_interruptible(conn->queues_wq,
++ !iscsit_conn_all_queues_empty(conn) ||
++ ts->status == ISCSI_THREAD_SET_RESET);
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+ signal_pending(current))
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index dae283f..bd8ce01 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -491,6 +491,7 @@ struct iscsi_tmr_req {
+ };
+
+ struct iscsi_conn {
++ wait_queue_head_t queues_wq;
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 2ec5339..eb0c9fe 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -44,6 +44,7 @@ extern spinlock_t sess_idr_lock;
+
+ static int iscsi_login_init_conn(struct iscsi_conn *conn)
+ {
++ init_waitqueue_head(&conn->queues_wq);
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 99f2af3..e612722 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -659,7 +659,7 @@ void iscsit_add_cmd_to_immediate_queue(
+ atomic_set(&conn->check_immediate_queue, 1);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+- wake_up_process(conn->thread_set->tx_thread);
++ wake_up(&conn->queues_wq);
+ }
+
+ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+@@ -733,7 +733,7 @@ void iscsit_add_cmd_to_response_queue(
+ atomic_inc(&cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+- wake_up_process(conn->thread_set->tx_thread);
++ wake_up(&conn->queues_wq);
+ }
+
+ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+@@ -787,6 +787,24 @@ static void iscsit_remove_cmd_from_response_queue(
+ }
+ }
+
++bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
++{
++ bool empty;
++
++ spin_lock_bh(&conn->immed_queue_lock);
++ empty = list_empty(&conn->immed_queue_list);
++ spin_unlock_bh(&conn->immed_queue_lock);
++
++ if (!empty)
++ return empty;
++
++ spin_lock_bh(&conn->response_queue_lock);
++ empty = list_empty(&conn->response_queue_list);
++ spin_unlock_bh(&conn->response_queue_lock);
++
++ return empty;
++}
++
+ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+ {
+ struct iscsi_queue_req *qr, *qr_tmp;
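
Taken together, the iSCSI hunks replace an open-ended schedule_timeout_interruptible() plus wake_up_process() with a wait queue and a lock-protected predicate (queues non-empty, or thread-set reset), closing the window in which a wakeup sent between the queue check and the sleep would be lost. A userspace analogue of the same wait/wake discipline using a pthread condition variable; this is a sketch of the pattern, not the target code:

/* waitqueue_demo.c -- sleep until a predicate holds, with no lost wakeups */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int queued;		/* stand-in for the immediate/response queues */
static bool reset_req;		/* stand-in for ISCSI_THREAD_SET_RESET */

static void consumer_wait(void)
{
	pthread_mutex_lock(&lock);
	while (queued == 0 && !reset_req)	/* re-check predicate on wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void producer_post(void)
{
	pthread_mutex_lock(&lock);
	queued++;			/* update state under the lock... */
	pthread_cond_signal(&cond);	/* ...then wake, like wake_up() */
	pthread_mutex_unlock(&lock);
}

static void *producer(void *arg)
{
	(void)arg;
	sleep(1);
	producer_post();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	consumer_wait();		/* blocks until producer_post() runs */
	printf("woke with %d item(s) queued\n", queued);
	pthread_join(t, NULL);
	return 0;
}
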
+diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
+index 835bf7d..cfac698 100644
+--- a/drivers/target/iscsi/iscsi_target_util.h
++++ b/drivers/target/iscsi/iscsi_target_util.h
+@@ -28,6 +28,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_
+ extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
++extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
+ extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+ extern void iscsit_release_cmd(struct iscsi_cmd *);
+ extern void iscsit_free_cmd(struct iscsi_cmd *);
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 0b01bfc..013b133 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3205,7 +3205,8 @@ static int __init target_core_init_configfs(void)
+ if (ret < 0)
+ goto out;
+
+- if (core_dev_setup_virtual_lun0() < 0)
++ ret = core_dev_setup_virtual_lun0();
++ if (ret < 0)
+ goto out;
+
+ return 0;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index f8773ae..a0143a0 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -835,20 +835,20 @@ int se_dev_check_shutdown(struct se_device *dev)
+
+ u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+ {
+- u32 tmp, aligned_max_sectors;
++ u32 aligned_max_sectors;
++ u32 alignment;
+ /*
+ * Limit max_sectors to a PAGE_SIZE aligned value for modern
+ * transport_allocate_data_tasks() operation.
+ */
+- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+- aligned_max_sectors = (tmp / block_size);
+- if (max_sectors != aligned_max_sectors) {
+- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+- " to %u\n", max_sectors, aligned_max_sectors);
+- return aligned_max_sectors;
+- }
++ alignment = max(1ul, PAGE_SIZE / block_size);
++ aligned_max_sectors = rounddown(max_sectors, alignment);
++
++ if (max_sectors != aligned_max_sectors)
++ pr_info("Rounding down aligned max_sectors from %u to %u\n",
++ max_sectors, aligned_max_sectors);
+
+- return max_sectors;
++ return aligned_max_sectors;
+ }
+
+ void se_dev_set_default_attribs(
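
The old se_dev_align_max_sectors() multiplied max_sectors by block_size in 32 bits before rounding, which wraps for large limits; the rewrite rounds the sector count itself down to max(1, PAGE_SIZE / block_size) and never multiplies. A standalone comparison of the two formulas, assuming 4 KiB pages and a 512-byte block size:

/* align_sectors_demo.c -- overflow-free PAGE_SIZE alignment of a sector count */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static uint32_t rounddown(uint32_t x, uint32_t y)
{
	return x - (x % y);
}

static uint32_t align_old(uint32_t max_sectors, uint32_t block_size)
{
	/* the multiply is done in 32 bits and can wrap */
	uint32_t tmp = rounddown(max_sectors * block_size, PAGE_SIZE);
	return tmp / block_size;
}

static uint32_t align_new(uint32_t max_sectors, uint32_t block_size)
{
	uint32_t alignment = PAGE_SIZE / block_size;

	if (alignment == 0)
		alignment = 1;		/* block_size larger than a page */
	return rounddown(max_sectors, alignment);
}

int main(void)
{
	uint32_t max_sectors = 0x900000;	/* * 512 = 0x120000000: wraps */

	printf("old: %u  new: %u\n",
	       align_old(max_sectors, 512), align_new(max_sectors, 512));
	/* prints "old: 1048576  new: 9437184": the wrapped multiply
	 * silently shrank the limit by a factor of nine */
	return 0;
}
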
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index d481f80..43a38aa 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -2585,7 +2585,6 @@ error:
+ static void mos7840_disconnect(struct usb_serial *serial)
+ {
+ int i;
+- unsigned long flags;
+ struct moschip_port *mos7840_port;
+ dbg("%s", " disconnect :entering..........");
+
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 625890c..080b186 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -105,6 +105,21 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
+ #endif
+ }
+
++static void gntdev_free_map(struct grant_map *map)
++{
++ if (map == NULL)
++ return;
++
++ if (map->pages)
++ free_xenballooned_pages(map->count, map->pages);
++ kfree(map->pages);
++ kfree(map->grants);
++ kfree(map->map_ops);
++ kfree(map->unmap_ops);
++ kfree(map->kmap_ops);
++ kfree(map);
++}
++
+ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+ {
+ struct grant_map *add;
+@@ -142,12 +157,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+ return add;
+
+ err:
+- kfree(add->pages);
+- kfree(add->grants);
+- kfree(add->map_ops);
+- kfree(add->unmap_ops);
+- kfree(add->kmap_ops);
+- kfree(add);
++ gntdev_free_map(add);
+ return NULL;
+ }
+
+@@ -196,17 +206,9 @@ static void gntdev_put_map(struct grant_map *map)
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(map->notify.event);
+
+- if (map->pages) {
+- if (!use_ptemod)
+- unmap_grant_pages(map, 0, map->count);
+-
+- free_xenballooned_pages(map->count, map->pages);
+- }
+- kfree(map->pages);
+- kfree(map->grants);
+- kfree(map->map_ops);
+- kfree(map->unmap_ops);
+- kfree(map);
++ if (map->pages && !use_ptemod)
++ unmap_grant_pages(map, 0, map->count);
++ gntdev_free_map(map);
+ }
+
+ /* ------------------------------------------------------------------ */
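
Folding the five kfree() calls and the balloon-page release into one gntdev_free_map() that tolerates NULL keeps the allocation error path and the normal teardown from drifting apart; note the removed put path had in fact forgotten kfree(map->kmap_ops). The idiom in userspace terms:

/* free_helper_demo.c -- one NULL-tolerant destructor for all exit paths */
#include <stdlib.h>

struct grant_map {
	int  count;
	int *grants;
	int *map_ops;
	int *unmap_ops;
};

static void map_free(struct grant_map *map)
{
	if (map == NULL)
		return;			/* safe on the "alloc failed" path */
	free(map->grants);		/* free(NULL) is a no-op, like kfree() */
	free(map->map_ops);
	free(map->unmap_ops);
	free(map);
}

static struct grant_map *map_alloc(int count)
{
	struct grant_map *map = calloc(1, sizeof(*map));

	if (!map)
		return NULL;
	map->count     = count;
	map->grants    = calloc(count, sizeof(int));
	map->map_ops   = calloc(count, sizeof(int));
	map->unmap_ops = calloc(count, sizeof(int));
	if (!map->grants || !map->map_ops || !map->unmap_ops) {
		map_free(map);		/* same helper as normal teardown */
		return NULL;
	}
	return map;
}

int main(void)
{
	map_free(map_alloc(16));
	return 0;
}
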
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 72ddf23..b3522af 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -225,6 +225,13 @@ sid_to_str(struct cifs_sid *sidptr, char *sidstr)
+ }
+
+ static void
++cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
++{
++ memcpy(dst, src, sizeof(*dst));
++ dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
++}
++
++static void
+ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
+ struct cifs_sid_id **psidid, char *typestr)
+ {
+@@ -248,7 +255,7 @@ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
+ }
+ }
+
+- memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
++ cifs_copy_sid(&(*psidid)->sid, sidptr);
+ (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
+ (*psidid)->refcount = 0;
+
+@@ -354,7 +361,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ * any fields of the node after a reference is put .
+ */
+ if (test_bit(SID_ID_MAPPED, &psidid->state)) {
+- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
++ cifs_copy_sid(ssid, &psidid->sid);
+ psidid->time = jiffies; /* update ts for accessing */
+ goto id_sid_out;
+ }
+@@ -370,14 +377,14 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ if (IS_ERR(sidkey)) {
+ rc = -EINVAL;
+ cFYI(1, "%s: Can't map and id to a SID", __func__);
++ } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
++ rc = -EIO;
++ cFYI(1, "%s: Downcall contained malformed key "
++ "(datalen=%hu)", __func__, sidkey->datalen);
+ } else {
+ lsid = (struct cifs_sid *)sidkey->payload.data;
+- memcpy(&psidid->sid, lsid,
+- sidkey->datalen < sizeof(struct cifs_sid) ?
+- sidkey->datalen : sizeof(struct cifs_sid));
+- memcpy(ssid, &psidid->sid,
+- sidkey->datalen < sizeof(struct cifs_sid) ?
+- sidkey->datalen : sizeof(struct cifs_sid));
++ cifs_copy_sid(&psidid->sid, lsid);
++ cifs_copy_sid(ssid, &psidid->sid);
+ set_bit(SID_ID_MAPPED, &psidid->state);
+ key_put(sidkey);
+ kfree(psidid->sidstr);
+@@ -396,7 +403,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ return rc;
+ }
+ if (test_bit(SID_ID_MAPPED, &psidid->state))
+- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
++ cifs_copy_sid(ssid, &psidid->sid);
+ else
+ rc = -EINVAL;
+ }
+@@ -674,8 +681,6 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
+ struct cifs_ntsd *pnntsd, __u32 sidsoffset)
+ {
+- int i;
+-
+ struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+ struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+
+@@ -691,26 +696,14 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
+ owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+ nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
+-
+- nowner_sid_ptr->revision = owner_sid_ptr->revision;
+- nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
+- for (i = 0; i < 6; i++)
+- nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
+- for (i = 0; i < 5; i++)
+- nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
++ cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
+
+ /* copy group sid */
+ group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+ ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
+ sizeof(struct cifs_sid));
+-
+- ngroup_sid_ptr->revision = group_sid_ptr->revision;
+- ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
+- for (i = 0; i < 6; i++)
+- ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
+- for (i = 0; i < 5; i++)
+- ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
++ cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
+
+ return;
+ }
+@@ -1117,8 +1110,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ kfree(nowner_sid_ptr);
+ return rc;
+ }
+- memcpy(owner_sid_ptr, nowner_sid_ptr,
+- sizeof(struct cifs_sid));
++ cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
+ kfree(nowner_sid_ptr);
+ *aclflag = CIFS_ACL_OWNER;
+ }
+@@ -1136,8 +1128,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ kfree(ngroup_sid_ptr);
+ return rc;
+ }
+- memcpy(group_sid_ptr, ngroup_sid_ptr,
+- sizeof(struct cifs_sid));
++ cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
+ kfree(ngroup_sid_ptr);
+ *aclflag = CIFS_ACL_GROUP;
+ }
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index 1cfef9f..94afdfd 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -280,6 +280,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
+ char *fnek_src;
+ char *cipher_key_bytes_src;
+ char *fn_cipher_key_bytes_src;
++ u8 cipher_code;
+
+ *check_ruid = 0;
+
+@@ -421,6 +422,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
+ && !fn_cipher_key_bytes_set)
+ mount_crypt_stat->global_default_fn_cipher_key_bytes =
+ mount_crypt_stat->global_default_cipher_key_size;
++
++ cipher_code = ecryptfs_code_for_cipher_string(
++ mount_crypt_stat->global_default_cipher_name,
++ mount_crypt_stat->global_default_cipher_key_size);
++ if (!cipher_code) {
++ ecryptfs_printk(KERN_ERR,
++ "eCryptfs doesn't support cipher: %s",
++ mount_crypt_stat->global_default_cipher_name);
++ rc = -EINVAL;
++ goto out;
++ }
++
+ mutex_lock(&key_tfm_list_mutex);
+ if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
+ NULL)) {
+@@ -506,7 +519,6 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ goto out;
+ }
+
+- s->s_flags = flags;
+ rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+ if (rc)
+ goto out1;
+@@ -542,6 +554,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ }
+
+ ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
++
++ /**
++ * Set the POSIX ACL flag based on whether they're enabled in the lower
++ * mount. Force a read-only eCryptfs mount if the lower mount is ro.
++ * Allow a ro eCryptfs mount even when the lower mount is rw.
++ */
++ s->s_flags = flags & ~MS_POSIXACL;
++ s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
++
+ s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ s->s_blocksize = path.dentry->d_sb->s_blocksize;
+ s->s_magic = ECRYPTFS_SUPER_MAGIC;
+diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
+index a6e711a..ee02db5 100644
+--- a/fs/nfs/dns_resolve.c
++++ b/fs/nfs/dns_resolve.c
+@@ -213,7 +213,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+ {
+ char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
+ struct nfs_dns_ent key, *item;
+- unsigned long ttl;
++ unsigned int ttl;
+ ssize_t len;
+ int ret = -EINVAL;
+
+@@ -236,7 +236,8 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+ key.namelen = len;
+ memset(&key.h, 0, sizeof(key.h));
+
+- ttl = get_expiry(&buf);
++ if (get_uint(&buf, &ttl) < 0)
++ goto out;
+ if (ttl == 0)
+ goto out;
+ key.h.expiry_time = ttl + seconds_since_boot();
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 68b3f20..c5af878 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -274,8 +274,9 @@ extern void nfs_sb_active(struct super_block *sb);
+ extern void nfs_sb_deactive(struct super_block *sb);
+
+ /* namespace.c */
++#define NFS_PATH_CANONICAL 1
+ extern char *nfs_path(char **p, struct dentry *dentry,
+- char *buffer, ssize_t buflen);
++ char *buffer, ssize_t buflen, unsigned flags);
+ extern struct vfsmount *nfs_d_automount(struct path *path);
+ #ifdef CONFIG_NFS_V4
+ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
+@@ -364,7 +365,7 @@ static inline char *nfs_devname(struct dentry *dentry,
+ char *buffer, ssize_t buflen)
+ {
+ char *dummy;
+- return nfs_path(&dummy, dentry, buffer, buflen);
++ return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL);
+ }
+
+ /*
+diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
+index d4c2d6b..3d93216 100644
+--- a/fs/nfs/mount_clnt.c
++++ b/fs/nfs/mount_clnt.c
+@@ -181,7 +181,7 @@ int nfs_mount(struct nfs_mount_request *info)
+ else
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
+
+- status = rpc_call_sync(mnt_clnt, &msg, 0);
++ status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT);
+ rpc_shutdown_client(mnt_clnt);
+
+ if (status < 0)
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 8102391..a86873e 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -37,6 +37,7 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ * @dentry - pointer to dentry
+ * @buffer - result buffer
+ * @buflen - length of buffer
++ * @flags - options (see below)
+ *
+ * Helper function for constructing the server pathname
+ * by arbitrary hashed dentry.
+@@ -44,8 +45,14 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ * This is mainly for use in figuring out the path on the
+ * server side when automounting on top of an existing partition
+ * and in generating /proc/mounts and friends.
++ *
++ * Supported flags:
++ * NFS_PATH_CANONICAL: ensure there is exactly one slash after
++ * the original device (export) name
++ * (if unset, the original name is returned verbatim)
+ */
+-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen)
++char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
++ unsigned flags)
+ {
+ char *end;
+ int namelen;
+@@ -78,7 +85,7 @@ rename_retry:
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+- if (*end != '/') {
++ if ((flags & NFS_PATH_CANONICAL) && *end != '/') {
+ if (--buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+@@ -95,9 +102,11 @@ rename_retry:
+ return end;
+ }
+ namelen = strlen(base);
+- /* Strip off excess slashes in base string */
+- while (namelen > 0 && base[namelen - 1] == '/')
+- namelen--;
++ if (flags & NFS_PATH_CANONICAL) {
++ /* Strip off excess slashes in base string */
++ while (namelen > 0 && base[namelen - 1] == '/')
++ namelen--;
++ }
+ buflen -= namelen;
+ if (buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
+index bb80c49..96f2b67 100644
+--- a/fs/nfs/nfs4namespace.c
++++ b/fs/nfs/nfs4namespace.c
+@@ -57,7 +57,8 @@ Elong:
+ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
+ {
+ char *limit;
+- char *path = nfs_path(&limit, dentry, buffer, buflen);
++ char *path = nfs_path(&limit, dentry, buffer, buflen,
++ NFS_PATH_CANONICAL);
+ if (!IS_ERR(path)) {
+ char *colon = strchr(path, ':');
+ if (colon && colon < limit)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 61796a40..864b831 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -303,8 +303,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ nfs4_schedule_session_recovery(clp->cl_session);
+- exception->retry = 1;
+- break;
++ goto wait_on_recovery;
+ #endif /* defined(CONFIG_NFS_V4_1) */
+ case -NFS4ERR_FILE_OPEN:
+ if (exception->timeout > HZ) {
+@@ -1464,9 +1463,11 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server,
+ &data->o_arg.seq_args,
+- &data->o_res.seq_res, 1, task))
+- return;
+- rpc_call_start(task);
++ &data->o_res.seq_res,
++ 1, task) != 0)
++ nfs_release_seqid(data->o_arg.seqid);
++ else
++ rpc_call_start(task);
+ return;
+ unlock_no_action:
+ rcu_read_unlock();
+@@ -2046,9 +2047,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
+ &calldata->arg.seq_args, &calldata->res.seq_res,
+- 1, task))
+- return;
+- rpc_call_start(task);
++ 1, task) != 0)
++ nfs_release_seqid(calldata->arg.seqid);
++ else
++ rpc_call_start(task);
+ }
+
+ static const struct rpc_call_ops nfs4_close_ops = {
+@@ -4148,6 +4150,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
+ if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
++ nfs_release_seqid(calldata->arg.seqid);
+ }
+
+ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
+@@ -4164,9 +4167,11 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server,
+ &calldata->arg.seq_args,
+- &calldata->res.seq_res, 1, task))
+- return;
+- rpc_call_start(task);
++ &calldata->res.seq_res,
++ 1, task) != 0)
++ nfs_release_seqid(calldata->arg.seqid);
++ else
++ rpc_call_start(task);
+ }
+
+ static const struct rpc_call_ops nfs4_locku_ops = {
+@@ -4310,7 +4315,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+ /* Do we need to do an open_to_lock_owner? */
+ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
+- return;
++ goto out_release_lock_seqid;
+ data->arg.open_stateid = &state->stateid;
+ data->arg.new_lock_owner = 1;
+ data->res.open_seqid = data->arg.open_seqid;
+@@ -4319,10 +4324,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server,
+ &data->arg.seq_args,
+- &data->res.seq_res, 1, task))
++ &data->res.seq_res,
++ 1, task) == 0) {
++ rpc_call_start(task);
+ return;
+- rpc_call_start(task);
+- dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
++ }
++ nfs_release_seqid(data->arg.open_seqid);
++out_release_lock_seqid:
++ nfs_release_seqid(data->arg.lock_seqid);
++ dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
+ }
+
+ static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e42d6f6..8150344 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -768,7 +768,7 @@ static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
+ int err = 0;
+ if (!page)
+ return -ENOMEM;
+- devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE);
++ devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE, 0);
+ if (IS_ERR(devname))
+ err = PTR_ERR(devname);
+ else
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 5f312ab..a0205fc 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -401,7 +401,7 @@ fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc)
+ int migrated, i, err;
+
+ /* listsize */
+- err = get_int(mesg, &fsloc->locations_count);
++ err = get_uint(mesg, &fsloc->locations_count);
+ if (err)
+ return err;
+ if (fsloc->locations_count > MAX_FS_LOCATIONS)
+@@ -459,7 +459,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+ return -EINVAL;
+
+ for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
+- err = get_int(mesg, &f->pseudoflavor);
++ err = get_uint(mesg, &f->pseudoflavor);
+ if (err)
+ return err;
+ /*
+@@ -468,7 +468,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+ * problem at export time instead of when a client fails
+ * to authenticate.
+ */
+- err = get_int(mesg, &f->flags);
++ err = get_uint(mesg, &f->flags);
+ if (err)
+ return err;
+ /* Only some flags are allowed to differ between flavors: */
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index f35794b..a506360 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
++ break;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
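
The fanotify one-liner is a missing break: when the two paths compared unequal, control fell through into the FSNOTIFY_EVENT_NONE case and returned true anyway, merging events that should have stayed distinct. The bug class in miniature:

/* fallthrough_demo.c -- a missing break turns "no" into "yes" */
#include <stdbool.h>
#include <stdio.h>

enum kind { KIND_PATH, KIND_NONE };

static bool should_merge(enum kind k, bool paths_equal)
{
	switch (k) {
	case KIND_PATH:
		if (paths_equal)
			return true;
		break;		/* without this, unequal paths fall through
				 * into KIND_NONE and merge anyway */
	case KIND_NONE:
		return true;
	default:
		break;
	}
	return false;
}

int main(void)
{
	printf("unequal paths merge: %d\n", should_merge(KIND_PATH, false));
	/* prints 0 with the break in place, 1 without it */
	return 0;
}
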
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 4f5d0ce..86ca506 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3514,7 +3514,7 @@ xlog_do_recovery_pass(
+ * - order is important.
+ */
+ error = xlog_bread_offset(log, 0,
+- bblks - split_bblks, hbp,
++ bblks - split_bblks, dbp,
+ offset + BBTOB(split_bblks));
+ if (error)
+ goto bread_err2;
+diff --git a/include/linux/if_link.h b/include/linux/if_link.h
+index c52d4b5..4b24ff4 100644
+--- a/include/linux/if_link.h
++++ b/include/linux/if_link.h
+@@ -137,6 +137,7 @@ enum {
+ IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
+ IFLA_NET_NS_FD,
++ IFLA_EXT_MASK, /* Extended info mask, VFs, etc */
+ __IFLA_MAX
+ };
+
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
+index 8e872ea..577592e 100644
+--- a/include/linux/rtnetlink.h
++++ b/include/linux/rtnetlink.h
+@@ -602,6 +602,9 @@ struct tcamsg {
+ #define TCA_ACT_TAB 1 /* attr type must be >=1 */
+ #define TCAA_MAX 1
+
++/* New extended info filters for IFLA_EXT_MASK */
++#define RTEXT_FILTER_VF (1 << 0)
++
+ /* End of information exported to user level */
+
+ #ifdef __KERNEL__
+diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
+index 5efd8ce..f0c6ab5 100644
+--- a/include/linux/sunrpc/cache.h
++++ b/include/linux/sunrpc/cache.h
+@@ -224,6 +224,22 @@ static inline int get_int(char **bpp, int *anint)
+ return 0;
+ }
+
++static inline int get_uint(char **bpp, unsigned int *anint)
++{
++ char buf[50];
++ int len = qword_get(bpp, buf, sizeof(buf));
++
++ if (len < 0)
++ return -EINVAL;
++ if (len == 0)
++ return -ENOENT;
++
++ if (kstrtouint(buf, 0, anint))
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * timestamps kept in the cache are expressed in seconds
+ * since boot. This is the best for measuring differences in
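
The new get_uint() leans on kstrtouint(), which, unlike a bare atoi(), rejects trailing garbage, negatives and out-of-range values, and the wrapper distinguishes a missing token (-ENOENT) from a malformed one (-EINVAL). A userspace analogue built on strtoul(); this is a sketch, since the kernel helper pulls its token from the qword stream rather than a plain string:

/* parse_uint_demo.c -- strict unsigned parsing, strtoul done carefully */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int get_uint(const char *buf, unsigned int *out)
{
	char *end;
	unsigned long val;

	if (buf == NULL || *buf == '\0')
		return -2;		/* no token: like -ENOENT */
	if (*buf == '-')
		return -1;		/* strtoul would silently wrap negatives */

	errno = 0;
	val = strtoul(buf, &end, 0);	/* base 0 accepts 0x.. and 0.. forms */
	if (errno || *end != '\0' || val > UINT_MAX)
		return -1;		/* garbage or overflow: like -EINVAL */

	*out = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int v;

	printf("%d\n", get_uint("42", &v));	/* 0, v == 42 */
	printf("%d\n", get_uint("42x", &v));	/* -1: trailing garbage */
	printf("%d\n", get_uint("", &v));	/* -2: empty */
	return 0;
}
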
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 95852e3..19d632d 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -2431,6 +2431,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
+ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
+
+ /**
++ * ieee80211_get_mesh_hdrlen - get mesh extension header length
++ * @meshhdr: the mesh extension header, only the flags field
++ * (first byte) will be accessed
++ * Returns the length of the extension header, which is always at
++ * least 6 bytes and at most 18 if address 5 and 6 are present.
++ */
++unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
++
++/**
+ * DOC: Data path helpers
+ *
+ * In addition to generic utilities, cfg80211 also offers
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 678f1ff..3702939 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -6,7 +6,7 @@
+
+ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
+ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+-typedef u16 (*rtnl_calcit_func)(struct sk_buff *);
++typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
+
+ extern int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func,
+diff --git a/include/sound/core.h b/include/sound/core.h
+index 3be5ab7..222f11e 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -132,6 +132,7 @@ struct snd_card {
+ int shutdown; /* this card is going down */
+ int free_on_last_close; /* free in context of file_release */
+ wait_queue_head_t shutdown_sleep;
++ atomic_t refcount; /* refcount for disconnection */
+ struct device *dev; /* device assigned to this card */
+ struct device *card_dev; /* cardX object for sysfs */
+
+@@ -189,6 +190,7 @@ struct snd_minor {
+ const struct file_operations *f_ops; /* file operations */
+ void *private_data; /* private data for f_ops->open */
+ struct device *dev; /* device for sysfs */
++ struct snd_card *card_ptr; /* assigned card instance */
+ };
+
+ /* return a device pointer linked to each sound device as a parent */
+@@ -295,6 +297,7 @@ int snd_card_info_done(void);
+ int snd_component_add(struct snd_card *card, const char *component);
+ int snd_card_file_add(struct snd_card *card, struct file *file);
+ int snd_card_file_remove(struct snd_card *card, struct file *file);
++void snd_card_unref(struct snd_card *card);
+
+ #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
+
+diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
+index 92f1a79..348c4fe 100644
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -377,6 +377,14 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+
++TRACE_EVENT(xen_mmu_flush_tlb_all,
++ TP_PROTO(int x),
++ TP_ARGS(x),
++ TP_STRUCT__entry(__array(char, x, 0)),
++ TP_fast_assign((void)x),
++ TP_printk("%s", "")
++ );
++
+ TRACE_EVENT(xen_mmu_flush_tlb,
+ TP_PROTO(int x),
+ TP_ARGS(x),
+diff --git a/kernel/module.c b/kernel/module.c
+index 6c8fa34..65362d9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2193,15 +2193,17 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+
+ src = (void *)info->hdr + symsect->sh_offset;
+ nsrc = symsect->sh_size / sizeof(*src);
+- for (ndst = i = 1; i < nsrc; ++i, ++src)
+- if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
+- unsigned int j = src->st_name;
++ for (ndst = i = 0; i < nsrc; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ unsigned int j = src[i].st_name;
+
+ while (!__test_and_set_bit(j, info->strmap)
+ && info->strtab[j])
+ ++j;
+ ++ndst;
+ }
++ }
+
+ /* Append room for core symbols at end of core part. */
+ info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+@@ -2238,14 +2240,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+
+ mod->core_symtab = dst = mod->module_core + info->symoffs;
+ src = mod->symtab;
+- *dst = *src;
+- for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+- if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
+- continue;
+- dst[ndst] = *src;
+- dst[ndst].st_name = bitmap_weight(info->strmap,
+- dst[ndst].st_name);
+- ++ndst;
++ for (ndst = i = 0; i < mod->num_symtab; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ dst[ndst] = src[i];
++ dst[ndst].st_name = bitmap_weight(info->strmap,
++ dst[ndst].st_name);
++ ++ndst;
++ }
+ }
+ mod->core_num_syms = ndst;
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 86eb848..313381c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3015,6 +3015,8 @@ static int kswapd(void *p)
+ &balanced_classzone_idx);
+ }
+ }
++
++ current->reclaim_state = NULL;
+ return 0;
+ }
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 1fb1aec..aa12649 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -642,8 +642,10 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
++#ifdef CONFIG_BT_L2CAP
+ if (conn->type == LE_LINK)
+ return smp_conn_security(conn, sec_level);
++#endif
+
+ /* For sdp we don't need the link key. */
+ if (sec_level == BT_SECURITY_SDP)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f500a69..480be72 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1633,7 +1633,7 @@ static inline int deliver_skb(struct sk_buff *skb,
+
+ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+ {
+- if (ptype->af_packet_priv == NULL)
++ if (!ptype->af_packet_priv || !skb->sk)
+ return false;
+
+ if (ptype->id_match)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 0cf604b..5229c7f 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -60,7 +60,6 @@ struct rtnl_link {
+ };
+
+ static DEFINE_MUTEX(rtnl_mutex);
+-static u16 min_ifinfo_dump_size;
+
+ void rtnl_lock(void)
+ {
+@@ -727,10 +726,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
+ }
+
+ /* All VF info */
+-static inline int rtnl_vfinfo_size(const struct net_device *dev)
++static inline int rtnl_vfinfo_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+- if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+-
++ if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
++ (ext_filter_mask & RTEXT_FILTER_VF)) {
+ int num_vfs = dev_num_vf(dev->dev.parent);
+ size_t size = nla_total_size(sizeof(struct nlattr));
+ size += nla_total_size(num_vfs * sizeof(struct nlattr));
+@@ -769,7 +769,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
+ return port_self_size;
+ }
+
+-static noinline size_t if_nlmsg_size(const struct net_device *dev)
++static noinline size_t if_nlmsg_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+@@ -787,8 +788,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
+ + nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(1) /* IFLA_OPERSTATE */
+ + nla_total_size(1) /* IFLA_LINKMODE */
+- + nla_total_size(4) /* IFLA_NUM_VF */
+- + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
++ + nla_total_size(ext_filter_mask
++ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
++ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+@@ -871,7 +873,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+
+ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ int type, u32 pid, u32 seq, u32 change,
+- unsigned int flags)
++ unsigned int flags, u32 ext_filter_mask)
+ {
+ struct ifinfomsg *ifm;
+ struct nlmsghdr *nlh;
+@@ -944,10 +946,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ goto nla_put_failure;
+ copy_rtnl_link_stats64(nla_data(attr), stats);
+
+- if (dev->dev.parent)
++ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
+ NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+
+- if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
++ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
++ && (ext_filter_mask & RTEXT_FILTER_VF)) {
+ int i;
+
+ struct nlattr *vfinfo, *vf;
+@@ -1051,6 +1054,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ struct net_device *dev;
+ struct hlist_head *head;
+ struct hlist_node *node;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -1058,6 +1063,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
++ if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+@@ -1067,7 +1079,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI) <= 0)
++ NLM_F_MULTI,
++ ext_filter_mask) <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+@@ -1103,6 +1116,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_VF_PORTS] = { .type = NLA_NESTED },
+ [IFLA_PORT_SELF] = { .type = NLA_NESTED },
+ [IFLA_AF_SPEC] = { .type = NLA_NESTED },
++ [IFLA_EXT_MASK] = { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL(ifla_policy);
+
+@@ -1845,6 +1859,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ struct net_device *dev = NULL;
+ struct sk_buff *nskb;
+ int err;
++ u32 ext_filter_mask = 0;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+@@ -1853,6 +1868,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+@@ -1864,12 +1882,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (dev == NULL)
+ return -ENODEV;
+
+- nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
++ nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
+ if (nskb == NULL)
+ return -ENOBUFS;
+
+ err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+- nlh->nlmsg_seq, 0, 0);
++ nlh->nlmsg_seq, 0, 0, ext_filter_mask);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+@@ -1880,8 +1898,32 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ return err;
+ }
+
+-static u16 rtnl_calcit(struct sk_buff *skb)
++static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ {
++ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++ u16 min_ifinfo_dump_size = 0;
++
++ if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ if (!ext_filter_mask)
++ return NLMSG_GOODSIZE;
++ /*
++ * traverse the list of net devices and compute the minimum
++ * buffer size based upon the filter mask.
++ */
++ list_for_each_entry(dev, &net->dev_base_head, dev_list) {
++ min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
++ if_nlmsg_size(dev,
++ ext_filter_mask));
++ }
++
+ return min_ifinfo_dump_size;
+ }
+
+@@ -1916,13 +1958,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+ int err = -ENOBUFS;
+ size_t if_info_size;
+
+- skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
++ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+
+- min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
+-
+- err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
++ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+@@ -1980,7 +2020,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ return -EOPNOTSUPP;
+ calcit = rtnl_get_calcit(family, type);
+ if (calcit)
+- min_dump_alloc = calcit(skb);
++ min_dump_alloc = calcit(skb, nlh);
+
+ __rtnl_unlock();
+ rtnl = net->rtnl;
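
A minimal userspace sketch of the request that the sizing path above serves
(illustrative only, not part of the patch; assumes kernel headers that define
IFLA_EXT_MASK and RTEXT_FILTER_VF, and omits error handling). The attribute
sits after a struct rtgenmsg header, matching the sizeof(struct rtgenmsg)
offset that rtnl_dump_ifinfo() and rtnl_calcit() parse above:

#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        char buf[64];
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        struct rtgenmsg *g;
        struct rtattr *rta;
        __u32 mask = RTEXT_FILTER_VF;   /* ask for VF info explicitly */
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        memset(buf, 0, sizeof(buf));
        nlh->nlmsg_type = RTM_GETLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        g = NLMSG_DATA(nlh);
        g->rtgen_family = AF_UNSPEC;
        rta = (struct rtattr *)((char *)nlh + NLMSG_SPACE(sizeof(*g)));
        rta->rta_type = IFLA_EXT_MASK;
        rta->rta_len = RTA_LENGTH(sizeof(mask));
        memcpy(RTA_DATA(rta), &mask, sizeof(mask));
        nlh->nlmsg_len = NLMSG_SPACE(sizeof(*g)) + RTA_SPACE(sizeof(mask));

        sendto(fd, buf, nlh->nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa));
        /* ... recv() and parse the multipart dump here ... */
        close(fd);
        return 0;
}
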
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7397ad8..52edbb8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -481,14 +481,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ !tp->urg_data ||
+ before(tp->urg_seq, tp->copied_seq) ||
+ !before(tp->urg_seq, tp->rcv_nxt)) {
+- struct sk_buff *skb;
+
+ answ = tp->rcv_nxt - tp->copied_seq;
+
+- /* Subtract 1, if FIN is in queue. */
+- skb = skb_peek_tail(&sk->sk_receive_queue);
+- if (answ && skb)
+- answ -= tcp_hdr(skb)->fin;
++ /* Subtract 1, if FIN was received */
++ if (answ && sock_flag(sk, SOCK_DONE))
++ answ--;
+ } else
+ answ = tp->urg_seq - tp->copied_seq;
+ release_sock(sk);
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index 813b43a..834857f 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
+ .tcpv_rttcnt = ca->cnt_rtt,
+ .tcpv_minrtt = ca->base_rtt,
+ };
+- u64 t = ca->sum_rtt;
+
+- do_div(t, ca->cnt_rtt);
+- info.tcpv_rtt = t;
++ if (info.tcpv_rttcnt > 0) {
++ u64 t = ca->sum_rtt;
+
++ do_div(t, info.tcpv_rttcnt);
++ info.tcpv_rtt = t;
++ }
+ nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ }
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 0cb78d7..9ffc37f 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -606,7 +606,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+ {
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+- struct in6_addr mcaddr;
++ struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+
+ idev = in6_dev_get(dev);
+ if (!idev)
+@@ -614,7 +614,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+- addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
+ ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
+ /*router=*/ !!idev->cnf.forwarding,
+ /*solicited=*/ false, /*override=*/ true,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 488a1b7..19724bd 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -185,7 +185,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
+ };
+
+ static const u32 ip6_template_metrics[RTAX_MAX] = {
+- [RTAX_HOPLIMIT - 1] = 255,
++ [RTAX_HOPLIMIT - 1] = 0,
+ };
+
+ static struct rt6_info ip6_null_entry_template = {
+@@ -1097,7 +1097,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
+ ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.plen = 128;
+ rt->rt6i_idev = idev;
+- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
++ dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
+
+ spin_lock_bh(&icmp6_dst_lock);
+ rt->dst.next = icmp6_dst_gc_list;
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 2cef50b..64164fb 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -269,6 +269,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+
+ out_del_dev:
+ free_netdev(dev);
++ spriv->dev = NULL;
+ out_del_session:
+ l2tp_session_delete(session);
+ out:
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 3ece106..8c7364b 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -940,7 +940,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+ sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+ sdata->u.ibss.ibss_join_req = jiffies;
+
+- memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
++ memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
+ sdata->u.ibss.ssid_len = params->ssid_len;
+
+ mutex_unlock(&sdata->u.ibss.mtx);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index cda4875..cd6cbdb 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -515,6 +515,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ u8 category;
++
++ /* make sure category field is present */
++ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
++ return RX_DROP_MONITOR;
++
+ mgmt = (struct ieee80211_mgmt *)hdr;
+ category = mgmt->u.action.category;
+ if (category != WLAN_CATEGORY_MESH_ACTION &&
+@@ -854,14 +859,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+ if (rx->sta && rx->sta->dummy &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+- u16 ethertype;
+- u8 *payload;
+-
+- payload = rx->skb->data +
+- ieee80211_hdrlen(hdr->frame_control);
+- ethertype = (payload[6] << 8) | payload[7];
+- if (cpu_to_be16(ethertype) ==
+- rx->sdata->control_port_protocol)
++ unsigned int hdrlen;
++ __be16 ethertype;
++
++ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ if (rx->skb->len < hdrlen + 8)
++ return RX_DROP_MONITOR;
++
++ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
++ if (ethertype == rx->sdata->control_port_protocol)
+ return RX_CONTINUE;
+ }
+ return RX_DROP_MONITOR;
+@@ -1449,11 +1456,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
+ fc = hdr->frame_control;
++
++ if (ieee80211_is_ctl(fc))
++ return RX_CONTINUE;
++
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+ if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
+- (rx->skb)->len < 24 ||
+ is_multicast_ether_addr(hdr->addr1))) {
+ /* not fragmented */
+ goto out;
+@@ -1887,6 +1897,20 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ /* make sure fixed part of mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb, hdrlen + 6))
++ return RX_DROP_MONITOR;
++
++ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
++
++ /* make sure full mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb,
++ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
++ return RX_DROP_MONITOR;
++
++ /* reload pointers */
++ hdr = (struct ieee80211_hdr *) skb->data;
+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ /* frame is in RMC, don't forward */
+@@ -1895,7 +1919,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+ return RX_DROP_MONITOR;
+
+- if (!ieee80211_is_data(hdr->frame_control))
++ if (!ieee80211_is_data(hdr->frame_control) ||
++ !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ if (!mesh_hdr->ttl)
+@@ -1916,9 +1941,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ mpp_addr = hdr->addr3;
+ proxied_addr = mesh_hdr->eaddr1;
+- } else {
++ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
++ /* has_a4 already checked in ieee80211_rx_mesh_check */
+ mpp_addr = hdr->addr4;
+ proxied_addr = mesh_hdr->eaddr2;
++ } else {
++ return RX_DROP_MONITOR;
+ }
+
+ rcu_read_lock();
+@@ -1941,7 +1969,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+
+ mesh_hdr->ttl--;
+
+- if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
++ {
+ if (!mesh_hdr->ttl)
+ IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
+ dropped_frames_ttl);
+@@ -2295,6 +2323,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ }
+ break;
+ case WLAN_CATEGORY_SELF_PROTECTED:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.self_prot.action_code)))
++ break;
++
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+@@ -2313,6 +2345,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ }
+ break;
+ case WLAN_CATEGORY_MESH_ACTION:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.mesh_action.action_code)))
++ break;
++
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ if (mesh_action_is_path_sel(mgmt) &&
+@@ -2870,10 +2906,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ status->rx_flags |= IEEE80211_RX_IN_SCAN;
+
+- if (ieee80211_is_mgmt(fc))
+- err = skb_linearize(skb);
+- else
++ if (ieee80211_is_mgmt(fc)) {
++ /* drop frame if too short for header */
++ if (skb->len < ieee80211_hdrlen(fc))
++ err = -ENOBUFS;
++ else
++ err = skb_linearize(skb);
++ } else {
+ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
++ }
+
+ if (err) {
+ dev_kfree_skb(skb);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 38b78b9..3d1d55d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -137,6 +137,8 @@ static void netlink_destroy_callback(struct netlink_callback *cb);
+ static DEFINE_RWLOCK(nl_table_lock);
+ static atomic_t nl_table_users = ATOMIC_INIT(0);
+
++#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
++
+ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
+
+ static u32 netlink_group_mask(u32 group)
+@@ -331,6 +333,11 @@ netlink_update_listeners(struct sock *sk)
+ struct hlist_node *node;
+ unsigned long mask;
+ unsigned int i;
++ struct listeners *listeners;
++
++ listeners = nl_deref_protected(tbl->listeners);
++ if (!listeners)
++ return;
+
+ for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
+ mask = 0;
+@@ -338,7 +345,7 @@ netlink_update_listeners(struct sock *sk)
+ if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
+ mask |= nlk_sk(sk)->groups[i];
+ }
+- tbl->listeners->masks[i] = mask;
++ listeners->masks[i] = mask;
+ }
+ /* this function is only called with the netlink table "grabbed", which
+ * makes sure updates are visible before bind or setsockopt return. */
+@@ -519,7 +526,11 @@ static int netlink_release(struct socket *sock)
+ if (netlink_is_kernel(sk)) {
+ BUG_ON(nl_table[sk->sk_protocol].registered == 0);
+ if (--nl_table[sk->sk_protocol].registered == 0) {
+- kfree(nl_table[sk->sk_protocol].listeners);
++ struct listeners *old;
++
++ old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
++ RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
++ kfree_rcu(old, rcu);
+ nl_table[sk->sk_protocol].module = NULL;
+ nl_table[sk->sk_protocol].registered = 0;
+ }
+@@ -950,7 +961,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
+ rcu_read_lock();
+ listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
+
+- if (group - 1 < nl_table[sk->sk_protocol].groups)
++ if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
+ res = test_bit(group - 1, listeners->masks);
+
+ rcu_read_unlock();
+@@ -1584,7 +1595,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
+ new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+- old = rcu_dereference_protected(tbl->listeners, 1);
++ old = nl_deref_protected(tbl->listeners);
+ memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, new);
+
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 76388b0..9032d50 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1604,8 +1604,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
+ asoc->outqueue.outstanding_bytes;
+ sackh.num_gap_ack_blocks = 0;
+ sackh.num_dup_tsns = 0;
++ chunk->subh.sack_hdr = &sackh;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
+- SCTP_SACKH(&sackh));
++ SCTP_CHUNK(chunk));
+ break;
+
+ case SCTP_CMD_DISCARD_PACKET:
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 8f5042d..ea93f4b 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -548,8 +548,7 @@ int wiphy_register(struct wiphy *wiphy)
+ for (i = 0; i < sband->n_channels; i++) {
+ sband->channels[i].orig_flags =
+ sband->channels[i].flags;
+- sband->channels[i].orig_mag =
+- sband->channels[i].max_antenna_gain;
++ sband->channels[i].orig_mag = INT_MAX;
+ sband->channels[i].orig_mpwr =
+ sband->channels[i].max_power;
+ sband->channels[i].band = band;
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 22fb802..5fba039 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -301,23 +301,21 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
+
+-static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
++unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+ {
+ int ae = meshhdr->flags & MESH_FLAGS_AE;
+- /* 7.1.3.5a.2 */
++ /* 802.11-2012, 8.2.4.7.3 */
+ switch (ae) {
++ default:
+ case 0:
+ return 6;
+ case MESH_FLAGS_AE_A4:
+ return 12;
+ case MESH_FLAGS_AE_A5_A6:
+ return 18;
+- case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6):
+- return 24;
+- default:
+- return 6;
+ }
+ }
++EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
+
+ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+@@ -365,6 +363,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
++ if (meshdr->flags & MESH_FLAGS_AE_A4)
++ return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+@@ -389,6 +389,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
++ if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
++ return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A4)
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 819a5c5..5511307 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -86,6 +86,7 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
+ write_lock_irqsave(&card->ctl_files_rwlock, flags);
+ list_add_tail(&ctl->list, &card->ctl_files);
+ write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
++ snd_card_unref(card);
+ return 0;
+
+ __error:
+@@ -93,6 +94,8 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
+ __error2:
+ snd_card_file_remove(card, file);
+ __error1:
++ if (card)
++ snd_card_unref(card);
+ return err;
+ }
+
+@@ -1433,6 +1436,8 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
+ spin_unlock_irq(&ctl->read_lock);
+ schedule();
+ remove_wait_queue(&ctl->change_sleep, &wait);
++ if (ctl->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irq(&ctl->read_lock);
+diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
+index 75ea16f..3f7f662 100644
+--- a/sound/core/hwdep.c
++++ b/sound/core/hwdep.c
+@@ -100,8 +100,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ if (hw == NULL)
+ return -ENODEV;
+
+- if (!try_module_get(hw->card->module))
++ if (!try_module_get(hw->card->module)) {
++ snd_card_unref(hw->card);
+ return -EFAULT;
++ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&hw->open_wait, &wait);
+@@ -129,6 +131,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ mutex_unlock(&hw->open_mutex);
+ schedule();
+ mutex_lock(&hw->open_mutex);
++ if (hw->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -148,6 +154,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ mutex_unlock(&hw->open_mutex);
+ if (err < 0)
+ module_put(hw->card->module);
++ snd_card_unref(hw->card);
+ return err;
+ }
+
+@@ -459,12 +466,15 @@ static int snd_hwdep_dev_disconnect(struct snd_device *device)
+ mutex_unlock(&register_mutex);
+ return -EINVAL;
+ }
++ mutex_lock(&hwdep->open_mutex);
++ wake_up(&hwdep->open_wait);
+ #ifdef CONFIG_SND_OSSEMUL
+ if (hwdep->ossreg)
+ snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device);
+ #endif
+ snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device);
+ list_del_init(&hwdep->list);
++ mutex_unlock(&hwdep->open_mutex);
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 3ac49b1..fa0f35b 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -212,6 +212,7 @@ int snd_card_create(int idx, const char *xid,
+ spin_lock_init(&card->files_lock);
+ INIT_LIST_HEAD(&card->files_list);
+ init_waitqueue_head(&card->shutdown_sleep);
++ atomic_set(&card->refcount, 0);
+ #ifdef CONFIG_PM
+ mutex_init(&card->power_lock);
+ init_waitqueue_head(&card->power_sleep);
+@@ -445,21 +446,36 @@ static int snd_card_do_free(struct snd_card *card)
+ return 0;
+ }
+
++/**
++ * snd_card_unref - release the reference counter
++ * @card: the card instance
++ *
++ * Decrements the reference counter. When it reaches zero, wake up
++ * the sleeper and call the destructor if needed.
++ */
++void snd_card_unref(struct snd_card *card)
++{
++ if (atomic_dec_and_test(&card->refcount)) {
++ wake_up(&card->shutdown_sleep);
++ if (card->free_on_last_close)
++ snd_card_do_free(card);
++ }
++}
++EXPORT_SYMBOL(snd_card_unref);
++
+ int snd_card_free_when_closed(struct snd_card *card)
+ {
+- int free_now = 0;
+- int ret = snd_card_disconnect(card);
+- if (ret)
+- return ret;
++ int ret;
+
+- spin_lock(&card->files_lock);
+- if (list_empty(&card->files_list))
+- free_now = 1;
+- else
+- card->free_on_last_close = 1;
+- spin_unlock(&card->files_lock);
++ atomic_inc(&card->refcount);
++ ret = snd_card_disconnect(card);
++ if (ret) {
++ atomic_dec(&card->refcount);
++ return ret;
++ }
+
+- if (free_now)
++ card->free_on_last_close = 1;
++ if (atomic_dec_and_test(&card->refcount))
+ snd_card_do_free(card);
+ return 0;
+ }
+@@ -473,7 +489,7 @@ int snd_card_free(struct snd_card *card)
+ return ret;
+
+ /* wait, until all devices are ready for the free operation */
+- wait_event(card->shutdown_sleep, list_empty(&card->files_list));
++ wait_event(card->shutdown_sleep, !atomic_read(&card->refcount));
+ snd_card_do_free(card);
+ return 0;
+ }
+@@ -854,6 +870,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file)
+ return -ENODEV;
+ }
+ list_add(&mfile->list, &card->files_list);
++ atomic_inc(&card->refcount);
+ spin_unlock(&card->files_lock);
+ return 0;
+ }
+@@ -876,7 +893,6 @@ EXPORT_SYMBOL(snd_card_file_add);
+ int snd_card_file_remove(struct snd_card *card, struct file *file)
+ {
+ struct snd_monitor_file *mfile, *found = NULL;
+- int last_close = 0;
+
+ spin_lock(&card->files_lock);
+ list_for_each_entry(mfile, &card->files_list, list) {
+@@ -891,19 +907,13 @@ int snd_card_file_remove(struct snd_card *card, struct file *file)
+ break;
+ }
+ }
+- if (list_empty(&card->files_list))
+- last_close = 1;
+ spin_unlock(&card->files_lock);
+- if (last_close) {
+- wake_up(&card->shutdown_sleep);
+- if (card->free_on_last_close)
+- snd_card_do_free(card);
+- }
+ if (!found) {
+ snd_printk(KERN_ERR "ALSA card file remove problem (%p)\n", file);
+ return -ENOENT;
+ }
+ kfree(found);
++ snd_card_unref(card);
+ return 0;
+ }
+
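
The hunks above turn card lifetime into true reference counting: lookups and
file attachments take a reference, and snd_card_free() now waits on the
counter instead of the files list. A minimal sketch of the resulting contract
in an open handler (example_do_open() is a hypothetical helper, not part of
the patch):

static int example_open(struct inode *inode, struct file *file)
{
        struct snd_hwdep *hw;
        int err;

        /* the lookup takes a card reference (see sound/core/sound.c below) */
        hw = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_HWDEP);
        if (hw == NULL)
                return -ENODEV;

        err = example_do_open(hw, file);        /* hypothetical helper */

        snd_card_unref(hw->card);       /* every lookup must be balanced */
        return err;
}
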
+diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
+index 18297f7..c353768 100644
+--- a/sound/core/oss/mixer_oss.c
++++ b/sound/core/oss/mixer_oss.c
+@@ -52,14 +52,19 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
+ SNDRV_OSS_DEVICE_TYPE_MIXER);
+ if (card == NULL)
+ return -ENODEV;
+- if (card->mixer_oss == NULL)
++ if (card->mixer_oss == NULL) {
++ snd_card_unref(card);
+ return -ENODEV;
++ }
+ err = snd_card_file_add(card, file);
+- if (err < 0)
++ if (err < 0) {
++ snd_card_unref(card);
+ return err;
++ }
+ fmixer = kzalloc(sizeof(*fmixer), GFP_KERNEL);
+ if (fmixer == NULL) {
+ snd_card_file_remove(card, file);
++ snd_card_unref(card);
+ return -ENOMEM;
+ }
+ fmixer->card = card;
+@@ -68,8 +73,10 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
+ if (!try_module_get(card->module)) {
+ kfree(fmixer);
+ snd_card_file_remove(card, file);
++ snd_card_unref(card);
+ return -EFAULT;
+ }
++ snd_card_unref(card);
+ return 0;
+ }
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 3cc4b86..542f69e 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -2441,6 +2441,10 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ mutex_unlock(&pcm->open_mutex);
+ schedule();
+ mutex_lock(&pcm->open_mutex);
++ if (pcm->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -2450,6 +2454,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ mutex_unlock(&pcm->open_mutex);
+ if (err < 0)
+ goto __error;
++ snd_card_unref(pcm->card);
+ return err;
+
+ __error:
+@@ -2457,6 +2462,8 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ __error2:
+ snd_card_file_remove(pcm->card, file);
+ __error1:
++ if (pcm)
++ snd_card_unref(pcm->card);
+ return err;
+ }
+
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 8928ca87..13eaeb3 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -1046,11 +1046,19 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ if (list_empty(&pcm->list))
+ goto unlock;
+
++ mutex_lock(&pcm->open_mutex);
++ wake_up(&pcm->open_wait);
+ list_del_init(&pcm->list);
+ for (cidx = 0; cidx < 2; cidx++)
+- for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
+- if (substream->runtime)
++ for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) {
++ snd_pcm_stream_lock_irq(substream);
++ if (substream->runtime) {
+ substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED;
++ wake_up(&substream->runtime->sleep);
++ wake_up(&substream->runtime->tsleep);
++ }
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ list_for_each_entry(notify, &snd_pcm_notify_list, list) {
+ notify->n_disconnect(pcm);
+ }
+@@ -1066,6 +1074,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ }
+ snd_unregister_device(devtype, pcm->card, pcm->device);
+ }
++ mutex_unlock(&pcm->open_mutex);
+ unlock:
+ mutex_unlock(&register_mutex);
+ return 0;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 25ed9fe..7ada40e 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -369,6 +369,14 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
+ return usecs;
+ }
+
++static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
++{
++ snd_pcm_stream_lock_irq(substream);
++ if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
++ substream->runtime->status->state = state;
++ snd_pcm_stream_unlock_irq(substream);
++}
++
+ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+@@ -452,7 +460,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ runtime->boundary *= 2;
+
+ snd_pcm_timer_resolution_change(substream);
+- runtime->status->state = SNDRV_PCM_STATE_SETUP;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
+
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+@@ -464,7 +472,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ /* hardware might be unusable from this time,
+ so we force application to retry to set
+ the correct hardware parameter settings */
+- runtime->status->state = SNDRV_PCM_STATE_OPEN;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ if (substream->ops->hw_free != NULL)
+ substream->ops->hw_free(substream);
+ return err;
+@@ -512,7 +520,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
+ return -EBADFD;
+ if (substream->ops->hw_free)
+ result = substream->ops->hw_free(substream);
+- runtime->status->state = SNDRV_PCM_STATE_OPEN;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ return result;
+ }
+@@ -1320,7 +1328,7 @@ static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ runtime->control->appl_ptr = runtime->status->hw_ptr;
+- runtime->status->state = SNDRV_PCM_STATE_PREPARED;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
+ }
+
+ static struct action_ops snd_pcm_action_prepare = {
+@@ -1500,6 +1508,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
+ down_read(&snd_pcm_link_rwsem);
+ snd_pcm_stream_lock_irq(substream);
+ remove_wait_queue(&to_check->sleep, &wait);
++ if (card->shutdown) {
++ result = -ENODEV;
++ break;
++ }
+ if (tout == 0) {
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ result = -ESTRPIPE;
+@@ -1620,6 +1632,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ _end:
+ write_unlock_irq(&snd_pcm_link_rwlock);
+ up_write(&snd_pcm_link_rwsem);
++ snd_card_unref(substream1->pcm->card);
+ fput(file);
+ return res;
+ }
+@@ -2092,7 +2105,10 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file)
+ return err;
+ pcm = snd_lookup_minor_data(iminor(inode),
+ SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
++ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
++ if (pcm)
++ snd_card_unref(pcm->card);
++ return err;
+ }
+
+ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
+@@ -2103,7 +2119,10 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
+ return err;
+ pcm = snd_lookup_minor_data(iminor(inode),
+ SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
++ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
++ if (pcm)
++ snd_card_unref(pcm->card);
++ return err;
+ }
+
+ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
+@@ -2140,6 +2159,10 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
+ mutex_unlock(&pcm->open_mutex);
+ schedule();
+ mutex_lock(&pcm->open_mutex);
++ if (pcm->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index ebf6e49..1bb95ae 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -379,8 +379,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ if (rmidi == NULL)
+ return -ENODEV;
+
+- if (!try_module_get(rmidi->card->module))
++ if (!try_module_get(rmidi->card->module)) {
++ snd_card_unref(rmidi->card);
+ return -ENXIO;
++ }
+
+ mutex_lock(&rmidi->open_mutex);
+ card = rmidi->card;
+@@ -422,6 +424,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ mutex_unlock(&rmidi->open_mutex);
+ schedule();
+ mutex_lock(&rmidi->open_mutex);
++ if (rmidi->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -440,6 +446,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ #endif
+ file->private_data = rawmidi_file;
+ mutex_unlock(&rmidi->open_mutex);
++ snd_card_unref(rmidi->card);
+ return 0;
+
+ __error:
+@@ -447,6 +454,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ __error_card:
+ mutex_unlock(&rmidi->open_mutex);
+ module_put(rmidi->card->module);
++ snd_card_unref(rmidi->card);
+ return err;
+ }
+
+@@ -991,6 +999,8 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
+ spin_unlock_irq(&runtime->lock);
+ schedule();
+ remove_wait_queue(&runtime->sleep, &wait);
++ if (rfile->rmidi->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return result > 0 ? result : -ERESTARTSYS;
+ if (!runtime->avail)
+@@ -1234,6 +1244,8 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
+ spin_unlock_irq(&runtime->lock);
+ timeout = schedule_timeout(30 * HZ);
+ remove_wait_queue(&runtime->sleep, &wait);
++ if (rfile->rmidi->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return result > 0 ? result : -ERESTARTSYS;
+ if (!runtime->avail && !timeout)
+@@ -1609,9 +1621,20 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
+ {
+ struct snd_rawmidi *rmidi = device->device_data;
++ int dir;
+
+ mutex_lock(&register_mutex);
++ mutex_lock(&rmidi->open_mutex);
++ wake_up(&rmidi->open_wait);
+ list_del_init(&rmidi->list);
++ for (dir = 0; dir < 2; dir++) {
++ struct snd_rawmidi_substream *s;
++ list_for_each_entry(s, &rmidi->streams[dir].substreams, list) {
++ if (s->runtime)
++ wake_up(&s->runtime->sleep);
++ }
++ }
++
+ #ifdef CONFIG_SND_OSSEMUL
+ if (rmidi->ossreg) {
+ if ((int)rmidi->device == midi_map[rmidi->card->number]) {
+@@ -1626,6 +1649,7 @@ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
+ }
+ #endif /* CONFIG_SND_OSSEMUL */
+ snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
++ mutex_unlock(&rmidi->open_mutex);
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+diff --git a/sound/core/sound.c b/sound/core/sound.c
+index 828af35..8e17b4d 100644
+--- a/sound/core/sound.c
++++ b/sound/core/sound.c
+@@ -99,6 +99,10 @@ static void snd_request_other(int minor)
+ *
+ * Checks that a minor device with the specified type is registered, and returns
+ * its user data pointer.
++ *
++ * This function increments the reference counter of the card instance
++ * if an associated instance with the given minor number and type is found.
++ * The caller must call snd_card_unref() appropriately later.
+ */
+ void *snd_lookup_minor_data(unsigned int minor, int type)
+ {
+@@ -109,9 +113,11 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
+ return NULL;
+ mutex_lock(&sound_mutex);
+ mreg = snd_minors[minor];
+- if (mreg && mreg->type == type)
++ if (mreg && mreg->type == type) {
+ private_data = mreg->private_data;
+- else
++ if (private_data && mreg->card_ptr)
++ atomic_inc(&mreg->card_ptr->refcount);
++ } else
+ private_data = NULL;
+ mutex_unlock(&sound_mutex);
+ return private_data;
+@@ -275,6 +281,7 @@ int snd_register_device_for_dev(int type, struct snd_card *card, int dev,
+ preg->device = dev;
+ preg->f_ops = f_ops;
+ preg->private_data = private_data;
++ preg->card_ptr = card;
+ mutex_lock(&sound_mutex);
+ #ifdef CONFIG_SND_DYNAMIC_MINORS
+ minor = snd_find_free_minor(type);
+diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
+index c700920..ec86009 100644
+--- a/sound/core/sound_oss.c
++++ b/sound/core/sound_oss.c
+@@ -40,6 +40,9 @@
+ static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS];
+ static DEFINE_MUTEX(sound_oss_mutex);
+
++/* NOTE: This function increments the refcount of the associated card like
++ * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately
++ */
+ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
+ {
+ struct snd_minor *mreg;
+@@ -49,9 +52,11 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
+ return NULL;
+ mutex_lock(&sound_oss_mutex);
+ mreg = snd_oss_minors[minor];
+- if (mreg && mreg->type == type)
++ if (mreg && mreg->type == type) {
+ private_data = mreg->private_data;
+- else
++ if (private_data && mreg->card_ptr)
++ atomic_inc(&mreg->card_ptr->refcount);
++ } else
+ private_data = NULL;
+ mutex_unlock(&sound_oss_mutex);
+ return private_data;
+@@ -123,6 +128,7 @@ int snd_register_oss_device(int type, struct snd_card *card, int dev,
+ preg->device = dev;
+ preg->f_ops = f_ops;
+ preg->private_data = private_data;
++ preg->card_ptr = card;
+ mutex_lock(&sound_oss_mutex);
+ snd_oss_minors[minor] = preg;
+ minor_unit = SNDRV_MINOR_OSS_DEVICE(minor);
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index bcb3310..b4890f9 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -573,6 +573,7 @@ static int ad198x_build_pcms(struct hda_codec *codec)
+ if (spec->multiout.dig_out_nid) {
+ info++;
+ codec->num_pcms++;
++ codec->spdif_status_reset = 1;
+ info->name = "AD198x Digital";
+ info->pcm_type = HDA_PCM_TYPE_SPDIF;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index e449278..0ed6867 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -93,8 +93,8 @@ enum {
+ #define CS420X_VENDOR_NID 0x11
+ #define CS_DIG_OUT1_PIN_NID 0x10
+ #define CS_DIG_OUT2_PIN_NID 0x15
+-#define CS_DMIC1_PIN_NID 0x12
+-#define CS_DMIC2_PIN_NID 0x0e
++#define CS_DMIC1_PIN_NID 0x0e
++#define CS_DMIC2_PIN_NID 0x12
+
+ /* coef indices */
+ #define IDX_SPDIF_STAT 0x0000
+@@ -1088,14 +1088,18 @@ static void init_input(struct hda_codec *codec)
+ cs_automic(codec);
+
+ coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
++ cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
++
++ coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG);
+ if (is_active_pin(codec, CS_DMIC2_PIN_NID))
+- coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
++ coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */
+ if (is_active_pin(codec, CS_DMIC1_PIN_NID))
+- coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
++ coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off
+ * No effect if SPDIF_OUT2 is
+ * selected in IDX_SPDIF_CTL.
+ */
+- cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
++
++ cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef);
+ }
+ }
+
+@@ -1109,7 +1113,7 @@ static const struct hda_verb cs_coef_init_verbs[] = {
+ | 0x0400 /* Disable Coefficient Auto increment */
+ )},
+ /* Beep */
+- {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
++ {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
+
+ {} /* terminator */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c2c7f90..3ce2da2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6039,6 +6039,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ .patch = patch_alc662 },
+ { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
+ { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
++ { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
+ { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
+ { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+ { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
+@@ -6056,6 +6057,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
+ { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 },
+ { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 },
++ { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 },
+ {} /* terminator */
+ };
+
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 7160ff2..9e0c889 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -1856,11 +1856,11 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ const struct auto_pin_cfg *cfg = &spec->autocfg;
+- int i, dac_num;
++ int i;
+ hda_nid_t nid;
+
++ spec->multiout.num_dacs = 0;
+ spec->multiout.dac_nids = spec->private_dac_nids;
+- dac_num = 0;
+ for (i = 0; i < cfg->line_outs; i++) {
+ hda_nid_t dac = 0;
+ nid = cfg->line_out_pins[i];
+@@ -1871,16 +1871,13 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
+ if (!i && parse_output_path(codec, nid, dac, 1,
+ &spec->out_mix_path))
+ dac = spec->out_mix_path.path[0];
+- if (dac) {
+- spec->private_dac_nids[i] = dac;
+- dac_num++;
+- }
++ if (dac)
++ spec->private_dac_nids[spec->multiout.num_dacs++] = dac;
+ }
+ if (!spec->out_path[0].depth && spec->out_mix_path.depth) {
+ spec->out_path[0] = spec->out_mix_path;
+ spec->out_mix_path.depth = 0;
+ }
+- spec->multiout.num_dacs = dac_num;
+ return 0;
+ }
+
+@@ -3689,6 +3686,18 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ }
+
++/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e
++ * Replace this with mixer NID 0x1c
++ */
++static void fix_vt1802_connections(struct hda_codec *codec)
++{
++ static hda_nid_t conn_24[] = { 0x14, 0x1c };
++ static hda_nid_t conn_33[] = { 0x1c };
++
++ snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24);
++ snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33);
++}
++
+ /* patch for vt2002P */
+ static int patch_vt2002P(struct hda_codec *codec)
+ {
+@@ -3703,6 +3712,8 @@ static int patch_vt2002P(struct hda_codec *codec)
+ spec->aa_mix_nid = 0x21;
+ override_mic_boost(codec, 0x2b, 0, 3, 40);
+ override_mic_boost(codec, 0x29, 0, 3, 40);
++ if (spec->codec_type == VT1802)
++ fix_vt1802_connections(codec);
+ add_secret_dac_path(codec);
+
+ /* automatic parse from the BIOS config */
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 0f6dc0d..566acb3 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -336,7 +336,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
+ return -ENOMEM;
+ }
+
+- mutex_init(&chip->shutdown_mutex);
++ init_rwsem(&chip->shutdown_rwsem);
+ chip->index = idx;
+ chip->dev = dev;
+ chip->card = card;
+@@ -555,9 +555,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ return;
+
+ card = chip->card;
+- mutex_lock(&register_mutex);
+- mutex_lock(&chip->shutdown_mutex);
++ down_write(&chip->shutdown_rwsem);
+ chip->shutdown = 1;
++ up_write(&chip->shutdown_rwsem);
++
++ mutex_lock(&register_mutex);
+ chip->num_interfaces--;
+ if (chip->num_interfaces <= 0) {
+ snd_card_disconnect(card);
+@@ -574,11 +576,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ snd_usb_mixer_disconnect(p);
+ }
+ usb_chip[chip->index] = NULL;
+- mutex_unlock(&chip->shutdown_mutex);
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+ } else {
+- mutex_unlock(&chip->shutdown_mutex);
+ mutex_unlock(&register_mutex);
+ }
+ }
+@@ -610,16 +610,20 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ {
+ int err = -ENODEV;
+
++ down_read(&chip->shutdown_rwsem);
+ if (!chip->shutdown && !chip->probing)
+ err = usb_autopm_get_interface(chip->pm_intf);
++ up_read(&chip->shutdown_rwsem);
+
+ return err;
+ }
+
+ void snd_usb_autosuspend(struct snd_usb_audio *chip)
+ {
++ down_read(&chip->shutdown_rwsem);
+ if (!chip->shutdown && !chip->probing)
+ usb_autopm_put_interface(chip->pm_intf);
++ up_read(&chip->shutdown_rwsem);
+ }
+
+ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index a39edcc..665e297 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -86,6 +86,7 @@ struct snd_usb_substream {
+ struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */
+ char *syncbuf; /* sync buffer for all sync URBs */
+ dma_addr_t sync_dma; /* DMA address of syncbuf */
++ unsigned int speed; /* USB_SPEED_XXX */
+
+ u64 formats; /* format bitmasks (all or'ed) */
+ unsigned int num_formats; /* number of supported audio formats (list) */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 08dcce5..24c5114 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -148,8 +148,10 @@ void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
+ int i;
+
+ /* stop urbs (to be sure) */
+- deactivate_urbs(subs, force, 1);
+- wait_clear_urbs(subs);
++ if (!subs->stream->chip->shutdown) {
++ deactivate_urbs(subs, force, 1);
++ wait_clear_urbs(subs);
++ }
+
+ for (i = 0; i < MAX_URBS; i++)
+ release_urb_ctx(&subs->dataurb[i]);
+@@ -895,7 +897,8 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->dev = as->chip->dev;
+ subs->txfr_quirk = as->chip->txfr_quirk;
+ subs->ops = audio_urb_ops[stream];
+- if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
++ subs->speed = snd_usb_get_speed(subs->dev);
++ if (subs->speed >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index ab23869..6730a33 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -287,25 +287,32 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int v
+ unsigned char buf[2];
+ int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+ int timeout = 10;
+- int err;
++ int idx = 0, err;
+
+ err = snd_usb_autoresume(cval->mixer->chip);
+ if (err < 0)
+ return -EIO;
++ down_read(&chip->shutdown_rwsem);
+ while (timeout-- > 0) {
++ if (chip->shutdown)
++ break;
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, val_len) >= val_len) {
++ validx, idx, buf, val_len) >= val_len) {
+ *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
+- snd_usb_autosuspend(cval->mixer->chip);
+- return 0;
++ err = 0;
++ goto out;
+ }
+ }
+- snd_usb_autosuspend(cval->mixer->chip);
+ snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
+- return -EINVAL;
++ request, validx, idx, cval->val_type);
++ err = -EINVAL;
++
++ out:
++ up_read(&chip->shutdown_rwsem);
++ snd_usb_autosuspend(cval->mixer->chip);
++ return err;
+ }
+
+ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
+@@ -313,7 +320,7 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v
+ struct snd_usb_audio *chip = cval->mixer->chip;
+ unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
+ unsigned char *val;
+- int ret, size;
++ int idx = 0, ret, size;
+ __u8 bRequest;
+
+ if (request == UAC_GET_CUR) {
+@@ -330,16 +337,22 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v
+ if (ret)
+ goto error;
+
+- ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
++ down_read(&chip->shutdown_rwsem);
++ if (chip->shutdown)
++ ret = -ENODEV;
++ else {
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
++ ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, size);
++ validx, idx, buf, size);
++ }
++ up_read(&chip->shutdown_rwsem);
+ snd_usb_autosuspend(chip);
+
+ if (ret < 0) {
+ error:
+ snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
++ request, validx, idx, cval->val_type);
+ return ret;
+ }
+
+@@ -417,7 +430,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ {
+ struct snd_usb_audio *chip = cval->mixer->chip;
+ unsigned char buf[2];
+- int val_len, err, timeout = 10;
++ int idx = 0, val_len, err, timeout = 10;
+
+ if (cval->mixer->protocol == UAC_VERSION_1) {
+ val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+@@ -440,19 +453,27 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ err = snd_usb_autoresume(chip);
+ if (err < 0)
+ return -EIO;
+- while (timeout-- > 0)
++ down_read(&chip->shutdown_rwsem);
++ while (timeout-- > 0) {
++ if (chip->shutdown)
++ break;
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ if (snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), request,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, val_len) >= 0) {
+- snd_usb_autosuspend(chip);
+- return 0;
++ validx, idx, buf, val_len) >= 0) {
++ err = 0;
++ goto out;
+ }
+- snd_usb_autosuspend(chip);
++ }
+ snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type, buf[0], buf[1]);
+- return -EINVAL;
++ request, validx, idx, cval->val_type, buf[0], buf[1]);
++ err = -EINVAL;
++
++ out:
++ up_read(&chip->shutdown_rwsem);
++ snd_usb_autosuspend(chip);
++ return err;
+ }
+
+ static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ab125ee..38a607a 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -186,6 +186,11 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
+ if (value > 1)
+ return -EINVAL;
+ changed = value != mixer->audigy2nx_leds[index];
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown) {
++ err = -ENODEV;
++ goto out;
++ }
+ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+@@ -202,6 +207,8 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ value, index + 2, NULL, 0);
++ out:
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err < 0)
+ return err;
+ mixer->audigy2nx_leds[index] = value;
+@@ -295,11 +302,16 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
+
+ for (i = 0; jacks[i].name; ++i) {
+ snd_iprintf(buffer, "%s: ", jacks[i].name);
+- err = snd_usb_ctl_msg(mixer->chip->dev,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ err = 0;
++ else
++ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
+ UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE, 0,
+ jacks[i].unitid << 8, buf, 3);
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err == 3 && (buf[0] == 3 || buf[0] == 6))
+ snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
+ else
+@@ -329,10 +341,15 @@ static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
+ else
+ new_status = old_status & ~0x02;
+ changed = new_status != old_status;
+- err = snd_usb_ctl_msg(mixer->chip->dev,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ err = -ENODEV;
++ else
++ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 50, 0, &new_status, 1);
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = new_status;
+@@ -371,11 +388,17 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u8 tmp;
++ int ret;
+
+- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ ret = -ENODEV;
++ else
++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0, cpu_to_le16(wIndex),
+ &tmp, sizeof(tmp), 1000);
++ up_read(&mixer->chip->shutdown_rwsem);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+@@ -396,11 +419,17 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u16 wValue = ucontrol->value.integer.value[0];
++ int ret;
+
+- int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ ret = -ENODEV;
++ else
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ cpu_to_le16(wValue), cpu_to_le16(wIndex),
+ NULL, 0, 1000);
++ up_read(&mixer->chip->shutdown_rwsem);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 839165f..983e071 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -67,6 +67,8 @@ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream
+ unsigned int hwptr_done;
+
+ subs = (struct snd_usb_substream *)substream->runtime->private_data;
++ if (subs->stream->chip->shutdown)
++ return SNDRV_PCM_POS_XRUN;
+ spin_lock(&subs->lock);
+ hwptr_done = subs->hwptr_done;
+ substream->runtime->delay = snd_usb_pcm_delay(subs,
+@@ -373,8 +375,14 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ changed = subs->cur_audiofmt != fmt ||
+ subs->period_bytes != params_period_bytes(hw_params) ||
+ subs->cur_rate != rate;
++
++ down_read(&subs->stream->chip->shutdown_rwsem);
++ if (subs->stream->chip->shutdown) {
++ ret = -ENODEV;
++ goto unlock;
++ }
+ if ((ret = set_format(subs, fmt)) < 0)
+- return ret;
++ goto unlock;
+
+ if (subs->cur_rate != rate) {
+ struct usb_host_interface *alts;
+@@ -383,12 +391,11 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ alts = &iface->altsetting[fmt->altset_idx];
+ ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+ if (ret < 0)
+- return ret;
++ goto unlock;
+ subs->cur_rate = rate;
+ }
+
+ if (changed) {
+- mutex_lock(&subs->stream->chip->shutdown_mutex);
+ /* format changed */
+ snd_usb_release_substream_urbs(subs, 0);
+ /* influenced: period_bytes, channels, rate, format, */
+@@ -396,9 +403,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ params_rate(hw_params),
+ snd_pcm_format_physical_width(params_format(hw_params)) *
+ params_channels(hw_params));
+- mutex_unlock(&subs->stream->chip->shutdown_mutex);
+ }
+
++unlock:
++ up_read(&subs->stream->chip->shutdown_rwsem);
+ return ret;
+ }
+
+@@ -414,9 +422,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
+ subs->cur_audiofmt = NULL;
+ subs->cur_rate = 0;
+ subs->period_bytes = 0;
+- mutex_lock(&subs->stream->chip->shutdown_mutex);
++ down_read(&subs->stream->chip->shutdown_rwsem);
+ snd_usb_release_substream_urbs(subs, 0);
+- mutex_unlock(&subs->stream->chip->shutdown_mutex);
++ up_read(&subs->stream->chip->shutdown_rwsem);
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+ }
+
+@@ -429,12 +437,18 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = runtime->private_data;
++ int ret = 0;
+
+ if (! subs->cur_audiofmt) {
+ snd_printk(KERN_ERR "usbaudio: no format is specified!\n");
+ return -ENXIO;
+ }
+
++ down_read(&subs->stream->chip->shutdown_rwsem);
++ if (subs->stream->chip->shutdown) {
++ ret = -ENODEV;
++ goto unlock;
++ }
+ /* some unit conversions in runtime */
+ subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
+ subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
+@@ -447,7 +461,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ subs->last_frame_number = 0;
+ runtime->delay = 0;
+
+- return snd_usb_substream_prepare(subs, runtime);
++ ret = snd_usb_substream_prepare(subs, runtime);
++ unlock:
++ up_read(&subs->stream->chip->shutdown_rwsem);
++ return ret;
+ }
+
+ static struct snd_pcm_hardware snd_usb_hardware =
+@@ -500,7 +517,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
+ return 0;
+ }
+ /* check whether the period time is >= the data packet interval */
+- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) {
++ if (subs->speed != USB_SPEED_FULL) {
+ ptime = 125 * (1 << fp->datainterval);
+ if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
+ hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max);
+@@ -776,7 +793,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
+ return err;
+
+ param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME;
+- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
++ if (subs->speed == USB_SPEED_FULL)
+ /* full speed devices have fixed data packet interval */
+ ptmin = 1000;
+ if (ptmin == 1000)
+diff --git a/sound/usb/proc.c b/sound/usb/proc.c
+index 961c9a2..aef03db 100644
+--- a/sound/usb/proc.c
++++ b/sound/usb/proc.c
+@@ -107,7 +107,7 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
+ }
+ snd_iprintf(buffer, "\n");
+ }
+- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
++ if (subs->speed != USB_SPEED_FULL)
+ snd_iprintf(buffer, " Data packet interval: %d us\n",
+ 125 * (1 << fp->datainterval));
+ // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
+@@ -128,7 +128,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
+ snd_iprintf(buffer, "]\n");
+ snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize);
+ snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
+- snd_usb_get_speed(subs->dev) == USB_SPEED_FULL
++ subs->speed == USB_SPEED_FULL
+ ? get_full_speed_hz(subs->freqm)
+ : get_high_speed_hz(subs->freqm),
+ subs->freqm >> 16, subs->freqm & 0xffff);
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 3e2b035..6c805a5 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -36,7 +36,7 @@ struct snd_usb_audio {
+ struct snd_card *card;
+ struct usb_interface *pm_intf;
+ u32 usb_id;
+- struct mutex shutdown_mutex;
++ struct rw_semaphore shutdown_rwsem;
+ unsigned int shutdown:1;
+ unsigned int probing:1;
+ unsigned int autosuspended:1;
diff --git a/3.2.54/1034_linux-3.2.35.patch b/3.2.54/1034_linux-3.2.35.patch
new file mode 100644
index 0000000..76a9c19
--- /dev/null
+++ b/3.2.54/1034_linux-3.2.35.patch
@@ -0,0 +1,3014 @@
+diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
+index cc0ebc5..fd129f6 100644
+--- a/Documentation/cgroups/memory.txt
++++ b/Documentation/cgroups/memory.txt
+@@ -440,6 +440,10 @@ Note:
+ 5.3 swappiness
+
+ Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
++Please note that unlike the global swappiness, memcg knob set to 0
++really prevents from any swapping even if there is a swap storage
++available. This might lead to memcg OOM killer if there are no file
++pages to reclaim.
+
+ Following cgroups' swappiness can't be changed.
+ - root cgroup (uses /proc/sys/vm/swappiness).
+diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
+index e67be7a..48d25ea 100755
+--- a/Documentation/dvb/get_dvb_firmware
++++ b/Documentation/dvb/get_dvb_firmware
+@@ -115,7 +115,7 @@ sub tda10045 {
+
+ sub tda10046 {
+ my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip";
+- my $url = "http://www.tt-download.com/download/updates/219/$sourcefile";
++ my $url = "http://technotrend.com.ua/download/software/219/$sourcefile";
+ my $hash = "6a7e1e2f2644b162ff0502367553c72d";
+ my $outfile = "dvb-fe-tda10046.fw";
+ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
+diff --git a/Makefile b/Makefile
+index 14ebacf..d985af0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 9fdc151..27bcd12 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -542,6 +542,7 @@ config ARCH_KIRKWOOD
+ bool "Marvell Kirkwood"
+ select CPU_FEROCEON
+ select PCI
++ select PCI_QUIRKS
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_CLOCKEVENTS
+ select PLAT_ORION
+diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
+index 3ad9f94..11799c3 100644
+--- a/arch/arm/mach-dove/include/mach/pm.h
++++ b/arch/arm/mach-dove/include/mach/pm.h
+@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
+
+ static inline int irq_to_pmu(int irq)
+ {
+- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
++ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
+ return irq - IRQ_DOVE_PMU_START;
+
+ return -EINVAL;
+diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
+index f07fd16..9f2fd10 100644
+--- a/arch/arm/mach-dove/irq.c
++++ b/arch/arm/mach-dove/irq.c
+@@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
+ int pin = irq_to_pmu(d->irq);
+ u32 u;
+
++ /*
++ * The PMU mask register is not RW0C: it is RW. This means that
++ * the bits take whatever value is written to them; if you write
++ * a '1', you will set the interrupt.
++ *
++ * Unfortunately this means there is NO race free way to clear
++ * these interrupts.
++ *
++ * So, let's structure the code so that the window is as small as
++ * possible.
++ */
+ u = ~(1 << (pin & 31));
+- writel(u, PMU_INTERRUPT_CAUSE);
++ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
++ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
+ }
+
+ static struct irq_chip pmu_irq_chip = {
+diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
+index 74b992d..a881c54 100644
+--- a/arch/arm/mach-kirkwood/pcie.c
++++ b/arch/arm/mach-kirkwood/pcie.c
+@@ -213,14 +213,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
+ return 1;
+ }
+
++/*
++ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
++ * is operating as a root complex this needs to be switched to
++ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
++ * the device. Decoding setup is handled by the orion code.
++ */
+ static void __devinit rc_pci_fixup(struct pci_dev *dev)
+ {
+- /*
+- * Prevent enumeration of root complex.
+- */
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+ int i;
+
++ dev->class &= 0xff;
++ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 60e8866..93fe83e 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -156,7 +156,7 @@ typedef struct sigaltstack {
+ static inline void sigaddset(sigset_t *set, int _sig)
+ {
+ asm ("bfset %0{%1,#1}"
+- : "+od" (*set)
++ : "+o" (*set)
+ : "id" ((_sig - 1) ^ 31)
+ : "cc");
+ }
+@@ -164,7 +164,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
+ static inline void sigdelset(sigset_t *set, int _sig)
+ {
+ asm ("bfclr %0{%1,#1}"
+- : "+od" (*set)
++ : "+o" (*set)
+ : "id" ((_sig - 1) ^ 31)
+ : "cc");
+ }
+@@ -180,7 +180,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
+ int ret;
+ asm ("bfextu %1{%2,#1},%0"
+ : "=d" (ret)
+- : "od" (*set), "id" ((_sig-1) ^ 31)
++ : "o" (*set), "id" ((_sig-1) ^ 31)
+ : "cc");
+ return ret;
+ }
+diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
+index e141324..d0ea054 100644
+--- a/arch/parisc/kernel/signal32.c
++++ b/arch/parisc/kernel/signal32.c
+@@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+ {
+ compat_sigset_t s;
+
+- if (sz != sizeof *set) panic("put_sigset32()");
++ if (sz != sizeof *set)
++ return -EINVAL;
+ sigset_64to32(&s, set);
+
+ return copy_to_user(up, &s, sizeof s);
+@@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+ compat_sigset_t s;
+ int r;
+
+- if (sz != sizeof *set) panic("put_sigset32()");
++ if (sz != sizeof *set)
++ return -EINVAL;
+
+ if ((r = copy_from_user(&s, up, sz)) == 0) {
+ sigset_32to64(set, &s);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index c9b9322..7ea75d1 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
+
++ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
++
+ addr = DCACHE_ALIGN(addr - offset) + offset;
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
+index 234f1d8..2e0a15b 100644
+--- a/arch/s390/include/asm/compat.h
++++ b/arch/s390/include/asm/compat.h
+@@ -20,7 +20,7 @@
+ #define PSW32_MASK_CC 0x00003000UL
+ #define PSW32_MASK_PM 0x00000f00UL
+
+-#define PSW32_MASK_USER 0x00003F00UL
++#define PSW32_MASK_USER 0x0000FF00UL
+
+ #define PSW32_ADDR_AMODE 0x80000000UL
+ #define PSW32_ADDR_INSN 0x7FFFFFFFUL
+diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
+index a658463..a5b4c48 100644
+--- a/arch/s390/include/asm/ptrace.h
++++ b/arch/s390/include/asm/ptrace.h
+@@ -240,7 +240,7 @@ typedef struct
+ #define PSW_MASK_EA 0x00000000UL
+ #define PSW_MASK_BA 0x00000000UL
+
+-#define PSW_MASK_USER 0x00003F00UL
++#define PSW_MASK_USER 0x0000FF00UL
+
+ #define PSW_ADDR_AMODE 0x80000000UL
+ #define PSW_ADDR_INSN 0x7FFFFFFFUL
+@@ -269,7 +269,7 @@ typedef struct
+ #define PSW_MASK_EA 0x0000000100000000UL
+ #define PSW_MASK_BA 0x0000000080000000UL
+
+-#define PSW_MASK_USER 0x00003F0180000000UL
++#define PSW_MASK_USER 0x0000FF0180000000UL
+
+ #define PSW_ADDR_AMODE 0x0000000000000000UL
+ #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 4f68c81..9fdd05d 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -312,6 +312,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
+ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
+ (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
++ /* Check for invalid user address space control. */
++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+ for (i = 0; i < NUM_GPRS; i++)
+ regs->gprs[i] = (__u64) regs32.gprs[i];
+@@ -493,7 +497,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (__force __u64) frame;
+- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
++ /* Force 31 bit amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__force __u64) ka->sa.sa_handler;
+
+ regs->gprs[2] = map_signal(sig);
+@@ -557,7 +564,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (__force __u64) frame;
+- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
++ /* Force 31 bit amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__u64) ka->sa.sa_handler;
+
+ regs->gprs[2] = map_signal(sig);
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index 5086553..d54d475 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -147,6 +147,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+ /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ (user_sregs.regs.psw.mask & PSW_MASK_USER);
++ /* Check for invalid user address space control. */
++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ /* Check for invalid amode */
+ if (regs->psw.mask & PSW_MASK_EA)
+ regs->psw.mask |= PSW_MASK_BA;
+@@ -293,7 +297,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (unsigned long) frame;
+- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
++ /* Force default amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+ regs->gprs[2] = map_signal(sig);
+@@ -362,7 +369,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (unsigned long) frame;
+- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
++ /* Force default amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+ regs->gprs[2] = map_signal(sig);
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 65cb06e..4ccf9f5 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (end < start)
++ if ((end < start) || (end > TASK_SIZE))
+ goto slow_irqon;
+
+ /*
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index f0836cd..d58d3ed 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -307,9 +307,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ err |= restore_fpu_state(regs, fpu_save);
+
+ err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
+- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
+-
+- if (err)
++ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
+ goto segv;
+
+ err |= __get_user(rwin_save, &sf->rwin_save);
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 3566454..3b96fd4 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -206,21 +206,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
+ }
+ #endif
+
+-/*
+- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+- * when it traps. The previous stack will be directly underneath the saved
+- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+- *
+- * This is valid only for kernel mode traps.
+- */
+-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+-{
+ #ifdef CONFIG_X86_32
+- return (unsigned long)(&regs->sp);
++extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
+ #else
++static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
++{
+ return regs->sp;
+-#endif
+ }
++#endif
+
+ #define GET_IP(regs) ((regs)->ip)
+ #define GET_FP(regs) ((regs)->bp)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index ff8557e..f07becc 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -587,6 +587,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ }
+ }
+
++ /*
++ * The way access filter has a performance penalty on some workloads.
++ * Disable it on the affected CPUs.
++ */
++ if ((c->x86 == 0x15) &&
++ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
++ u64 val;
++
++ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
++ val |= 0x1E;
++ checking_wrmsrl(0xc0011021, val);
++ }
++ }
++
+ cpu_detect_cache_sizes(c);
+
+ /* Multi core CPU? */
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 787e06c..ce04b58 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -323,17 +323,6 @@ device_initcall(thermal_throttle_init_device);
+
+ #endif /* CONFIG_SYSFS */
+
+-/*
+- * Set up the most two significant bit to notify mce log that this thermal
+- * event type.
+- * This is a temp solution. May be changed in the future with mce log
+- * infrasture.
+- */
+-#define CORE_THROTTLED (0)
+-#define CORE_POWER_LIMIT ((__u64)1 << 62)
+-#define PACKAGE_THROTTLED ((__u64)2 << 62)
+-#define PACKAGE_POWER_LIMIT ((__u64)3 << 62)
+-
+ static void notify_thresholds(__u64 msr_val)
+ {
+ /* check whether the interrupt handler is defined;
+@@ -363,27 +352,23 @@ static void intel_thermal_interrupt(void)
+ if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+ CORE_LEVEL) != 0)
+- mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
++ mce_log_therm_throt_event(msr_val);
+
+ if (this_cpu_has(X86_FEATURE_PLN))
+- if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
++ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+- CORE_LEVEL) != 0)
+- mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
++ CORE_LEVEL);
+
+ if (this_cpu_has(X86_FEATURE_PTS)) {
+ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+- if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
++ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+- PACKAGE_LEVEL) != 0)
+- mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
++ PACKAGE_LEVEL);
+ if (this_cpu_has(X86_FEATURE_PLN))
+- if (therm_throt_process(msr_val &
++ therm_throt_process(msr_val &
+ PACKAGE_THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+- PACKAGE_LEVEL) != 0)
+- mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
+- | msr_val);
++ PACKAGE_LEVEL);
+ }
+ }
+
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index ac52c15..1ef962b 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -163,6 +163,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
+ #define F1XH_MPB_MAX_SIZE 2048
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
++#define F16H_MPB_MAX_SIZE 3458
+
+ switch (c->x86) {
+ case 0x14:
+@@ -171,6 +172,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
+ case 0x15:
+ max_size = F15H_MPB_MAX_SIZE;
+ break;
++ case 0x16:
++ max_size = F16H_MPB_MAX_SIZE;
++ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 8252879..2dc4121 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -21,6 +21,7 @@
+ #include <linux/signal.h>
+ #include <linux/perf_event.h>
+ #include <linux/hw_breakpoint.h>
++#include <linux/module.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -164,6 +165,35 @@ static inline bool invalid_selector(u16 value)
+
+ #define FLAG_MASK FLAG_MASK_32
+
++/*
++ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
++ * when it traps. The previous stack will be directly underneath the saved
++ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
++ *
++ * Now, if the stack is empty, '&regs->sp' is out of range. In this
++ * case we try to take the previous stack. To always return a non-null
++ * stack pointer we fall back to regs as stack if no previous stack
++ * exists.
++ *
++ * This is valid only for kernel mode traps.
++ */
++unsigned long kernel_stack_pointer(struct pt_regs *regs)
++{
++ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
++ unsigned long sp = (unsigned long)&regs->sp;
++ struct thread_info *tinfo;
++
++ if (context == (sp & ~(THREAD_SIZE - 1)))
++ return sp;
++
++ tinfo = (struct thread_info *)context;
++ if (tinfo->previous_esp)
++ return tinfo->previous_esp;
++
++ return (unsigned long)regs;
++}
++EXPORT_SYMBOL_GPL(kernel_stack_pointer);
++
+ static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
+ {
+ BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index cf0ef98..0d403aa 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p)
+
+ #ifdef CONFIG_X86_64
+ if (max_pfn > max_low_pfn) {
+- max_pfn_mapped = init_memory_mapping(1UL<<32,
+- max_pfn<<PAGE_SHIFT);
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++
++ if (ei->addr + ei->size <= 1UL << 32)
++ continue;
++
++ if (ei->type == E820_RESERVED)
++ continue;
++
++ max_pfn_mapped = init_memory_mapping(
++ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
++ ei->addr + ei->size);
++ }
++
+ /* can we preseve max_low_pfn ?*/
+ max_low_pfn = max_pfn;
+ }
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 87488b9..34a7f40 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -28,36 +28,50 @@ int direct_gbpages
+ #endif
+ ;
+
+-static void __init find_early_table_space(unsigned long end, int use_pse,
+- int use_gbpages)
++struct map_range {
++ unsigned long start;
++ unsigned long end;
++ unsigned page_size_mask;
++};
++
++/*
++ * First calculate space needed for kernel direct mapping page tables to cover
++ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
++ * pages. Then find enough contiguous space for those page tables.
++ */
++static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ {
+- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
++ int i;
++ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
++ unsigned long start = 0, good_end;
+ phys_addr_t base;
+
+- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+- tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++ for (i = 0; i < nr_range; i++) {
++ unsigned long range, extra;
+
+- if (use_gbpages) {
+- unsigned long extra;
++ range = mr[i].end - mr[i].start;
++ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+- extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+- pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+- } else
+- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+-
+- tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
++ if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
++ extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
++ pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
++ } else {
++ pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
++ }
+
+- if (use_pse) {
+- unsigned long extra;
+-
+- extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
++ if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
++ extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
+ #ifdef CONFIG_X86_32
+- extra += PMD_SIZE;
++ extra += PMD_SIZE;
+ #endif
+- ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+- } else
+- ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ } else {
++ ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ }
++ }
+
++ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+@@ -75,7 +89,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
+ pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
+
+ printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
++ mr[nr_range - 1].end, pgt_buf_start << PAGE_SHIFT,
++ pgt_buf_top << PAGE_SHIFT);
+ }
+
+ void __init native_pagetable_reserve(u64 start, u64 end)
+@@ -83,12 +98,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
+ memblock_x86_reserve_range(start, end, "PGTABLE");
+ }
+
+-struct map_range {
+- unsigned long start;
+- unsigned long end;
+- unsigned page_size_mask;
+-};
+-
+ #ifdef CONFIG_X86_32
+ #define NR_RANGE_MR 3
+ #else /* CONFIG_X86_64 */
+@@ -260,7 +269,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ * nodes are discovered.
+ */
+ if (!after_bootmem)
+- find_early_table_space(end, use_pse, use_gbpages);
++ find_early_table_space(mr, nr_range);
+
+ for (i = 0; i < nr_range; i++)
+ ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index 6053285..ac2c6e7 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -49,6 +49,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ rq_end_io_fn *done)
+ {
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++ bool is_pm_resume;
+
+ if (unlikely(blk_queue_dead(q))) {
+ rq->errors = -ENXIO;
+@@ -59,12 +60,18 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
++ /*
++ * need to check this before __blk_run_queue(), because rq can
++ * be freed before that returns.
++ */
++ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
++
+ WARN_ON(irqs_disabled());
+ spin_lock_irq(q->queue_lock);
+ __elv_add_request(q, rq, where);
+ __blk_run_queue(q);
+ /* the queue is stopped so it won't be run */
+- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
++ if (is_pm_resume)
+ q->request_fn(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c364358..791df46 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -374,6 +374,8 @@ typedef struct drm_i915_private {
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
++ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
++ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 22efb08..87bb87b 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+ }
+
++/* get lvds_fp_timing entry
++ * this function may return NULL if the corresponding entry is invalid
++ */
++static const struct lvds_fp_timing *
++get_lvds_fp_timing(const struct bdb_header *bdb,
++ const struct bdb_lvds_lfp_data *data,
++ const struct bdb_lvds_lfp_data_ptrs *ptrs,
++ int index)
++{
++ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
++ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
++ size_t ofs;
++
++ if (index >= ARRAY_SIZE(ptrs->ptr))
++ return NULL;
++ ofs = ptrs->ptr[index].fp_timing_offset;
++ if (ofs < data_ofs ||
++ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
++ return NULL;
++ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
++}
++
+ /* Try to find integrated panel data */
+ static void
+ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
++ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int i, downclock;
+
+@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10*downclock);
+ }
++
++ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
++ lvds_lfp_data_ptrs,
++ lvds_options->panel_type);
++ if (fp_timing) {
++ /* check the resolution, just to be sure */
++ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
++ fp_timing->y_res == panel_fixed_mode->vdisplay) {
++ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
++ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
++ dev_priv->bios_lvds_val);
++ }
++ }
+ }
+
+ /* Try to find sdvo panel data */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index adac0dd..fdae61f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -356,6 +356,27 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
+ .find_pll = intel_find_pll_ironlake_dp,
+ };
+
++static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
++ unsigned int reg)
++{
++ unsigned int val;
++
++ if (dev_priv->lvds_val)
++ val = dev_priv->lvds_val;
++ else {
++ /* BIOS should set the proper LVDS register value at boot, but
++ * in reality, it doesn't set the value when the lid is closed;
++ * we need to check "the value to be set" in VBT when LVDS
++ * register is uninitialized.
++ */
++ val = I915_READ(reg);
++ if (!(val & ~LVDS_DETECTED))
++ val = dev_priv->bios_lvds_val;
++ dev_priv->lvds_val = val;
++ }
++ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
++}
++
+ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+ {
+@@ -364,8 +385,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP) {
++ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ /* LVDS dual channel */
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+@@ -393,8 +413,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP)
++ if (is_dual_link_lvds(dev_priv, LVDS))
+ /* LVDS with dual channel */
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+@@ -531,8 +550,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP)
++ if (is_dual_link_lvds(dev_priv, LVDS))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 3f4afba..9e24670 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -2264,6 +2264,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+ return true;
+ }
+
++static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
++{
++ struct drm_device *dev = intel_sdvo->base.base.dev;
++ struct drm_connector *connector, *tmp;
++
++ list_for_each_entry_safe(connector, tmp,
++ &dev->mode_config.connector_list, head) {
++ if (intel_attached_encoder(connector) == &intel_sdvo->base)
++ intel_sdvo_destroy(connector);
++ }
++}
++
+ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+@@ -2596,7 +2608,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+- goto err;
++ /* Output_setup can leave behind connectors! */
++ goto err_output;
+ }
+
+ /* Only enable the hotplug irq if we need it, to work around noisy
+@@ -2609,12 +2622,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+- goto err;
++ goto err_output;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+- goto err;
++ goto err_output;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+@@ -2634,6 +2647,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
++err_output:
++ intel_sdvo_output_cleanup(intel_sdvo);
++
+ err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 382e141..aca4755 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1386,7 +1386,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
++ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index ca94e23..b919b11 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1122,6 +1122,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ break;
+ udelay(1);
+ }
++ } else {
++ save->crtc_enabled[i] = false;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
+index bd2f33e..bc6b64f 100644
+--- a/drivers/gpu/drm/radeon/radeon_agp.c
++++ b/drivers/gpu/drm/radeon/radeon_agp.c
+@@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+ /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_DELL, 0x00e3, 2},
+- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
++ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_DELL, 0x0149, 1},
++ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
++ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
++ PCI_VENDOR_ID_IBM, 0x0531, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0061, 1},
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 727e93d..9e4313e 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -708,7 +708,10 @@ int ttm_get_pages(struct list_head *pages, int flags,
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+- clear_page(page_address(p));
++ if (PageHighMem(p))
++ clear_highpage(p);
++ else
++ clear_page(page_address(p));
+ }
+ }
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ab75a4e..652f230 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -277,6 +277,9 @@
+ #define USB_VENDOR_ID_EZKEY 0x0518
+ #define USB_DEVICE_ID_BTC_8193 0x0002
+
++#define USB_VENDOR_ID_FREESCALE 0x15A2
++#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
++
+ #define USB_VENDOR_ID_GAMERON 0x0810
+ #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
+ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index afb73af..aec3fa3 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 5b39216..3f28290 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -163,6 +163,38 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ .enter = &intel_idle },
+ };
+
++static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
++ { /* MWAIT C0 */ },
++ { /* MWAIT C1 */
++ .name = "C1-IVB",
++ .desc = "MWAIT 0x00",
++ .flags = CPUIDLE_FLAG_TIME_VALID,
++ .exit_latency = 1,
++ .target_residency = 1,
++ .enter = &intel_idle },
++ { /* MWAIT C2 */
++ .name = "C3-IVB",
++ .desc = "MWAIT 0x10",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 59,
++ .target_residency = 156,
++ .enter = &intel_idle },
++ { /* MWAIT C3 */
++ .name = "C6-IVB",
++ .desc = "MWAIT 0x20",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 80,
++ .target_residency = 300,
++ .enter = &intel_idle },
++ { /* MWAIT C4 */
++ .name = "C7-IVB",
++ .desc = "MWAIT 0x30",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 87,
++ .target_residency = 300,
++ .enter = &intel_idle },
++};
++
+ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+@@ -386,6 +418,11 @@ static int intel_idle_probe(void)
+ cpuidle_state_table = snb_cstates;
+ break;
+
++ case 0x3A: /* IVB */
++ case 0x3E: /* IVB Xeon */
++ cpuidle_state_table = ivb_cstates;
++ break;
++
+ default:
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
+index ec58f48..1512bd8 100644
+--- a/drivers/input/mouse/bcm5974.c
++++ b/drivers/input/mouse/bcm5974.c
+@@ -453,6 +453,9 @@ static void setup_events_to_report(struct input_dev *input_dev,
+ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+
++ if (cfg->caps & HAS_INTEGRATED_BUTTON)
++ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
++
+ input_set_events_per_packet(input_dev, 60);
+ }
+
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index d37a48e..8656441 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
+ * Reset the controller and reset CRT to the original value set by BIOS.
+ */
+
+-static void i8042_controller_reset(void)
++static void i8042_controller_reset(bool force_reset)
+ {
+ i8042_flush();
+
+@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
+ * Reset the controller if requested.
+ */
+
+- if (i8042_reset)
++ if (i8042_reset || force_reset)
+ i8042_controller_selftest();
+
+ /*
+@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
+ * upsetting it.
+ */
+
+-static int i8042_pm_reset(struct device *dev)
++static int i8042_pm_suspend(struct device *dev)
+ {
+- i8042_controller_reset();
++ i8042_controller_reset(true);
+
+ return 0;
+ }
+@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
+ return 0;
+ }
+
++static int i8042_pm_reset(struct device *dev)
++{
++ i8042_controller_reset(false);
++
++ return 0;
++}
++
+ static int i8042_pm_restore(struct device *dev)
+ {
+ return i8042_controller_resume(false);
+ }
+
+ static const struct dev_pm_ops i8042_pm_ops = {
+- .suspend = i8042_pm_reset,
++ .suspend = i8042_pm_suspend,
+ .resume = i8042_pm_resume,
+ .thaw = i8042_pm_thaw,
+ .poweroff = i8042_pm_reset,
+@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
+
+ static void i8042_shutdown(struct platform_device *dev)
+ {
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ }
+
+ static int __init i8042_create_kbd_port(void)
+@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
+ out_fail:
+ i8042_free_aux_ports(); /* in case KBD failed but AUX not */
+ i8042_free_irqs();
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ i8042_platform_device = NULL;
+
+ return error;
+@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
+ {
+ i8042_unregister_ports();
+ i8042_free_irqs();
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ i8042_platform_device = NULL;
+
+ return 0;
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index 3913f47..492aa52 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -616,7 +616,13 @@ static void int_in_work(struct work_struct *work)
+ if (rc == 0)
+ /* success, resubmit interrupt read URB */
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+- if (rc != 0 && rc != -ENODEV) {
++
++ switch (rc) {
++ case 0: /* success */
++ case -ENODEV: /* device gone */
++ case -EINVAL: /* URB already resubmitted, or terminal badness */
++ break;
++ default: /* failure: try to recover by resetting the device */
+ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
+ rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
+ if (rc == 0) {
+@@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
+ }
+
+ /* gigaset_suspend
+- * This function is called before the USB connection is suspended.
++ * This function is called before the USB connection is suspended
++ * or before the USB device is reset.
++ * In the latter case, message == PMSG_ON.
+ */
+ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+@@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+ del_timer_sync(&ucs->timer_atrdy);
+ del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+- cancel_work_sync(&ucs->int_in_wq);
++
++ /* don't try to cancel int_in_wq from within reset as it
++ * might be the one requesting the reset
++ */
++ if (message.event != PM_EVENT_ON)
++ cancel_work_sync(&ucs->int_in_wq);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 502dcf7..8953630 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -755,8 +755,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+ if (!md_in_flight(md))
+ wake_up(&md->wait);
+
++ /*
++ * Run this off this callpath, as drivers could invoke end_io while
++ * inside their request_fn (and holding the queue lock). Calling
++ * back into ->request_fn() could deadlock attempting to grab the
++ * queue lock again.
++ */
+ if (run_queue)
+- blk_run_queue(md->queue);
++ blk_run_queue_async(md->queue);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2887f22..145e378e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1801,10 +1801,10 @@ retry:
+ memset(bbp, 0xff, PAGE_SIZE);
+
+ for (i = 0 ; i < bb->count ; i++) {
+- u64 internal_bb = *p++;
++ u64 internal_bb = p[i];
+ u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
+ | BB_LEN(internal_bb));
+- *bbp++ = cpu_to_le64(store_bb);
++ bbp[i] = cpu_to_le64(store_bb);
+ }
+ bb->changed = 0;
+ if (read_seqretry(&bb->lock, seq))
+@@ -7650,9 +7650,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+ {
+ int hi;
+- int lo = 0;
++ int lo;
+ u64 *p = bb->page;
+- int rv = 0;
++ int rv;
+ sector_t target = s + sectors;
+ unsigned seq;
+
+@@ -7667,7 +7667,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+
+ retry:
+ seq = read_seqbegin(&bb->lock);
+-
++ lo = 0;
++ rv = 0;
+ hi = bb->count;
+
+ /* Binary search between lo and hi for 'target'
+diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
+index e585263..f38c348 100644
+--- a/drivers/mtd/devices/slram.c
++++ b/drivers/mtd/devices/slram.c
+@@ -266,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
+
+ if (*(szlength) != '+') {
+ devlength = simple_strtoul(szlength, &buffer, 0);
+- devlength = handle_unit(devlength, buffer) - devstart;
++ devlength = handle_unit(devlength, buffer);
+ if (devlength < devstart)
+ goto err_out;
+
+diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
+index 64be8f0..d9127e2 100644
+--- a/drivers/mtd/ofpart.c
++++ b/drivers/mtd/ofpart.c
+@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
+ nr_parts = plen / sizeof(part[0]);
+
+ *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+- if (!pparts)
++ if (!*pparts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+index 4ae26a7..7720721 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
++ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index f1365fe..2c14e85 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -3157,6 +3157,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
++ case IXGBE_DEV_ID_X540T1:
+ return 0;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return 0;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 8ef92d1..cc96a5a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -106,6 +106,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+ /* required last entry */
+ {0, }
+ };
+@@ -7611,6 +7613,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_X540T:
++ case IXGBE_DEV_ID_X540T1:
+ /* Check eeprom to see if it is enabled */
+ hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+ wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index 6c5cca8..f00d6d5 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -65,6 +65,8 @@
+ #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+ #define IXGBE_DEV_ID_82599_LS 0x154F
+ #define IXGBE_DEV_ID_X540T 0x1528
++#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
++#define IXGBE_DEV_ID_X540T1 0x1560
+
+ /* VF Device IDs */
+ #define IXGBE_DEV_ID_82599_VF 0x10ED
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+index 0515862..858a762 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+@@ -1072,9 +1072,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
+- wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
+- "transit %d\n", "AMPDU status", seq,
+- ini->tx_in_transit);
++ BCMMSG(wiphy,
++ "BA Timeout, seq %d, in_transit %d\n",
++ seq, ini->tx_in_transit);
+ ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
+ p);
+ }
+diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
+index 6e0a3ea..5a25dd2 100644
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -816,9 +816,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
+ return;
+ }
+ cmd_node = adapter->curr_cmd;
+- if (cmd_node->wait_q_enabled)
+- adapter->cmd_wait_q.status = -ETIMEDOUT;
+-
+ if (cmd_node) {
+ adapter->dbg.timeout_cmd_id =
+ adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
+@@ -863,6 +860,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
+
+ dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
+ adapter->ps_mode, adapter->ps_state);
++
++ if (cmd_node->wait_q_enabled) {
++ adapter->cmd_wait_q.status = -ETIMEDOUT;
++ wake_up_interruptible(&adapter->cmd_wait_q.wait);
++ mwifiex_cancel_pending_ioctl(adapter);
++ /* reset cmd_sent flag to unblock new commands */
++ adapter->cmd_sent = false;
++ }
+ }
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+ mwifiex_init_fw_complete(adapter);
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 283171b..3579a68 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -162,7 +162,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
+ struct sdio_mmc_card *card;
+ struct mwifiex_adapter *adapter;
+ mmc_pm_flag_t pm_flag = 0;
+- int hs_actived = 0;
+ int i;
+ int ret = 0;
+
+@@ -189,12 +188,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
+ adapter = card->adapter;
+
+ /* Enable the Host Sleep */
+- hs_actived = mwifiex_enable_hs(adapter);
+- if (hs_actived) {
+- pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
+- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++ if (!mwifiex_enable_hs(adapter)) {
++ dev_err(adapter->dev, "cmd: failed to suspend\n");
++ return -EFAULT;
+ }
+
++ dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
++ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++
+ /* Indicate device suspended */
+ adapter->is_suspended = true;
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 0302148..a99be2d0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -307,6 +307,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ /*=== Customer ID ===*/
+ /****** 8188CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
++ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
+ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 86b69f85..9d932f4 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -612,7 +612,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+- calculate_iosize(size, min_size+add_size, size1,
++ calculate_iosize(size, min_size, add_size + size1,
+ resource_size(b_res), 4096);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+@@ -726,7 +726,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+- calculate_memsize(size, min_size+add_size, 0,
++ calculate_memsize(size, min_size, add_size,
+ resource_size(b_res), min_align);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 5717509b..8b25f9c 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -233,11 +233,12 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ return -EINVAL;
+ }
+
+- new_size = resource_size(res) + addsize + min_align;
++ /* already aligned with min_align */
++ new_size = resource_size(res) + addsize;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
++ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 110e4af..7d47434 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -105,6 +105,7 @@ static const struct key_entry acer_wmi_keymap[] = {
+ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
+ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
+ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
++ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+index 192cb48..f3cbecc 100644
+--- a/drivers/scsi/isci/request.c
++++ b/drivers/scsi/isci/request.c
+@@ -1849,7 +1849,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
+ frame_index,
+ (void **)&frame_buffer);
+
+- sci_controller_copy_sata_response(&ireq->stp.req,
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 0c6fb19..7de9993 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1934,7 +1934,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ }
+ break;
+ case COMP_SHORT_TX:
+- xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+@@ -2291,7 +2290,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ break;
+ case COMP_STALL:
+- xhci_warn(xhci, "WARN: Stalled endpoint\n");
++ xhci_dbg(xhci, "Stalled endpoint\n");
+ ep->ep_state |= EP_HALTED;
+ status = -EPIPE;
+ break;
+@@ -2301,11 +2300,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ break;
+ case COMP_SPLIT_ERR:
+ case COMP_TX_ERR:
+- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
++ xhci_dbg(xhci, "Transfer error on endpoint\n");
+ status = -EPROTO;
+ break;
+ case COMP_BABBLE:
+- xhci_warn(xhci, "WARN: babble error on endpoint\n");
++ xhci_dbg(xhci, "Babble error on endpoint\n");
+ status = -EOVERFLOW;
+ break;
+ case COMP_DB_ERR:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 5a23f4d..dab05d1 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -206,14 +206,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
+
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+- xhci_err(xhci, "failed to allocate MSI entry\n");
++ xhci_dbg(xhci, "failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret) {
+- xhci_err(xhci, "disable MSI interrupt\n");
++ xhci_dbg(xhci, "disable MSI interrupt\n");
+ pci_disable_msi(pdev);
+ }
+
+@@ -276,7 +276,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
+
+ ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ if (ret) {
+- xhci_err(xhci, "Failed to enable MSI-X\n");
++ xhci_dbg(xhci, "Failed to enable MSI-X\n");
+ goto free_entries;
+ }
+
+@@ -292,7 +292,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
+ return ret;
+
+ disable_msix:
+- xhci_err(xhci, "disable MSI-X interrupt\n");
++ xhci_dbg(xhci, "disable MSI-X interrupt\n");
+ xhci_free_irq(xhci);
+ pci_disable_msix(pdev);
+ free_entries:
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c334670..a5f875d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -157,6 +157,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
++#define NOVATELWIRELESS_PRODUCT_E362 0x9010
+ #define NOVATELWIRELESS_PRODUCT_G1 0xA001
+ #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
+@@ -192,6 +193,9 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
+ #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
+
++#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
++#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
++
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+ #define KYOCERA_PRODUCT_KPC680 0x180a
+@@ -282,6 +286,7 @@ static void option_instat_callback(struct urb *urb);
+ /* ALCATEL PRODUCTS */
+ #define ALCATEL_VENDOR_ID 0x1bbb
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
++#define ALCATEL_PRODUCT_X220_X500D 0x0017
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -705,6 +710,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -727,6 +733,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -1156,6 +1164,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 8bea45c..e5206de 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -764,7 +764,7 @@ int usb_serial_probe(struct usb_interface *interface,
+
+ if (retval) {
+ dbg("sub driver rejected device");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return retval;
+ }
+@@ -836,7 +836,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ */
+ if (num_bulk_in == 0 || num_bulk_out == 0) {
+ dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return -ENODEV;
+ }
+@@ -850,7 +850,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ if (num_ports == 0) {
+ dev_err(&interface->dev,
+ "Generic device with no bulk out, not allowed.\n");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return -EIO;
+ }
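+
+The three usb-serial hunks above replace a bare kfree() with usb_serial_put()
+on the probe error paths: the usb_serial structure is reference-counted, so
+freeing it directly can pull memory out from under another holder. A minimal
+userspace sketch of the put-vs-free idea (all names invented, single-threaded
+for brevity; the kernel uses struct kref with atomic counting):
+
+	#include <stdlib.h>
+
+	struct serial { int refcount; };        /* stands in for struct kref */
+
+	static void serial_put(struct serial *s)
+	{
+		if (--s->refcount == 0)         /* only the last holder frees */
+			free(s);
+	}
+
+	int main(void)
+	{
+		struct serial *s = malloc(sizeof(*s));
+		s->refcount = 2;                /* probe path + one other holder */
+		serial_put(s);                  /* error path: drop, don't free */
+		serial_put(s);                  /* last reference: freed here */
+		return 0;
+	}
+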
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 99796c5..bdf401b 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -36,6 +36,7 @@
+ * document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
+ * document number TBD : Panther Point
++ * document number TBD : Lynx Point
+ */
+
+ /*
+@@ -126,6 +127,7 @@ enum iTCO_chipsets {
+ TCO_PBG, /* Patsburg */
+ TCO_DH89XXCC, /* DH89xxCC */
+ TCO_PPT, /* Panther Point */
++ TCO_LPT, /* Lynx Point */
+ };
+
+ static struct {
+@@ -189,6 +191,7 @@ static struct {
+ {"Patsburg", 2},
+ {"DH89xxCC", 2},
+ {"Panther Point", 2},
++ {"Lynx Point", 2},
+ {NULL, 0}
+ };
+
+@@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
++ { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index 0301be6..465e49a 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -165,16 +165,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ struct gfs2_meta_header *mh;
+ struct gfs2_trans *tr;
+
+- lock_buffer(bd->bd_bh);
+- gfs2_log_lock(sdp);
+ if (!list_empty(&bd->bd_list_tr))
+- goto out;
++ return;
+ tr = current->journal_info;
+ tr->tr_touched = 1;
+ tr->tr_num_buf++;
+ list_add(&bd->bd_list_tr, &tr->tr_list_buf);
+ if (!list_empty(&le->le_list))
+- goto out;
++ return;
+ set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+ set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
+ gfs2_meta_check(sdp, bd->bd_bh);
+@@ -185,9 +183,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ sdp->sd_log_num_buf++;
+ list_add(&le->le_list, &sdp->sd_log_le_buf);
+ tr->tr_num_buf_new++;
+-out:
+- gfs2_log_unlock(sdp);
+- unlock_buffer(bd->bd_bh);
+ }
+
+ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+@@ -518,11 +513,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ struct address_space *mapping = bd->bd_bh->b_page->mapping;
+ struct gfs2_inode *ip = GFS2_I(mapping->host);
+
+- lock_buffer(bd->bd_bh);
+- gfs2_log_lock(sdp);
+ if (tr) {
+ if (!list_empty(&bd->bd_list_tr))
+- goto out;
++ return;
+ tr->tr_touched = 1;
+ if (gfs2_is_jdata(ip)) {
+ tr->tr_num_buf++;
+@@ -530,7 +523,7 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ }
+ }
+ if (!list_empty(&le->le_list))
+- goto out;
++ return;
+
+ set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+ set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
+@@ -542,9 +535,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ } else {
+ list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
+ }
+-out:
+- gfs2_log_unlock(sdp);
+- unlock_buffer(bd->bd_bh);
+ }
+
+ static void gfs2_check_magic(struct buffer_head *bh)
+diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
+index 86ac75d..6ab2a77 100644
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -145,14 +145,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_bufdata *bd;
+
++ lock_buffer(bh);
++ gfs2_log_lock(sdp);
+ bd = bh->b_private;
+ if (bd)
+ gfs2_assert(sdp, bd->bd_gl == gl);
+ else {
++ gfs2_log_unlock(sdp);
++ unlock_buffer(bh);
+ gfs2_attach_bufdata(gl, bh, meta);
+ bd = bh->b_private;
++ lock_buffer(bh);
++ gfs2_log_lock(sdp);
+ }
+ lops_add(sdp, &bd->bd_le);
++ gfs2_log_unlock(sdp);
++ unlock_buffer(bh);
+ }
+
+ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
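+
+The two gfs2 hunks (lops.c above and trans.c here) move the buffer and log
+locks out of the lo_add callbacks and up into gfs2_trans_add_bh(), dropping
+both around gfs2_attach_bufdata(), which can block. A rough pthread sketch of
+that drop-and-retake shape (lock and function names invented):
+
+	#include <pthread.h>
+
+	static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
+	static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
+
+	static void attach_bufdata(void) { /* may sleep or allocate */ }
+
+	static void trans_add_bh(int have_bufdata)
+	{
+		pthread_mutex_lock(&buf_lock);
+		pthread_mutex_lock(&log_lock);
+		if (!have_bufdata) {
+			/* drop both locks across the blocking call... */
+			pthread_mutex_unlock(&log_lock);
+			pthread_mutex_unlock(&buf_lock);
+			attach_bufdata();
+			/* ...and retake them before touching shared state */
+			pthread_mutex_lock(&buf_lock);
+			pthread_mutex_lock(&log_lock);
+		}
+		/* the lops_add() equivalent runs here, fully locked */
+		pthread_mutex_unlock(&log_lock);
+		pthread_mutex_unlock(&buf_lock);
+	}
+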
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index edac004..7c86b37 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1957,7 +1957,9 @@ retry:
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ spin_unlock(&journal->j_state_lock);
++ unlock_buffer(bh);
+ log_wait_commit(journal, tid);
++ lock_buffer(bh);
+ goto retry;
+ }
+ /*
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index 61e6723..0095a70 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -135,33 +135,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ struct page *pg;
+ struct inode *inode = mapping->host;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
++ struct jffs2_raw_inode ri;
++ uint32_t alloc_len = 0;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ int ret = 0;
+
++ D1(printk(KERN_DEBUG "%s()\n", __func__));
++
++ if (pageofs > inode->i_size) {
++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
++ if (ret)
++ return ret;
++ }
++
++ mutex_lock(&f->sem);
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+- if (!pg)
++ if (!pg) {
++ if (alloc_len)
++ jffs2_complete_reservation(c);
++ mutex_unlock(&f->sem);
+ return -ENOMEM;
++ }
+ *pagep = pg;
+
+- D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));
+-
+- if (pageofs > inode->i_size) {
++ if (alloc_len) {
+ /* Make new hole frag from old EOF to new page */
+- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+- struct jffs2_raw_inode ri;
+ struct jffs2_full_dnode *fn;
+- uint32_t alloc_len;
+
+ D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ (unsigned int)inode->i_size, pageofs));
+
+- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+- if (ret)
+- goto out_page;
+-
+- mutex_lock(&f->sem);
+ memset(&ri, 0, sizeof(ri));
+
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+@@ -188,7 +194,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ if (IS_ERR(fn)) {
+ ret = PTR_ERR(fn);
+ jffs2_complete_reservation(c);
+- mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+@@ -202,12 +207,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ jffs2_mark_node_obsolete(c, fn->raw);
+ jffs2_free_full_dnode(fn);
+ jffs2_complete_reservation(c);
+- mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ jffs2_complete_reservation(c);
+ inode->i_size = pageofs;
+- mutex_unlock(&f->sem);
+ }
+
+ /*
+@@ -216,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ * case of a short-copy.
+ */
+ if (!PageUptodate(pg)) {
+- mutex_lock(&f->sem);
+ ret = jffs2_do_readpage_nolock(inode, pg);
+- mutex_unlock(&f->sem);
+ if (ret)
+ goto out_page;
+ }
++ mutex_unlock(&f->sem);
+ D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
+ return ret;
+
+ out_page:
+ unlock_page(pg);
+ page_cache_release(pg);
++ mutex_unlock(&f->sem);
+ return ret;
+ }
+
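+
+The jffs2_write_begin() rework above is a lock-ordering fix: space
+reservation (which can trigger garbage collection) and f->sem are now taken
+before the page lock, so the write path and the GC path acquire the pair in
+the same order instead of deadlocking ABBA-style. Toy illustration of the
+rule (names invented):
+
+	#include <pthread.h>
+
+	static pthread_mutex_t f_sem     = PTHREAD_MUTEX_INITIALIZER;
+	static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
+
+	/* Every path takes f_sem -> page_lock in that order; two threads
+	 * taking the pair in opposite orders can each hold one lock and
+	 * wait forever for the other. */
+	static void write_begin(void)
+	{
+		pthread_mutex_lock(&f_sem);
+		pthread_mutex_lock(&page_lock);
+		/* ... prepare the page ... */
+		pthread_mutex_unlock(&page_lock);
+		pthread_mutex_unlock(&f_sem);
+	}
+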
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 5809abb..fe677c0 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1788,8 +1788,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+
+ BUG_ON(!th->t_trans_id);
+
+- dquot_initialize(inode);
++ reiserfs_write_unlock(inode->i_sb);
+ err = dquot_alloc_inode(inode);
++ reiserfs_write_lock(inode->i_sb);
+ if (err)
+ goto out_end_trans;
+ if (!dir->i_nlink) {
+@@ -1985,8 +1986,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+
+ out_end_trans:
+ journal_end(th, th->t_super, th->t_blocks_allocated);
++ reiserfs_write_unlock(inode->i_sb);
+ /* Drop can be outside and it needs more credits so it's better to have it outside */
+ dquot_drop(inode);
++ reiserfs_write_lock(inode->i_sb);
+ inode->i_flags |= S_NOQUOTA;
+ make_bad_inode(inode);
+
+@@ -3109,10 +3112,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ /* must be turned off for recursive notify_change calls */
+ ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
+
+- depth = reiserfs_write_lock_once(inode->i_sb);
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
+-
++ depth = reiserfs_write_lock_once(inode->i_sb);
+ if (attr->ia_valid & ATTR_SIZE) {
+ /* version 2 items will be caught by the s_maxbytes check
+ ** done for us in vmtruncate
+@@ -3176,7 +3178,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ error = journal_begin(&th, inode->i_sb, jbegin_count);
+ if (error)
+ goto out;
++ reiserfs_write_unlock_once(inode->i_sb, depth);
+ error = dquot_transfer(inode, attr);
++ depth = reiserfs_write_lock_once(inode->i_sb);
+ if (error) {
+ journal_end(&th, inode->i_sb, jbegin_count);
+ goto out;
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index 313d39d..3ae9926 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
+ key2type(&(key->on_disk_key)));
+ #endif
+
++ reiserfs_write_unlock(inode->i_sb);
+ retval = dquot_alloc_space_nodirty(inode, pasted_size);
++ reiserfs_write_lock(inode->i_sb);
+ if (retval) {
+ pathrelse(search_path);
+ return retval;
+@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
+ "reiserquota insert_item(): allocating %u id=%u type=%c",
+ quota_bytes, inode->i_uid, head2type(ih));
+ #endif
++ reiserfs_write_unlock(inode->i_sb);
+ /* We can't dirty inode here. It would be immediately written but
+ * appropriate stat item isn't inserted yet... */
+ retval = dquot_alloc_space_nodirty(inode, quota_bytes);
++ reiserfs_write_lock(inode->i_sb);
+ if (retval) {
+ pathrelse(path);
+ return retval;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 5e3527b..569498a 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -254,7 +254,9 @@ static int finish_unfinished(struct super_block *s)
+ retval = remove_save_link_only(s, &save_link_key, 0);
+ continue;
+ }
++ reiserfs_write_unlock(s);
+ dquot_initialize(inode);
++ reiserfs_write_lock(s);
+
+ if (truncate && S_ISDIR(inode->i_mode)) {
+ /* We got a truncate request for a dir which is impossible.
+@@ -1207,7 +1209,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ kfree(qf_names[i]);
+ #endif
+ err = -EINVAL;
+- goto out_err;
++ goto out_unlock;
+ }
+ #ifdef CONFIG_QUOTA
+ handle_quota_files(s, qf_names, &qfmt);
+@@ -1250,7 +1252,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ if (blocks) {
+ err = reiserfs_resize(s, blocks);
+ if (err != 0)
+- goto out_err;
++ goto out_unlock;
+ }
+
+ if (*mount_flags & MS_RDONLY) {
+@@ -1260,9 +1262,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ /* it is read-only already */
+ goto out_ok;
+
++ /*
++ * Drop write lock. Quota will retake it when needed and lock
++ * ordering requires calling dquot_suspend() without it.
++ */
++ reiserfs_write_unlock(s);
+ err = dquot_suspend(s, -1);
+ if (err < 0)
+ goto out_err;
++ reiserfs_write_lock(s);
+
+ /* try to remount file system with read-only permissions */
+ if (sb_umount_state(rs) == REISERFS_VALID_FS
+@@ -1272,7 +1280,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+
+ err = journal_begin(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+
+ /* Mounting a rw partition read-only. */
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+@@ -1287,7 +1295,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+
+ if (reiserfs_is_journal_aborted(journal)) {
+ err = journal->j_errno;
+- goto out_err;
++ goto out_unlock;
+ }
+
+ handle_data_mode(s, mount_options);
+@@ -1296,7 +1304,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
+ err = journal_begin(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+
+ /* Mount a partition which is read-only, read-write */
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+@@ -1313,11 +1321,17 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ SB_JOURNAL(s)->j_must_wait = 1;
+ err = journal_end(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+ s->s_dirt = 0;
+
+ if (!(*mount_flags & MS_RDONLY)) {
++ /*
++ * Drop write lock. Quota will retake it when needed and lock
++ * ordering requires calling dquot_resume() without it.
++ */
++ reiserfs_write_unlock(s);
+ dquot_resume(s, -1);
++ reiserfs_write_lock(s);
+ finish_unfinished(s);
+ reiserfs_xattr_init(s, *mount_flags);
+ }
+@@ -1327,9 +1341,10 @@ out_ok:
+ reiserfs_write_unlock(s);
+ return 0;
+
++out_unlock:
++ reiserfs_write_unlock(s);
+ out_err:
+ kfree(new_opts);
+- reiserfs_write_unlock(s);
+ return err;
+ }
+
+@@ -1953,13 +1968,15 @@ static int reiserfs_write_dquot(struct dquot *dquot)
+ REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(dquot->dq_sb);
+ ret = dquot_commit(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(dquot->dq_sb);
+ return ret;
+ }
+@@ -1975,13 +1992,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
+ REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(dquot->dq_sb);
+ ret = dquot_acquire(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(dquot->dq_sb);
+ return ret;
+ }
+@@ -1995,19 +2014,21 @@ static int reiserfs_release_dquot(struct dquot *dquot)
+ ret =
+ journal_begin(&th, dquot->dq_sb,
+ REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
++ reiserfs_write_unlock(dquot->dq_sb);
+ if (ret) {
+ /* Release dquot anyway to avoid endless cycle in dqput() */
+ dquot_release(dquot);
+ goto out;
+ }
+ ret = dquot_release(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
+ reiserfs_write_unlock(dquot->dq_sb);
++out:
+ return ret;
+ }
+
+@@ -2032,11 +2053,13 @@ static int reiserfs_write_info(struct super_block *sb, int type)
+ ret = journal_begin(&th, sb, 2);
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(sb);
+ ret = dquot_commit_info(sb, type);
++ reiserfs_write_lock(sb);
+ err = journal_end(&th, sb, 2);
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(sb);
+ return ret;
+ }
+@@ -2060,8 +2083,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
+ struct inode *inode;
+ struct reiserfs_transaction_handle th;
+
+- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
+- return -EINVAL;
++ reiserfs_write_lock(sb);
++ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) {
++ err = -EINVAL;
++ goto out;
++ }
+
+ /* Quotafile not on the same filesystem? */
+ if (path->mnt->mnt_sb != sb) {
+@@ -2103,8 +2129,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
+ if (err)
+ goto out;
+ }
+- err = dquot_quota_on(sb, type, format_id, path);
++ reiserfs_write_unlock(sb);
++ return dquot_quota_on(sb, type, format_id, path);
+ out:
++ reiserfs_write_unlock(sb);
+ return err;
+ }
+
+@@ -2178,7 +2206,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ tocopy = sb->s_blocksize - offset < towrite ?
+ sb->s_blocksize - offset : towrite;
+ tmp_bh.b_state = 0;
++ reiserfs_write_lock(sb);
+ err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
++ reiserfs_write_unlock(sb);
+ if (err)
+ goto out;
+ if (offset || tocopy != sb->s_blocksize)
+@@ -2194,10 +2224,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ flush_dcache_page(bh->b_page);
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
++ reiserfs_write_lock(sb);
+ reiserfs_prepare_for_journal(sb, bh, 1);
+ journal_mark_dirty(current->journal_info, sb, bh);
+ if (!journal_quota)
+ reiserfs_add_ordered_list(inode, bh);
++ reiserfs_write_unlock(sb);
+ brelse(bh);
+ offset = 0;
+ towrite -= tocopy;
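+
+Every reiserfs hunk above repeats one pattern: the per-super write lock ranks
+below the quota locks, so it is dropped across each dquot_*() call and
+retaken afterwards. A hedged sketch of a helper that would centralise the
+dance (the wrapper and its signature are invented; reiserfs_write_lock/unlock
+are the primitives used in the hunks):
+
+	static int reiserfs_call_unlocked(struct super_block *sb,
+					  int (*fn)(struct super_block *sb))
+	{
+		int ret;
+
+		reiserfs_write_unlock(sb);  /* quota locks must come first */
+		ret = fn(sb);
+		reiserfs_write_lock(sb);
+		return ret;
+	}
+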
+diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
+index 2559d17..5dc48ca 100644
+--- a/fs/ubifs/find.c
++++ b/fs/ubifs/find.c
+@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
+ if (!lprops) {
+ lprops = ubifs_fast_find_freeable(c);
+ if (!lprops) {
+- ubifs_assert(c->freeable_cnt == 0);
+- if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
++ /*
++ * The first condition means the following: go scan the
++ * LPT if there are uncategorized lprops, which means
++ * there may be freeable LEBs there (UBIFS does not
++ * store the information about freeable LEBs in the
++ * master node).
++ */
++ if (c->in_a_category_cnt != c->main_lebs ||
++ c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
++ ubifs_assert(c->freeable_cnt == 0);
+ lprops = scan_for_leb_for_idx(c);
+ if (IS_ERR(lprops)) {
+ err = PTR_ERR(lprops);
+diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
+index f8a181e..ea9d491 100644
+--- a/fs/ubifs/lprops.c
++++ b/fs/ubifs/lprops.c
+@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
+ default:
+ ubifs_assert(0);
+ }
++
+ lprops->flags &= ~LPROPS_CAT_MASK;
+ lprops->flags |= cat;
++ c->in_a_category_cnt += 1;
++ ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
+ }
+
+ /**
+@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
+ default:
+ ubifs_assert(0);
+ }
++
++ c->in_a_category_cnt -= 1;
++ ubifs_assert(c->in_a_category_cnt >= 0);
+ }
+
+ /**
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 27f2255..8bbc99e 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1187,6 +1187,8 @@ struct ubifs_debug_info;
+ * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
+ * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
+ * @freeable_cnt: number of freeable LEBs in @freeable_list
++ * @in_a_category_cnt: count of lprops which are in a certain category, which
++ * basically means that they were loaded from the flash
+ *
+ * @ltab_lnum: LEB number of LPT's own lprops table
+ * @ltab_offs: offset of LPT's own lprops table
+@@ -1416,6 +1418,7 @@ struct ubifs_info {
+ struct list_head freeable_list;
+ struct list_head frdi_idx_list;
+ int freeable_cnt;
++ int in_a_category_cnt;
+
+ int ltab_lnum;
+ int ltab_offs;
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index cf0ac05..2f5a8f7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1167,9 +1167,14 @@ xfs_buf_bio_end_io(
+ {
+ xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
+
+- xfs_buf_ioerror(bp, -error);
++ /*
++ * don't overwrite existing errors - otherwise we can lose errors on
++ * buffers that require multiple bios to complete.
++ */
++ if (!bp->b_error)
++ xfs_buf_ioerror(bp, -error);
+
+- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
++ if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+ invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
+ _xfs_buf_ioend(bp, 1);
+@@ -1245,6 +1250,11 @@ next_chunk:
+ if (size)
+ goto next_chunk;
+ } else {
++ /*
++ * This is guaranteed not to be the last io reference count
++ * because the caller (xfs_buf_iorequest) holds a count itself.
++ */
++ atomic_dec(&bp->b_io_remaining);
+ xfs_buf_ioerror(bp, EIO);
+ bio_put(bio);
+ }
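+
+The xhci_buf_bio_end_io() change, er, the xfs_buf_bio_end_io() change above
+preserves the first error reported by any of a buffer's bios; without the
+guard, a later successful completion resets b_error to 0 and a failed
+multi-bio buffer looks clean. Standalone demonstration of first-error-wins:
+
+	#include <stdio.h>
+
+	struct buf { int b_error; };
+
+	static void bio_end_io(struct buf *bp, int error)
+	{
+		if (!bp->b_error)       /* don't clobber an earlier error */
+			bp->b_error = error;
+	}
+
+	int main(void)
+	{
+		struct buf bp = { 0 };
+		bio_end_io(&bp, -5);    /* first bio fails (-EIO) */
+		bio_end_io(&bp, 0);     /* second bio succeeds */
+		printf("%d\n", bp.b_error);     /* still -5, not 0 */
+		return 0;
+	}
+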
+diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
+index dd2e44f..9d709d1 100644
+--- a/include/linux/ptp_clock_kernel.h
++++ b/include/linux/ptp_clock_kernel.h
+@@ -50,7 +50,8 @@ struct ptp_clock_request {
+ * clock operations
+ *
+ * @adjfreq: Adjusts the frequency of the hardware clock.
+- * parameter delta: Desired period change in parts per billion.
++ * parameter delta: Desired frequency offset from nominal frequency
++ * in parts per billion
+ *
+ * @adjtime: Shifts the time of the hardware clock.
+ * parameter delta: Desired change in nanoseconds.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 80fb1c6..77bccfc 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ struct futex_pi_state **ps,
+ struct task_struct *task, int set_waiters)
+ {
+- int lock_taken, ret, ownerdied = 0;
++ int lock_taken, ret, force_take = 0;
+ u32 uval, newval, curval, vpid = task_pid_vnr(task);
+
+ retry:
+@@ -755,17 +755,15 @@ retry:
+ newval = curval | FUTEX_WAITERS;
+
+ /*
+- * There are two cases, where a futex might have no owner (the
+- * owner TID is 0): OWNER_DIED. We take over the futex in this
+- * case. We also do an unconditional take over, when the owner
+- * of the futex died.
+- *
+- * This is safe as we are protected by the hash bucket lock !
++ * Should we force take the futex? See below.
+ */
+- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
+- /* Keep the OWNER_DIED bit */
++ if (unlikely(force_take)) {
++ /*
++ * Keep the OWNER_DIED and the WAITERS bit and set the
++ * new TID value.
++ */
+ newval = (curval & ~FUTEX_TID_MASK) | vpid;
+- ownerdied = 0;
++ force_take = 0;
+ lock_taken = 1;
+ }
+
+@@ -775,7 +773,7 @@ retry:
+ goto retry;
+
+ /*
+- * We took the lock due to owner died take over.
++ * We took the lock due to forced take over.
+ */
+ if (unlikely(lock_taken))
+ return 1;
+@@ -790,20 +788,25 @@ retry:
+ switch (ret) {
+ case -ESRCH:
+ /*
+- * No owner found for this futex. Check if the
+- * OWNER_DIED bit is set to figure out whether
+- * this is a robust futex or not.
++ * We failed to find an owner for this
++ * futex. So we have no pi_state to block
++ * on. This can happen in two cases:
++ *
++ * 1) The owner died
++ * 2) A stale FUTEX_WAITERS bit
++ *
++ * Re-read the futex value.
+ */
+ if (get_futex_value_locked(&curval, uaddr))
+ return -EFAULT;
+
+ /*
+- * We simply start over in case of a robust
+- * futex. The code above will take the futex
+- * and return happy.
++ * If the owner died or we have a stale
++ * WAITERS bit the owner TID in the user space
++ * futex is 0.
+ */
+- if (curval & FUTEX_OWNER_DIED) {
+- ownerdied = 1;
++ if (!(curval & FUTEX_TID_MASK)) {
++ force_take = 1;
+ goto retry;
+ }
+ default:
+@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q)
+ {
+ struct task_struct *p = q->task;
+
++ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
++ return;
++
+ /*
+ * We set q->lock_ptr = NULL _before_ we wake up the task. If
+ * a non-futex wake up happens on another CPU then the task
+@@ -1075,6 +1081,10 @@ retry_private:
+
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &key1)) {
++ if (this->pi_state || this->rt_waiter) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
+ wake_futex(this);
+ if (++ret >= nr_wake)
+ break;
+@@ -1087,6 +1097,10 @@ retry_private:
+ op_ret = 0;
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &key2)) {
++ if (this->pi_state || this->rt_waiter) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
+ wake_futex(this);
+ if (++op_ret >= nr_wake2)
+ break;
+@@ -1095,6 +1109,7 @@ retry_private:
+ ret += op_ret;
+ }
+
++out_unlock:
+ double_unlock_hb(hb1, hb2);
+ out_put_keys:
+ put_futex_key(&key2);
+@@ -1384,9 +1399,13 @@ retry_private:
+ /*
+ * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+ * be paired with each other and no other futex ops.
++ *
++ * We should never be requeueing a futex_q with a pi_state,
++ * which is awaiting a futex_unlock_pi().
+ */
+ if ((requeue_pi && !this->rt_waiter) ||
+- (!requeue_pi && this->rt_waiter)) {
++ (!requeue_pi && this->rt_waiter) ||
++ this->pi_state) {
+ ret = -EINVAL;
+ break;
+ }
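+
+The futex rework above generalises the dead-owner takeover: rather than
+keying on FUTEX_OWNER_DIED alone, the kernel now force-takes the futex
+whenever the TID field of the user-space word is zero, which also covers a
+stale FUTEX_WAITERS bit. Standalone check using the futex word layout:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	#define FUTEX_TID_MASK    0x3fffffff
+	#define FUTEX_OWNER_DIED  0x40000000u
+	#define FUTEX_WAITERS     0x80000000u
+
+	int main(void)
+	{
+		/* Stale waiter bit, owner exited cleanly: OWNER_DIED is
+		 * clear but TID == 0, so the fixed code still takes over. */
+		uint32_t uval = FUTEX_WAITERS;
+
+		printf("owner died: %d\n", !!(uval & FUTEX_OWNER_DIED)); /* 0 */
+		printf("force take: %d\n", !(uval & FUTEX_TID_MASK));    /* 1 */
+		return 0;
+	}
+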
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 1d7bca7..a8bc4d9 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu)
+ return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
+ }
+
+-static unsigned long get_sample_period(void)
++static u64 get_sample_period(void)
+ {
+ /*
+ * convert watchdog_thresh from seconds to ns
+@@ -121,7 +121,7 @@ static unsigned long get_sample_period(void)
+ * increment before the hardlockup detector generates
+ * a warning
+ */
+- return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
++ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+ }
+
+ /* Commands for resetting the watchdog */
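+
+The get_sample_period() hunk is a 32-bit overflow fix: on 32-bit kernels the
+product threshold * (NSEC_PER_SEC / 5) is computed in a 32-bit unsigned long
+and wraps once the threshold exceeds roughly 21 seconds; widening one operand
+to u64 forces a 64-bit multiply. Standalone demonstration:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	#define NSEC_PER_SEC 1000000000
+
+	int main(void)
+	{
+		uint32_t thresh = 30;   /* seconds; anything > ~21 wraps */
+
+		uint32_t wrapped = thresh * (uint32_t)(NSEC_PER_SEC / 5);
+		uint64_t correct = thresh * (uint64_t)(NSEC_PER_SEC / 5);
+
+		printf("32-bit: %u ns\n", wrapped);     /* 1705032704 */
+		printf("64-bit: %llu ns\n",
+		       (unsigned long long)correct);    /* 6000000000 */
+		return 0;
+	}
+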
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 43a19c5..d551d5f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2052,8 +2052,10 @@ static int rescuer_thread(void *__wq)
+ repeat:
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- if (kthread_should_stop())
++ if (kthread_should_stop()) {
++ __set_current_state(TASK_RUNNING);
+ return 0;
++ }
+
+ /*
+ * See whether any cpu is asking for help. Unbounded
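+
+The rescuer_thread() hunk restores TASK_RUNNING before returning on the stop
+path: a kthread that returns from its thread function while still marked
+TASK_INTERRUPTIBLE can be scheduled away during exit and never run again.
+Sketch of the canonical loop shape (work_pending()/do_work() are invented
+placeholders; the state and kthread calls are the real kernel API):
+
+	static int rescuer(void *arg)
+	{
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			if (kthread_should_stop()) {
+				__set_current_state(TASK_RUNNING); /* the fix */
+				return 0;
+			}
+			if (!work_pending()) {
+				schedule();     /* wakes in TASK_RUNNING */
+				continue;
+			}
+			__set_current_state(TASK_RUNNING);
+			do_work();
+		}
+	}
+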
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index c8425b1..d027a24 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1457,17 +1457,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
+ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
+ {
+ u64 limit;
+- u64 memsw;
+
+ limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+- limit += total_swap_pages << PAGE_SHIFT;
+
+- memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ /*
+- * If memsw is finite and limits the amount of swap space available
+- * to this memcg, return that limit.
++ * Do not consider swap space if we cannot swap due to swappiness
+ */
+- return min(limit, memsw);
++ if (mem_cgroup_swappiness(memcg)) {
++ u64 memsw;
++
++ limit += total_swap_pages << PAGE_SHIFT;
++ memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
++
++ /*
++ * If memsw is finite and limits the amount of swap space
++ * available to this memcg, return that limit.
++ */
++ limit = min(limit, memsw);
++ }
++
++ return limit;
+ }
+
+ /*
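+
+The mem_cgroup_get_limit() change stops counting swap toward the OOM sizing
+when the group's effective swappiness is 0, since such a group can never
+actually push pages out to swap. Worked example with made-up numbers:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint64_t res_limit  = 512ull  << 20;  /* memory limit   */
+		uint64_t total_swap = 1024ull << 20;  /* swap on host   */
+		uint64_t memsw      = 768ull  << 20;  /* mem+swap limit */
+		int      swappiness = 0;              /* cannot swap    */
+
+		uint64_t limit = res_limit;
+		if (swappiness) {
+			uint64_t with_swap = res_limit + total_swap;
+			limit = with_swap < memsw ? with_swap : memsw;
+		}
+
+		/* swappiness 0 -> 512 MiB; nonzero -> min(1536, 768) = 768 */
+		printf("%llu MiB\n", (unsigned long long)(limit >> 20));
+		return 0;
+	}
+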
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5bd5bb1..1b03878 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1475,9 +1475,17 @@ int soft_offline_page(struct page *page, int flags)
+ {
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
++ struct page *hpage = compound_trans_head(page);
+
+ if (PageHuge(page))
+ return soft_offline_huge_page(page, flags);
++ if (PageTransHuge(hpage)) {
++ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
++ pr_info("soft offline: %#lx: failed to split THP\n",
++ pfn);
++ return -EBUSY;
++ }
++ }
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 126ca35..2d46e23 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -595,7 +595,7 @@ static void shmem_evict_inode(struct inode *inode)
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+- BUG_ON(inode->i_blocks);
++ WARN_ON(inode->i_blocks);
+ shmem_free_inode(inode->i_sb);
+ end_writeback(inode);
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index bf7d3cc..42935b5 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -622,7 +622,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+ {
+ return; /* XXX: Not implemented yet */
+ }
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ }
+ #else
+@@ -663,10 +663,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+ get_order(sizeof(struct page) * nr_pages));
+ }
+
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ unsigned long maps_section_nr, removing_section_nr, i;
+ unsigned long magic;
++ struct page *page = virt_to_page(memmap);
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ magic = (unsigned long) page->lru.next;
+@@ -715,13 +716,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+ */
+
+ if (memmap) {
+- struct page *memmap_page;
+- memmap_page = virt_to_page(memmap);
+-
+ nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+ >> PAGE_SHIFT;
+
+- free_map_bootmem(memmap_page, nr_pages);
++ free_map_bootmem(memmap, nr_pages);
+ }
+ }
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 313381c..1e4ee1a 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2492,6 +2492,19 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+ }
+ #endif
+
++static bool zone_balanced(struct zone *zone, int order,
++ unsigned long balance_gap, int classzone_idx)
++{
++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
++ balance_gap, classzone_idx, 0))
++ return false;
++
++ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
++ return false;
++
++ return true;
++}
++
+ /*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+@@ -2551,8 +2564,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+- i, 0))
++ if (!zone_balanced(zone, order, 0, i))
+ all_zones_ok = false;
+ else
+ balanced += zone->present_pages;
+@@ -2655,8 +2667,7 @@ loop_again:
+ shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ &sc, priority, 0);
+
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone), 0, 0)) {
++ if (!zone_balanced(zone, order, 0, 0)) {
+ end_zone = i;
+ break;
+ } else {
+@@ -2717,9 +2728,8 @@ loop_again:
+ (zone->present_pages +
+ KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone) + balance_gap,
+- end_zone, 0)) {
++ if (!zone_balanced(zone, order,
++ balance_gap, end_zone)) {
+ shrink_zone(priority, zone, &sc);
+
+ reclaim_state->reclaimed_slab = 0;
+@@ -2746,8 +2756,7 @@ loop_again:
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone), end_zone, 0)) {
++ if (!zone_balanced(zone, order, 0, end_zone)) {
+ all_zones_ok = 0;
+ /*
+ * We are still under min water mark. This
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 151b773..3910c1f 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ op->sk = sk;
+ op->ifindex = ifindex;
+
++ /* ifindex for timeout events w/o previous frame reception */
++ op->rx_ifindex = ifindex;
++
+ /* initialize uninitialized (kzalloc) structure */
+ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ op->timer.function = bcm_rx_timeout_handler;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 480be72..2aac4ec 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2829,8 +2829,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ if (unlikely(tcpu != next_cpu) &&
+ (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+- rflow->last_qtail)) >= 0))
++ rflow->last_qtail)) >= 0)) {
++ tcpu = next_cpu;
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
++ }
+
+ if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ *rflowp = rflow;
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 277faef..0387da0 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -308,7 +308,8 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+- if (ha->addr == dev->dev_addr && ha->refcount == 1)
++ if (!memcmp(ha->addr, addr, dev->addr_len) &&
++ ha->type == addr_type && ha->refcount == 1)
+ return -ENOENT;
+
+ err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
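+
+The dev_addr_del() fix swaps a pointer comparison (ha->addr == dev->dev_addr)
+for a byte comparison plus an address-type check: two buffers can hold the
+same MAC address without being the same buffer, so the old test missed the
+protected entry. Standalone illustration:
+
+	#include <stdio.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		unsigned char a[6] = { 0, 1, 2, 3, 4, 5 };
+		unsigned char b[6] = { 0, 1, 2, 3, 4, 5 };
+
+		/* pointer test: 0, even though the addresses match */
+		printf("ptr eq: %d\n", (void *)a == (void *)b);
+		/* content test, as the fix uses: 1 */
+		printf("memcmp: %d\n", memcmp(a, b, sizeof(a)) == 0);
+		return 0;
+	}
+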
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 09ff51b..0106d25 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -468,18 +468,27 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ struct inet_sock *inet = inet_sk(sk);
+ int val = 0, err;
+
+- if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
+- (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
+- (1<<IP_RETOPTS) | (1<<IP_TOS) |
+- (1<<IP_TTL) | (1<<IP_HDRINCL) |
+- (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
+- (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
+- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+- (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
+- optname == IP_MULTICAST_TTL ||
+- optname == IP_MULTICAST_ALL ||
+- optname == IP_MULTICAST_LOOP ||
+- optname == IP_RECVORIGDSTADDR) {
++ switch (optname) {
++ case IP_PKTINFO:
++ case IP_RECVTTL:
++ case IP_RECVOPTS:
++ case IP_RECVTOS:
++ case IP_RETOPTS:
++ case IP_TOS:
++ case IP_TTL:
++ case IP_HDRINCL:
++ case IP_MTU_DISCOVER:
++ case IP_RECVERR:
++ case IP_ROUTER_ALERT:
++ case IP_FREEBIND:
++ case IP_PASSSEC:
++ case IP_TRANSPARENT:
++ case IP_MINTTL:
++ case IP_NODEFRAG:
++ case IP_MULTICAST_TTL:
++ case IP_MULTICAST_ALL:
++ case IP_MULTICAST_LOOP:
++ case IP_RECVORIGDSTADDR:
+ if (optlen >= sizeof(int)) {
+ if (get_user(val, (int __user *) optval))
+ return -EFAULT;
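+
+The do_ip_setsockopt() hunk replaces a (1 << optname) bitmask with a switch.
+The bitmask trick only covers option values below the integer width, which is
+why options such as IP_MULTICAST_TTL (value 33) had to be OR'ed on
+separately: shifting a 32-bit int by 33 is undefined behaviour in C. Small
+demonstration of the hazard (shift count masked explicitly so the example
+itself stays well-defined):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int optname = 33;       /* IP_MULTICAST_TTL */
+
+		/* (1 << 33) is undefined for a 32-bit int; x86 hardware
+		 * masks the count to 33 & 31 == 1, silently testing bit 1. */
+		unsigned mask = 1u << (optname & 31);
+		printf("bit actually tested: 0x%x\n", mask);    /* 0x2 */
+		return 0;
+	}
+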
+diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
+index 9290048..e32b542 100644
+--- a/net/ipv4/netfilter/nf_nat_standalone.c
++++ b/net/ipv4/netfilter/nf_nat_standalone.c
+@@ -194,7 +194,8 @@ nf_nat_out(unsigned int hooknum,
+
+ if ((ct->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.dst.u3.ip) ||
+- (ct->tuplehash[dir].tuple.src.u.all !=
++ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
++ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)
+ )
+ return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
+@@ -230,7 +231,8 @@ nf_nat_local_fn(unsigned int hooknum,
+ ret = NF_DROP;
+ }
+ #ifdef CONFIG_XFRM
+- else if (ct->tuplehash[dir].tuple.dst.u.all !=
++ else if (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
++ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (ip_xfrm_me_harder(skb))
+ ret = NF_DROP;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 26cb08c..b204df8 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -798,6 +798,7 @@ pref_skip_coa:
+ if (val < 0 || val > 255)
+ goto e_inval;
+ np->min_hopcount = val;
++ retv = 0;
+ break;
+ case IPV6_DONTFRAG:
+ np->dontfrag = valbool;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 8c7364b..9e20cb8 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -965,10 +965,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+
+ mutex_lock(&sdata->u.ibss.mtx);
+
+- sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+- memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+- sdata->u.ibss.ssid_len = 0;
+-
+ active_ibss = ieee80211_sta_active_ibss(sdata);
+
+ if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+@@ -989,6 +985,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+ }
+ }
+
++ ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
++ memset(ifibss->bssid, 0, ETH_ALEN);
++ ifibss->ssid_len = 0;
++
+ sta_info_flush(sdata->local, sdata);
+
+ /* remove beacon */
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 1fdd8ff..1c775f0 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1129,6 +1129,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
++ unsigned long flags;
+
+ clear_sta_flag(sta, WLAN_STA_SP);
+
+@@ -1144,12 +1145,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+
++ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
++ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ filtered += tmp - count;
+ count = tmp;
+
++ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
++ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ buffered += tmp - count;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 8235b86..57ad466 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -159,21 +159,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+ * sCL -> sSS
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
++/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
+ /*
+ * sNO -> sIV Too late and no reason to do anything
+ * sSS -> sIV Client can't send SYN and then SYN/ACK
+ * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
+- * sSR -> sIG
+- * sES -> sIG Error: SYNs in window outside the SYN_SENT state
+- * are errors. Receiver will reply with RST
+- * and close the connection.
+- * Or we are not in sync and hold a dead connection.
+- * sFW -> sIG
+- * sCW -> sIG
+- * sLA -> sIG
+- * sTW -> sIG
+- * sCL -> sIG
++ * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
++ * sES -> sIV Invalid SYN/ACK packets sent by the client
++ * sFW -> sIV
++ * sCW -> sIV
++ * sLA -> sIV
++ * sTW -> sIV
++ * sCL -> sIV
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+@@ -628,15 +625,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ ack = sack = receiver->td_end;
+ }
+
+- if (seq == end
+- && (!tcph->rst
+- || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
++ if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
+ /*
+- * Packets contains no data: we assume it is valid
+- * and check the ack value only.
+- * However RST segments are always validated by their
+- * SEQ number, except when seq == 0 (reset sent answering
+- * SYN.
++ * RST sent answering SYN.
+ */
+ seq = end = sender->td_end;
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index fa39731..0b08905 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -125,9 +125,8 @@ static const struct ieee80211_regdomain world_regdom = {
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+- /* IEEE 802.11b/g, channels 12..13. No HT40
+- * channel fits here. */
+- REG_RULE(2467-10, 2472+10, 20, 6, 20,
++ /* IEEE 802.11b/g, channels 12..13. */
++ REG_RULE(2467-10, 2472+10, 40, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS),
+ /* IEEE 802.11 channel 14 - Only JP enables
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 4450fbe..92e24bb 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -202,8 +202,8 @@ static void devcgroup_destroy(struct cgroup_subsys *ss,
+
+ dev_cgroup = cgroup_to_devcgroup(cgroup);
+ list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+- list_del(&wh->list);
+- kfree(wh);
++ list_del_rcu(&wh->list);
++ kfree_rcu(wh, rcu);
+ }
+ kfree(dev_cgroup);
+ }
+@@ -278,7 +278,7 @@ static int may_access_whitelist(struct dev_cgroup *c,
+ {
+ struct dev_whitelist_item *whitem;
+
+- list_for_each_entry(whitem, &c->whitelist, list) {
++ list_for_each_entry_rcu(whitem, &c->whitelist, list) {
+ if (whitem->type & DEV_ALL)
+ return 1;
+ if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
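+
+The devcgroup_destroy() change pairs with the list_for_each_entry_rcu()
+conversion below it: once the whitelist is traversed locklessly under RCU,
+entries must be unlinked with list_del_rcu() and their memory reclaimed only
+after a grace period via kfree_rcu(). Hedged kernel-style sketch of the
+writer side (struct and function names invented):
+
+	#include <linux/list.h>
+	#include <linux/rcupdate.h>
+	#include <linux/slab.h>
+
+	struct wl_item {
+		struct list_head list;
+		struct rcu_head  rcu;
+	};
+
+	static void wl_remove(struct wl_item *it)
+	{
+		list_del_rcu(&it->list); /* in-flight readers may still see it */
+		kfree_rcu(it, rcu);      /* free deferred past the grace period */
+	}
+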
+diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
+index 3bf46ab..46a5b81 100644
+--- a/security/selinux/netnode.c
++++ b/security/selinux/netnode.c
+@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
+ if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+ struct sel_netnode *tail;
+ tail = list_entry(
+- rcu_dereference(sel_netnode_hash[idx].list.prev),
++ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
++ lockdep_is_held(&sel_netnode_lock)),
+ struct sel_netnode, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3ce2da2..1a09fbf 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6026,6 +6026,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
++ { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
++ { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
++ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
+index 41ca4d9..f81b185 100644
+--- a/sound/soc/codecs/wm8978.c
++++ b/sound/soc/codecs/wm8978.c
+@@ -749,7 +749,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream,
+ wm8978->mclk_idx = -1;
+ f_sel = wm8978->f_mclk;
+ } else {
+- if (!wm8978->f_pllout) {
++ if (!wm8978->f_opclk) {
+ /* We only enter here, if OPCLK is not used */
+ int ret = wm8978_configure_pll(codec);
+ if (ret < 0)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 0dc441c..b516488 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3009,7 +3009,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
+ {
+ struct snd_soc_codec *codec;
+
+- list_for_each_entry(codec, &card->codec_dev_list, list) {
++ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ soc_dapm_shutdown_codec(&codec->dapm);
+ if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+ snd_soc_dapm_set_bias_level(&codec->dapm,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index c83f614..eeefbce 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint {
+ struct snd_usb_midi_out_endpoint* ep;
+ struct snd_rawmidi_substream *substream;
+ int active;
++ bool autopm_reference;
+ uint8_t cable; /* cable number << 4 */
+ uint8_t state;
+ #define STATE_UNKNOWN 0
+@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ return -ENXIO;
+ }
+ err = usb_autopm_get_interface(umidi->iface);
+- if (err < 0)
++ port->autopm_reference = err >= 0;
++ if (err < 0 && err != -EACCES)
+ return -EIO;
+ substream->runtime->private_data = port;
+ port->state = STATE_UNKNOWN;
+@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
++ struct usbmidi_out_port *port = substream->runtime->private_data;
+
+ substream_open(substream, 0);
+- usb_autopm_put_interface(umidi->iface);
++ if (port->autopm_reference)
++ usb_autopm_put_interface(umidi->iface);
+ return 0;
+ }
+
diff --git a/3.2.54/1035_linux-3.2.36.patch b/3.2.54/1035_linux-3.2.36.patch
new file mode 100644
index 0000000..5d192a3
--- /dev/null
+++ b/3.2.54/1035_linux-3.2.36.patch
@@ -0,0 +1,6434 @@
+diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
+index 84d46c0..eb5502e 100644
+--- a/Documentation/hwmon/coretemp
++++ b/Documentation/hwmon/coretemp
+@@ -6,7 +6,9 @@ Supported chips:
+ Prefix: 'coretemp'
+ CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
+ 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
+- 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
++ 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
++ 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
++ 0x36 (Cedar Trail Atom)
+ Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
+ Volume 3A: System Programming Guide
+ http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
+@@ -65,6 +67,11 @@ Process Processor TjMax(C)
+ U3400 105
+ P4505/P4500 90
+
++32nm Atom Processors
++ Z2460 90
++ D2700/2550/2500 100
++ N2850/2800/2650/2600 100
++
+ 45nm Xeon Processors 5400 Quad-Core
+ X5492, X5482, X5472, X5470, X5460, X5450 85
+ E5472, E5462, E5450/40/30/20/10/05 85
+@@ -85,6 +92,9 @@ Process Processor TjMax(C)
+ N475/470/455/450 100
+ N280/270 90
+ 330/230 125
++ E680/660/640/620 90
++ E680T/660T/640T/620T 110
++ CE4170/4150/4110 110
+
+ 45nm Core2 Processors
+ Solo ULV SU3500/3300 100
+diff --git a/Makefile b/Makefile
+index d985af0..2052c29 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
+index c93a22a..3a925fb 100644
+--- a/arch/arm/include/asm/hwcap.h
++++ b/arch/arm/include/asm/hwcap.h
+@@ -18,11 +18,12 @@
+ #define HWCAP_THUMBEE (1 << 11)
+ #define HWCAP_NEON (1 << 12)
+ #define HWCAP_VFPv3 (1 << 13)
+-#define HWCAP_VFPv3D16 (1 << 14)
++#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+ #define HWCAP_TLS (1 << 15)
+ #define HWCAP_VFPv4 (1 << 16)
+ #define HWCAP_IDIVA (1 << 17)
+ #define HWCAP_IDIVT (1 << 18)
++#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+
+ #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index bf53047..c49c8f7 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -27,9 +27,9 @@
+ #if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+- tst \tmp, #HWCAP_VFPv3D16
+- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+- addne \base, \base, #32*4 @ step over unused register space
++ tst \tmp, #HWCAP_VFPD32
++ ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ addeq \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+@@ -51,9 +51,9 @@
+ #if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+- tst \tmp, #HWCAP_VFPv3D16
+- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+- addne \base, \base, #32*4 @ step over unused register space
++ tst \tmp, #HWCAP_VFPD32
++ stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ addeq \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
+index 5f452f8..5d9b1ee 100644
+--- a/arch/arm/kernel/swp_emulate.c
++++ b/arch/arm/kernel/swp_emulate.c
+@@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
+ {
+ siginfo_t info;
+
++ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
++ up_read(&current->mm->mmap_sem);
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index dc8c550..9e28fdb 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -475,7 +475,7 @@ static void __init build_mem_type_table(void)
+ }
+
+ for (i = 0; i < 16; i++) {
+- unsigned long v = pgprot_val(protection_map[i]);
++ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+ }
+
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index f0702f3..7c815b2 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -610,11 +610,14 @@ static int __init vfp_init(void)
+ elf_hwcap |= HWCAP_VFPv3;
+
+ /*
+- * Check for VFPv3 D16. CPUs in this configuration
+- * only have 16 x 64bit registers.
++ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
++ * this configuration only have 16 x 64bit
++ * registers.
+ */
+ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
+- elf_hwcap |= HWCAP_VFPv3D16;
++ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
++ else
++ elf_hwcap |= HWCAP_VFPD32;
+ }
+ #endif
+ /*
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index 32567bc..ac12ae2 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
+ #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
+ #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
+ #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
+-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
+-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
+-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
+-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
+-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
+-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
++static inline void outb(unsigned char data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 1, 1);
++}
++static inline void outw(unsigned short data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 2, 1);
++}
++static inline void outl(unsigned int data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 4, 1);
++}
++static inline void outsb(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 1, count);
++}
++static inline void outsw(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 2, count);
++}
++static inline void outsl(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 4, count);
++}
+
+ /*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
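+
+The cris io.h conversion above is more than style: the old outb()/outw()/
+outl() macros passed (void*)(unsigned)data, smuggling the value itself
+through the pointer argument, while write_io() expects the address of a
+buffer to read the bytes from; the new inlines pass &data. (The old outsl()
+also passed an element size of 3 instead of 4.) Userspace sketch of the
+pointer bug, with write_io() as a stand-in:
+
+	#include <stdio.h>
+
+	/* stand-in: reads the bytes to emit from *src */
+	static void write_io(const void *src, int size)
+	{
+		printf("emitting 0x%02x\n", *(const unsigned char *)src);
+	}
+
+	int main(void)
+	{
+		unsigned char data = 0x5a;
+
+		/* old macro shape: write_io((void *)(unsigned long)data, 1)
+		 * would dereference address 0x5a */
+		write_io(&data, 1);     /* fixed shape: pass the address */
+		return 0;
+	}
+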
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index c47f96e..bf128d7 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
+ }
+ }
+ #ifdef CONFIG_HOTPLUG_CPU
+- if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
+- (system_state == SYSTEM_RUNNING ||
+- system_state == SYSTEM_BOOTING))
++ if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
+ play_dead();
+ #endif
+ tick_nohz_restart_sched_tick();
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 06c7251..cdf6b3f 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -435,7 +435,7 @@ _STATIC(__after_prom_start)
+ tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
+ #endif
+
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_RELOCATABLE
+ /*
+ * Check if the kernel has to be running as relocatable kernel based on the
+ * variable __run_at_load, if it is set the kernel is treated as relocatable
+diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
+index 549bb2c..ded8a1a 100644
+--- a/arch/powerpc/kvm/44x_emulate.c
++++ b/arch/powerpc/kvm/44x_emulate.c
+@@ -79,6 +79,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
++ vcpu->arch.dcr_is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+@@ -100,6 +101,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = kvmppc_get_gpr(vcpu, rs);
+ run->dcr.is_write = 1;
++ vcpu->arch.dcr_is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ emulated = EMULATE_DO_DCR;
+diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
+index 1b5dc1a..daf793b 100644
+--- a/arch/powerpc/platforms/embedded6xx/wii.c
++++ b/arch/powerpc/platforms/embedded6xx/wii.c
+@@ -85,9 +85,11 @@ void __init wii_memory_fixups(void)
+ wii_hole_start = p[0].base + p[0].size;
+ wii_hole_size = p[1].base - wii_hole_start;
+
+- pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
++ pr_info("MEM1: <%08llx %08llx>\n",
++ (unsigned long long) p[0].base, (unsigned long long) p[0].size);
+ pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
+- pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
++ pr_info("MEM2: <%08llx %08llx>\n",
++ (unsigned long long) p[1].base, (unsigned long long) p[1].size);
+
+ p[0].size += wii_hole_size + p[1].size;
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index d3cb86c..dffcaa4 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -749,7 +749,7 @@ static int __init kvm_s390_init(void)
+ }
+ memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
+ facilities[0] &= 0xff00fff3f47c0000ULL;
+- facilities[1] &= 0x201c000000000000ULL;
++ facilities[1] &= 0x001c000000000000ULL;
+ return 0;
+ }
+
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 1bb0bf4..4970ef0 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -429,7 +429,7 @@ void hpet_msi_unmask(struct irq_data *data)
+
+ /* unmask it */
+ cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+- cfg |= HPET_TN_FSB;
++ cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
+ hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+ }
+
+@@ -440,7 +440,7 @@ void hpet_msi_mask(struct irq_data *data)
+
+ /* mask it */
+ cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+- cfg &= ~HPET_TN_FSB;
++ cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
+ hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4fc5323..f4063fd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -578,6 +578,9 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+
++ if (!static_cpu_has(X86_FEATURE_XSAVE))
++ return 0;
++
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+ }
+@@ -6149,6 +6152,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ int pending_vec, max_bits, idx;
+ struct desc_ptr dt;
+
++ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++ return -EINVAL;
++
+ dt.size = sregs->idt.limit;
+ dt.address = sregs->idt.base;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 5535477..a1a4b8e 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -34,6 +34,7 @@
+ #include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
++#include <asm/unaligned.h>
+
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ #include <linux/proc_fs.h>
+@@ -95,6 +96,18 @@ enum {
+ ACPI_BATTERY_ALARM_PRESENT,
+ ACPI_BATTERY_XINFO_PRESENT,
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
++ /* On Lenovo Thinkpad models from 2010 and 2011, the power unit
++ switches between mWh and mAh depending on whether the system
++ is running on battery or not. When mAh is the unit, most
++ reported values are incorrect and need to be adjusted by
++ 10000/design_voltage. Verified on x201, t410, t410s, and x220.
++ Pre-2010 and 2012 models appear to always report in mWh and
++ are thus unaffected (tested with t42, t61, t500, x200, x300,
++ and x230). Also, in mid-2012 Lenovo issued a BIOS update for
++ the 2011 models that fixes the issue (tested on x220 with a
++ post-1.29 BIOS), but as of Nov. 2012, no such update is
++ available for the 2010 models. */
++ ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ };
+
+ struct acpi_battery {
+@@ -429,6 +442,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
+ kfree(buffer.pointer);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ battery->full_charge_capacity = battery->design_capacity;
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
++ battery->power_unit && battery->design_voltage) {
++ battery->design_capacity = battery->design_capacity *
++ 10000 / battery->design_voltage;
++ battery->full_charge_capacity = battery->full_charge_capacity *
++ 10000 / battery->design_voltage;
++ battery->design_capacity_warning =
++ battery->design_capacity_warning *
++ 10000 / battery->design_voltage;
++ /* Curiously, design_capacity_low, unlike the rest of them,
++ is correct. */
++ /* capacity_granularity_* equal 1 on the systems tested, so
++ it's impossible to tell if they would need an adjustment
++ or not if their values were higher. */
++ }
+ return result;
+ }
+
+@@ -477,6 +505,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
+ && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
++ battery->power_unit && battery->design_voltage) {
++ battery->capacity_now = battery->capacity_now *
++ 10000 / battery->design_voltage;
++ }
+ return result;
+ }
+
+@@ -586,6 +619,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
+ mutex_unlock(&battery->sysfs_lock);
+ }
+
++static void find_battery(const struct dmi_header *dm, void *private)
++{
++ struct acpi_battery *battery = (struct acpi_battery *)private;
++ /* Note: the hardcoded offsets below have been extracted from
++ the source code of dmidecode. */
++ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
++ const u8 *dmi_data = (const u8 *)(dm + 1);
++ int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
++ if (dm->length >= 18)
++ dmi_capacity *= dmi_data[17];
++ if (battery->design_capacity * battery->design_voltage / 1000
++ != dmi_capacity &&
++ battery->design_capacity * 10 == dmi_capacity)
++ set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
++ &battery->flags);
++ }
++}
++
+ /*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report percentage battery remaining capacity directly to OS.
+@@ -611,6 +662,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
+ }
++
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
++ return;
++
++ if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
++ const char *s;
++ s = dmi_get_system_info(DMI_PRODUCT_VERSION);
++ if (s && !strnicmp(s, "ThinkPad", 8)) {
++ dmi_walk(find_battery, battery);
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
++ &battery->flags) &&
++ battery->design_voltage) {
++ battery->design_capacity =
++ battery->design_capacity *
++ 10000 / battery->design_voltage;
++ battery->full_charge_capacity =
++ battery->full_charge_capacity *
++ 10000 / battery->design_voltage;
++ battery->design_capacity_warning =
++ battery->design_capacity_warning *
++ 10000 / battery->design_voltage;
++ battery->capacity_now = battery->capacity_now *
++ 10000 / battery->design_voltage;
++ }
++ }
++ }
+ }
+
+ static int acpi_battery_update(struct acpi_battery *battery)
+diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
+index 9d7bc9f..ac28db3 100644
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
++ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index ed6bc52..d790791 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -108,6 +108,180 @@ void __init acpi_old_suspend_ordering(void)
+ old_suspend_ordering = true;
+ }
+
++static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
++{
++ acpi_old_suspend_ordering();
++ return 0;
++}
++
++static int __init init_nvs_nosave(const struct dmi_system_id *d)
++{
++ acpi_nvs_nosave();
++ return 0;
++}
++
++static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Abit KN9 (nForce4 variant)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
++ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "HP xw4600 Workstation",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Panasonic CF51-2L",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "Matsushita Electric Industrial Co.,Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW21E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB17FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR11M",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Everex StepNote Series",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1Z1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-NW130D",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCCW29FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Averatec AV1020-ED2",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI DELUXE",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI Premium",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR26GN_P",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1S1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW520F",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54C",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54HR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
++ },
++ },
++ {},
++};
++
++static void acpi_sleep_dmi_check(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++}
++
+ /**
+ * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
+ */
+@@ -197,6 +371,7 @@ static void acpi_pm_end(void)
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ #define acpi_target_sleep_state ACPI_STATE_S0
++static inline void acpi_sleep_dmi_check(void) {}
+ #endif /* CONFIG_ACPI_SLEEP */
+
+ #ifdef CONFIG_SUSPEND
+@@ -341,167 +516,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
+ .end = acpi_pm_end,
+ .recover = acpi_pm_finish,
+ };
+-
+-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+-{
+- old_suspend_ordering = true;
+- return 0;
+-}
+-
+-static int __init init_nvs_nosave(const struct dmi_system_id *d)
+-{
+- acpi_nvs_nosave();
+- return 0;
+-}
+-
+-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Abit KN9 (nForce4 variant)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "HP xw4600 Workstation",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Panasonic CF51-2L",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR,
+- "Matsushita Electric Industrial Co.,Ltd."),
+- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW21E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB17FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR11M",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Everex StepNote Series",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB1Z1E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-NW130D",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCCW29FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Averatec AV1020-ED2",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI DELUXE",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI Premium",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR26GN_P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW520F",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54C",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54HR",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+- },
+- },
+- {},
+-};
+ #endif /* CONFIG_SUSPEND */
+
+ #ifdef CONFIG_HIBERNATION
+@@ -808,13 +822,13 @@ int __init acpi_sleep_init(void)
+ u8 type_a, type_b;
+ #ifdef CONFIG_SUSPEND
+ int i = 0;
+-
+- dmi_check_system(acpisleep_dmi_table);
+ #endif
+
+ if (acpi_disabled)
+ return 0;
+
++ acpi_sleep_dmi_check();
++
+ sleep_states[ACPI_STATE_S0] = 1;
+ printk(KERN_INFO PREFIX "(supports S0");
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 08a44b5..0e47949 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int video_ignore_initial_backlight(const struct dmi_system_id *d)
++{
++ use_bios_initial_backlight = 0;
++ return 0;
++}
++
+ static struct dmi_system_id video_dmi_table[] __initdata = {
+ /*
+ * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ },
+ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Folio 13-2000",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index f3f0fe7..d9c0199 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -132,6 +132,41 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
+ return AE_OK;
+ }
+
++/* Force use of the vendor driver when the ACPI device is known
++ * to be buggy */
++static int video_detect_force_vendor(const struct dmi_system_id *d)
++{
++ acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
++ return 0;
++}
++
++static struct dmi_system_id video_detect_dmi_table[] = {
++ /* On the Samsung X360, the BIOS sets a flag (VDRV) if the generic
++ * ACPI backlight device is used. This flag breaks the backlight
++ * interface (even the vendor interface) until the next reboot.
++ * This is why we must prevent video.ko from being used here and
++ * cannot rely on a later call to acpi_video_unregister().
++ */
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "X360",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
++ DMI_MATCH(DMI_BOARD_NAME, "X360"),
++ },
++ },
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "Asus UL30VT",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
++ },
++ },
++ { },
++};
++
+ /*
+ * Returns the video capabilities of a specific ACPI graphics device
+ *
+@@ -164,6 +199,8 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
+ * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+ *}
+ */
++
++ dmi_check_system(video_detect_dmi_table);
+ } else {
+ status = acpi_bus_get_device(graphics_handle, &tmp_dev);
+ if (ACPI_FAILURE(status)) {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 321e23e..c9540c0 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2529,6 +2529,7 @@ int ata_bus_probe(struct ata_port *ap)
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 58db834..aea627e 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2599,6 +2599,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 2a5412e..dd332e5 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
++ if (atadev && ap->ops->sw_activity_show &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ return ap->ops->sw_activity_show(atadev, buf);
+ return -EINVAL;
+ }
+@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+ enum sw_activity val;
+ int rc;
+
+- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
++ if (atadev && ap->ops->sw_activity_store &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ val = simple_strtoul(buf, NULL, 0);
+ switch (val) {
+ case OFF: case BLINK_ON: case BLINK_OFF:
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index 000fcc9..ef6e328 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -147,6 +147,10 @@ struct pdc_port_priv {
+ dma_addr_t pkt_dma;
+ };
+
++struct pdc_host_priv {
++ spinlock_t hard_reset_lock;
++};
++
+ static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
+ unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
++ struct pdc_host_priv *hpriv = ap->host->private_data;
+ u8 tmp;
+
+- spin_lock(&ap->host->lock);
++ spin_lock(&hpriv->hard_reset_lock);
+
+ tmp = readb(pcictl_b1_mmio);
+ tmp &= ~(0x10 << ata_no);
+@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ writeb(tmp, pcictl_b1_mmio);
+ readb(pcictl_b1_mmio); /* flush */
+
+- spin_unlock(&ap->host->lock);
++ spin_unlock(&hpriv->hard_reset_lock);
+ }
+
+ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+ const struct ata_port_info *ppi[PDC_MAX_PORTS];
+ struct ata_host *host;
++ struct pdc_host_priv *hpriv;
+ void __iomem *host_mmio;
+ int n_ports, i, rc;
+ int is_sataii_tx4;
+@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ dev_err(&pdev->dev, "failed to allocate host\n");
+ return -ENOMEM;
+ }
++ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
++ if (!hpriv)
++ return -ENOMEM;
++ spin_lock_init(&hpriv->hard_reset_lock);
++ host->private_data = hpriv;
+ host->iomap = pcim_iomap_table(pdev);
+
+ is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
+diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
+index c646118..833607f 100644
+--- a/drivers/ata/sata_svw.c
++++ b/drivers/ata/sata_svw.c
+@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
+ return 0;
+ }
+
++static int k2_sata_softreset(struct ata_link *link,
++ unsigned int *class, unsigned long deadline)
++{
++ u8 dmactl;
++ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
++
++ dmactl = readb(mmio + ATA_DMA_CMD);
++
++ /* Clear the start bit */
++ if (dmactl & ATA_DMA_START) {
++ dmactl &= ~ATA_DMA_START;
++ writeb(dmactl, mmio + ATA_DMA_CMD);
++ }
++
++ return ata_sff_softreset(link, class, deadline);
++}
++
++static int k2_sata_hardreset(struct ata_link *link,
++ unsigned int *class, unsigned long deadline)
++{
++ u8 dmactl;
++ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
++
++ dmactl = readb(mmio + ATA_DMA_CMD);
++
++ /* Clear the start bit */
++ if (dmactl & ATA_DMA_START) {
++ dmactl &= ~ATA_DMA_START;
++ writeb(dmactl, mmio + ATA_DMA_CMD);
++ }
++
++ return sata_sff_hardreset(link, class, deadline);
++}
+
+ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+ {
+@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
+
+ static struct ata_port_operations k2_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
++ .softreset = k2_sata_softreset,
++ .hardreset = k2_sata_hardreset,
+ .sff_tf_load = k2_sata_tf_load,
+ .sff_tf_read = k2_sata_tf_read,
+ .sff_check_status = k2_stat_check_status,
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index d452592..adfce9f 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card)
+ for (port = 0; tx_pending; tx_pending >>= 1, port++) {
+ if (tx_pending & 1) {
+ struct sk_buff *oldskb = card->tx_skb[port];
+- if (oldskb)
++ if (oldskb) {
+ pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
+ oldskb->len, PCI_DMA_TODEVICE);
+-
++ card->tx_skb[port] = NULL;
++ }
+ spin_lock(&card->tx_queue_lock);
+ skb = skb_dequeue(&card->tx_queue[port]);
+ if (!skb)
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 6f39747..cd252e0 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -67,7 +67,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + 1 + tot_len >= count)
+ break;
+
+ /* Format the register */
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index c3e9dff..041fddf 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
+ bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
+ ~(1 << irqflag));
+ else
+- bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
++ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
+
+ /* assign the new one */
+ if (irq == 0) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 5c6709d..574ce73 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -66,6 +66,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x0489, 0xE027) },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1f90dab..c5e44a3 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -104,6 +104,8 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x0c10, 0x0000) },
+
+ /* Broadcom BCM20702A0 */
++ { USB_DEVICE(0x0b05, 0x17b5) },
++ { USB_DEVICE(0x04ca, 0x2003) },
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
+@@ -131,6 +133,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index 6f24604..439d7e7 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -235,6 +235,7 @@
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
++#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+
+ int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge);
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index c92424c..43c4ec3 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1459,6 +1459,8 @@ static const struct intel_gtt_driver_description {
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
++ "Ivybridge", &sandybridge_gtt_driver },
+ { 0, NULL, NULL }
+ };
+
+diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
+index 7c7f42a..9658116 100644
+--- a/drivers/char/ramoops.c
++++ b/drivers/char/ramoops.c
+@@ -126,8 +126,8 @@ static int __init ramoops_probe(struct platform_device *pdev)
+ goto fail3;
+ }
+
+- rounddown_pow_of_two(pdata->mem_size);
+- rounddown_pow_of_two(pdata->record_size);
++ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
++ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
+
+ /* Check for the minimum memory size */
+ if (pdata->mem_size < MIN_MEM_SIZE &&
+diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
+index 6104dba..2244df0 100644
+--- a/drivers/edac/i7300_edac.c
++++ b/drivers/edac/i7300_edac.c
+@@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = {
+ [0] = "Memory Write error on non-redundant retry or "
+ "FBD configuration Write error on retry",
+ };
+-#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
+-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
++#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
++#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
+
+ #define FERR_NF_FBD 0xa0
+ static const char *ferr_nf_fbd_name[] = {
+@@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = {
+ [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
+ [0] = "Uncorrectable Data ECC on Replay",
+ };
+-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
++#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
+ #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
+ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
+ (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
+@@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
+ errnum = find_first_bit(&errors,
+ ARRAY_SIZE(ferr_nf_fbd_name));
+ specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
+- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
++ branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
+
+ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
+ REDMEMA, &syndrome);
+diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
+index a5da732..01658ca 100644
+--- a/drivers/edac/i82975x_edac.c
++++ b/drivers/edac/i82975x_edac.c
+@@ -355,10 +355,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+ static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev, void __iomem *mch_window)
+ {
+- static const char *labels[4] = {
+- "DIMM A1", "DIMM A2",
+- "DIMM B1", "DIMM B2"
+- };
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ u8 value;
+@@ -399,9 +395,9 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+- strncpy(csrow->channels[chan].label,
+- labels[(index >> 1) + (chan * 2)],
+- EDAC_MC_LABEL_LEN);
++ snprintf(csrow->channels[chan].label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
++ (chan == 0) ? 'A' : 'B',
++ index);
+
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index a20f45b..7c869b7 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -860,8 +860,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+- fwnet_incoming_packet(dev, buf_ptr, length,
+- source_node_id, -1, true);
++ fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++ context->card->generation, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+@@ -956,7 +956,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+ break;
+ }
+
+- skb_pull(skb, ptask->max_payload);
++ if (ptask->dest_node == IEEE1394_ALL_NODES) {
++ skb_pull(skb,
++ ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
++ } else {
++ skb_pull(skb, ptask->max_payload);
++ }
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+@@ -1059,7 +1064,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+- p = skb_push(ptask->skb, 8);
++ p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index d2619d7..11788f7 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -321,8 +321,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
+ * drm_crtc_set_mode - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+- * @x: width of mode
+- * @y: height of mode
++ * @x: horizontal offset into the surface
++ * @y: vertical offset into the surface
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 0c1a99b..bb95d59 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -274,6 +274,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, 2);
++ if (ret == -ENXIO) {
++ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
++ adapter->name);
++ break;
++ }
+ } while (ret != 2 && --retries);
+
+ return ret == 2 ? 0 : -1;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index b2e3c97..d00f905 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -339,7 +339,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+- if (!work->pending) {
++ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+@@ -350,7 +350,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ seq_printf(m, "Stall check enabled, ");
+ else
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+- seq_printf(m, "%d prepares\n", work->pending);
++ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 3a1bfd7..452bc51 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -287,6 +287,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
+ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
++ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ {0, 0, 0}
+ };
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index dbe4dbe..5950ba3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1259,6 +1259,11 @@ out:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
++ case -EBUSY:
++ /*
++ * EBUSY is ok: this just means that another thread
++ * already did the job.
++ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 2812d7b..93e74fb 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1187,7 +1187,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+- if (work == NULL || work->pending || !work->enable_stall_check) {
++ if (work == NULL ||
++ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
++ !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a294a32..7a10f5f 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2816,6 +2816,8 @@
+ #define _PFA_CTL_1 0x68080
+ #define _PFB_CTL_1 0x68880
+ #define PF_ENABLE (1<<31)
++#define PF_PIPE_SEL_MASK_IVB (3<<29)
++#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
+ #define PF_FILTER_MASK (3<<23)
+ #define PF_FILTER_PROGRAMMED (0<<23)
+ #define PF_FILTER_MED_3x3 (1<<23)
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 87bb87b..0016fee 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -495,12 +495,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
+- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+- "supported, assume %dbpp panel color "
+- "depth.\n",
+- dev_priv->edp.bpp);
+- }
++ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
++ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ return;
+ }
+
+@@ -653,9 +649,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+-
+- /* eDP data */
+- dev_priv->edp.bpp = 18;
+ }
+
+ static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index fdae61f..54acad3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2384,18 +2384,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
+ FDI_FE_ERRC_ENABLE);
+ }
+
+-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags |= FDI_PHASE_SYNC_OVR(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+- flags |= FDI_PHASE_SYNC_EN(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+-
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+@@ -2439,11 +2427,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable*/
+- if (HAS_PCH_IBX(dev)) {
+- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+- FDI_RX_PHASE_SYNC_POINTER_EN);
+- }
++ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
++ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
++ FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+@@ -2546,9 +2532,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2667,9 +2650,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2779,17 +2759,6 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+ }
+ }
+
+-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+@@ -2819,8 +2788,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN));
+- } else if (HAS_PCH_CPT(dev)) {
+- cpt_phase_pointer_disable(dev, pipe);
+ }
+
+ /* still set train pattern 1 */
+@@ -3073,7 +3040,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
++ if (IS_IVYBRIDGE(dev))
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
++ PF_PIPE_SEL_IVB(pipe));
++ else
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+@@ -4782,6 +4753,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ }
+ }
+
++ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
++ /* Use VBT settings if we have an eDP panel */
++ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
++
++ if (edp_bpc && edp_bpc < display_bpc) {
++ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
++ display_bpc = edp_bpc;
++ }
++ continue;
++ }
++
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+@@ -6945,11 +6927,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+- if (work == NULL || !work->pending) {
++
++ /* Ensure we don't miss a work->pending update ... */
++ smp_rmb();
++
++ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
++ /* and that the unpin work is consistent wrt ->pending. */
++ smp_rmb();
++
+ intel_crtc->unpin_work = NULL;
+
+ if (work->event) {
+@@ -7021,16 +7010,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
++ /* NB: An MMIO update of the plane base pointer will also
++ * generate a page-flip completion irq, i.e. every modeset
++ * is also accompanied by a spurious intel_prepare_page_flip().
++ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+- if (intel_crtc->unpin_work) {
+- if ((++intel_crtc->unpin_work->pending) > 1)
+- DRM_ERROR("Prepared flip multiple times\n");
+- } else {
+- DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+- }
++ if (intel_crtc->unpin_work)
++ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
++static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
++{
++ /* Ensure that the work item is consistent when activating it ... */
++ smp_wmb();
++ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
++ /* and that it is marked active as soon as the irq could fire. */
++ smp_wmb();
++}
++
+ static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+@@ -7067,6 +7065,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7110,6 +7110,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7153,6 +7154,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7242,6 +7247,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
++
++ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index bcadf74..5212284 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -257,7 +257,10 @@ struct intel_unpin_work {
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+- int pending;
++ atomic_t pending;
++#define INTEL_FLIP_INACTIVE 0
++#define INTEL_FLIP_PENDING 1
++#define INTEL_FLIP_COMPLETE 2
+ bool enable_stall_check;
+ };
+
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 57152a7..cf5ea3d 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -779,6 +779,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Gigabyte GA-D525TUD",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Supermicro X7SPA-H",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a4011b0..a25d08a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -541,6 +541,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+
+ if (rdev->family < CHIP_RV770)
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
++ /* use frac fb div on APUs */
++ if (ASIC_IS_DCE41(rdev))
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
++ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+ pll->flags |= RADEON_PLL_LEGACY;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index aca4755..f0dc04b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -111,7 +111,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- radeon_dp_set_link_config(connector, mode);
++ radeon_dp_set_link_config(connector, adjusted_mode);
+ }
+
+ return true;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index b919b11..0977849 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1730,7 +1730,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ case CHIP_SUMO:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+- rdev->config.evergreen.max_tile_pipes = 2;
++ rdev->config.evergreen.max_tile_pipes = 4;
+ if (rdev->pdev->device == 0x9648)
+ rdev->config.evergreen.max_simds = 3;
+ else if ((rdev->pdev->device == 0x9647) ||
+@@ -1819,7 +1819,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+- rdev->config.evergreen.max_pipes = 4;
++ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 899c712..2da7335 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -458,6 +458,9 @@ static const struct hid_device_id apple_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
++ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index a21e763..279b863 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1386,6 +1386,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 652f230..2d41336 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -131,6 +131,7 @@
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 2ab7175..7cf3ffe 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -418,6 +418,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
+ __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
++ __set_bit(INPUT_PROP_POINTER, input->propbit);
++ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ }
+
+ if (report_touches) {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 19b4412..3d630bb 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -190,6 +190,27 @@ static ssize_t show_temp(struct device *dev,
+ return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
+ }
+
++struct tjmax {
++ char const *id;
++ int tjmax;
++};
++
++static struct tjmax __cpuinitconst tjmax_table[] = {
++ { "CPU D410", 100000 },
++ { "CPU D425", 100000 },
++ { "CPU D510", 100000 },
++ { "CPU D525", 100000 },
++ { "CPU N450", 100000 },
++ { "CPU N455", 100000 },
++ { "CPU N470", 100000 },
++ { "CPU N475", 100000 },
++ { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
++ { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
++ { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
++ { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
++ { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
++};
++
+ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ {
+ /* The 100C is default for both mobile and non mobile CPUs */
+@@ -200,6 +221,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ int err;
+ u32 eax, edx;
+ struct pci_dev *host_bridge;
++ int i;
++
++ /* explicit tjmax table entries override heuristics */
++ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
++ if (strstr(c->x86_model_id, tjmax_table[i].id))
++ return tjmax_table[i].tjmax;
++ }
+
+ /* Early chips have no MSR for TjMax */
+
+@@ -208,7 +236,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+
+ /* Atom CPUs */
+
+- if (c->x86_model == 0x1c) {
++ if (c->x86_model == 0x1c || c->x86_model == 0x26
++ || c->x86_model == 0x27) {
+ usemsr_ee = 0;
+
+ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+@@ -221,6 +250,9 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ tjmax = 90000;
+
+ pci_dev_put(host_bridge);
++ } else if (c->x86_model == 0x36) {
++ usemsr_ee = 0;
++ tjmax = 100000;
+ }
+
+ if (c->x86_model > 0xe && usemsr_ee) {
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index ac2d6cb..770e959 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
+ MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
+ MODULE_LICENSE("GPL");
+
++/* Family 16h Northbridge's function 4 PCI ID */
++#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
++
+ /* D18F3 */
+ #define REG_NORTHBRIDGE_CAP 0xe8
+
+@@ -256,6 +259,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev)
+
+ static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+ {}
+ };
+ MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
+diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
+index 4dfa1ee..f8f892b 100644
+--- a/drivers/input/joystick/walkera0701.c
++++ b/drivers/input/joystick/walkera0701.c
+@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev)
+ struct walkera_dev *w = input_get_drvdata(dev);
+
+ parport_disable_irq(w->parport);
++ hrtimer_cancel(&w->timer);
+ }
+
+ static int walkera0701_connect(struct walkera_dev *w, int parport)
+@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (parport_claim(w->pardevice))
+ goto init_err1;
+
++ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ w->timer.function = timer_handler;
++
+ w->input_dev = input_allocate_device();
+ if (!w->input_dev)
+ goto init_err2;
+@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (err)
+ goto init_err3;
+
+- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+- w->timer.function = timer_handler;
+ return 0;
+
+ init_err3:
+@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+
+ static void walkera0701_disconnect(struct walkera_dev *w)
+ {
+- hrtimer_cancel(&w->timer);
+ input_unregister_device(w->input_dev);
+ parport_release(w->pardevice);
+ parport_unregister_device(w->pardevice);
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index cd1a843..031270c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -914,6 +914,7 @@ static int __init i8042_platform_init(void)
+ int retval;
+
+ #ifdef CONFIG_X86
++ u8 a20_on = 0xdf;
+ /* Just return if pre-detection shows no i8042 controller exist */
+ if (!x86_platform.i8042_detect())
+ return -ENODEV;
+@@ -953,6 +954,14 @@ static int __init i8042_platform_init(void)
+
+ if (dmi_check_system(i8042_dmi_dritek_table))
+ i8042_dritek = true;
++
++ /*
++ * A20 was already enabled during early kernel init. But some buggy
++ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
++ * resume from S3. So we do it here and hope that nothing breaks.
++ */
++ i8042_command(&a20_on, 0x10d1);
++ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
+ #endif /* CONFIG_X86 */
+
+ return retval;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index b9062c0..9a6cc92 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1801,10 +1801,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ if (!pte)
+ return -ENOMEM;
+ /* It is large page*/
+- if (largepage_lvl > 1)
++ if (largepage_lvl > 1) {
+ pteval |= DMA_PTE_LARGE_PAGE;
+- else
++ /* Ensure that old small page tables are removed to make room
++ for superpage, if they exist. */
++ dma_pte_clear_range(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ dma_pte_free_pagetable(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ } else {
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
++ }
+
+ }
+ /* We don't need lock here, nobody else
+diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
+index cb641f1..bf5cfd7 100644
+--- a/drivers/leds/leds-lp5521.c
++++ b/drivers/leds/leds-lp5521.c
+@@ -198,9 +198,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+
+ /* move current engine to direct mode and remember the state */
+ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
++ if (ret)
++ return ret;
++
+ /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+ usleep_range(1000, 2000);
+- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
++ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
++ if (ret)
++ return ret;
+
+ /* For loading, all the engines to load mode */
+ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+@@ -216,8 +221,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+ LP5521_PROG_MEM_SIZE,
+ pattern);
+
+- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+- return ret;
++ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
+ }
+
+ static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+@@ -692,9 +696,9 @@ static int __devinit lp5521_probe(struct i2c_client *client,
+ * otherwise further access to the R G B channels in the
+ * LP5521_REG_ENABLE register will not have any effect - strange!
+ */
+- lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
+- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+- dev_err(&client->dev, "error in reseting chip\n");
++ ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
++ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
++ dev_err(&client->dev, "error in resetting chip\n");
+ goto fail2;
+ }
+ usleep_range(10000, 20000);
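
The lp5521 hunk drops the `ret |= ...` accumulation pattern: OR-ing two different negative errnos produces a third, meaningless value, and the old code kept talking to the chip after a failed read. Checking and returning after each step preserves the exact error. A sketch of the pattern, with stubs standing in for the driver's I2C helpers:

#include <stdio.h>

/* Stubs in place of lp5521_write()/lp5521_read(). */
static int chip_write(int reg, int val) { (void)reg; (void)val; return 0; }
static int chip_read(int reg, int *val) { (void)reg; *val = 0; return 0; }

static int load_pattern(void)
{
	int mode, ret;

	ret = chip_write(0x01, 0x3f);
	if (ret)
		return ret;		/* exact errno of the first failure */

	ret = chip_read(0x01, &mode);
	if (ret)
		return ret;		/* don't keep going on a bad read */

	/* ... program the engine ... */

	return chip_write(0x01, mode);	/* restore the saved mode */
}

int main(void)
{
	printf("load_pattern: %d\n", load_pattern());
	return 0;
}
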
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 1ce84ed..42c873f 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1562,6 +1562,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+ if (copy_from_user(dmi, user, tmp.data_size))
+ goto bad;
+
++ /*
++ * Abort if something changed the ioctl data while it was being copied.
++ */
++ if (dmi->data_size != tmp.data_size) {
++ DMERR("rejecting ioctl: data size modified while processing parameters");
++ goto bad;
++ }
++
+ /* Wipe the user buffer so we do not return it to userspace */
+ if (secure_data && clear_user(user, tmp.data_size))
+ goto bad;
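
The dm-ioctl check closes a double-fetch race: copy_params() reads the user's header once to size the kernel allocation (tmp) and again as part of the full copy (dmi), so a concurrent writer could grow data_size between the two reads and have later code trust a bound larger than the buffer. A single-process model of the re-validation (structures simplified; only the header field matters here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ioctl_hdr {
	unsigned int data_size;	/* total size, supplied by the caller */
	/* payload follows in the real struct dm_ioctl */
};

static void *copy_params(const struct ioctl_hdr *user)
{
	struct ioctl_hdr tmp, *dmi;

	memcpy(&tmp, user, sizeof(tmp));	/* first fetch: sizing */
	dmi = malloc(tmp.data_size);
	if (!dmi)
		return NULL;
	memcpy(dmi, user, tmp.data_size);	/* second fetch: full copy */

	/* A concurrent writer may have changed data_size in between;
	 * trusting dmi->data_size now could overrun the allocation. */
	if (dmi->data_size != tmp.data_size) {
		fprintf(stderr, "rejecting ioctl: size changed\n");
		free(dmi);
		return NULL;
	}
	return dmi;
}

int main(void)
{
	struct ioctl_hdr h = { .data_size = sizeof(h) };
	void *p = copy_params(&h);

	puts(p ? "accepted" : "rejected");
	free(p);
	return 0;
}
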
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index d279c76..acba54e 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -36,13 +36,13 @@ struct node_header {
+ __le32 padding;
+ } __packed;
+
+-struct node {
++struct btree_node {
+ struct node_header header;
+ __le64 keys[0];
+ } __packed;
+
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt);
+
+ int new_block(struct dm_btree_info *info, struct dm_block **result);
+@@ -64,7 +64,7 @@ struct ro_spine {
+ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
+ int exit_ro_spine(struct ro_spine *s);
+ int ro_step(struct ro_spine *s, dm_block_t new_child);
+-struct node *ro_node(struct ro_spine *s);
++struct btree_node *ro_node(struct ro_spine *s);
+
+ struct shadow_spine {
+ struct dm_btree_info *info;
+@@ -98,12 +98,12 @@ int shadow_root(struct shadow_spine *s);
+ /*
+ * Some inlines.
+ */
+-static inline __le64 *key_ptr(struct node *n, uint32_t index)
++static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
+ {
+ return n->keys + index;
+ }
+
+-static inline void *value_base(struct node *n)
++static inline void *value_base(struct btree_node *n)
+ {
+ return &n->keys[le32_to_cpu(n->header.max_entries)];
+ }
+@@ -111,7 +111,7 @@ static inline void *value_base(struct node *n)
+ /*
+ * FIXME: Now that value size is stored in node we don't need the third parm.
+ */
+-static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
++static inline void *value_ptr(struct btree_node *n, uint32_t index, size_t value_size)
+ {
+ BUG_ON(value_size != le32_to_cpu(n->header.value_size));
+ return value_base(n) + (value_size * index);
+@@ -120,7 +120,7 @@ static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
+ /*
+ * Assumes the values are suitably-aligned and converts to core format.
+ */
+-static inline uint64_t value64(struct node *n, uint32_t index)
++static inline uint64_t value64(struct btree_node *n, uint32_t index)
+ {
+ __le64 *values_le = value_base(n);
+
+@@ -130,7 +130,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
+ /*
+ * Searching for a key within a single node.
+ */
+-int lower_bound(struct node *n, uint64_t key);
++int lower_bound(struct btree_node *n, uint64_t key);
+
+ extern struct dm_block_validator btree_node_validator;
+
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 1a35caf..e6cdfde 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -53,7 +53,7 @@
+ /*
+ * Some little utilities for moving node data around.
+ */
+-static void node_shift(struct node *n, int shift)
++static void node_shift(struct btree_node *n, int shift)
+ {
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
+ }
+ }
+
+-static void node_copy(struct node *left, struct node *right, int shift)
++static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(left->header.value_size);
+@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
+ /*
+ * Delete a specific entry from a leaf node.
+ */
+-static void delete_at(struct node *n, unsigned index)
++static void delete_at(struct btree_node *n, unsigned index)
+ {
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+
+-static unsigned merge_threshold(struct node *n)
++static unsigned merge_threshold(struct btree_node *n)
+ {
+ return le32_to_cpu(n->header.max_entries) / 3;
+ }
+@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
+ struct child {
+ unsigned index;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+ };
+
+ static struct dm_btree_value_type le64_type = {
+@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
+ .equal = NULL
+ };
+
+-static int init_child(struct dm_btree_info *info, struct node *parent,
++static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+ unsigned index, struct child *result)
+ {
+ int r, inc;
+@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
+ return dm_tm_unlock(info->tm, c->block);
+ }
+
+-static void shift(struct node *left, struct node *right, int count)
++static void shift(struct btree_node *left, struct btree_node *right, int count)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
+ right->header.nr_entries = cpu_to_le32(nr_right + count);
+ }
+
+-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
++static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned threshold = 2 * merge_threshold(left) + 1;
+@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent;
++ struct btree_node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ * in right, then rebalance2. This wastes some cpu, but I want something
+ * simple atm.
+ */
+-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
++static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+ /*
+ * Redistributes entries among 3 sibling nodes.
+ */
+-static void redistribute3(struct dm_btree_info *info, struct node *parent,
++static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ int s;
+@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+
+-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
++static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *center = c->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *center = c->n;
++ struct btree_node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent = dm_block_data(shadow_current(s));
++ struct btree_node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
+ {
+ int r;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
+ {
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+- struct node *n;
++ struct btree_node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
+ return r;
+ }
+
+-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
++static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+ {
+ int i = lower_bound(n, key);
+
+@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ uint64_t key, unsigned *index)
+ {
+ int i = *index, r;
+- struct node *n;
++ struct btree_node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index d9a7912..2f0805c 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+
+ h->blocknr = cpu_to_le64(dm_block_location(b));
+@@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+ size_t value_size;
+ __le32 csum_disk;
+@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
+ return r;
+ }
+
+-struct node *ro_node(struct ro_spine *s)
++struct btree_node *ro_node(struct ro_spine *s)
+ {
+ struct dm_block *block;
+
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index bd1e7ff..bbb2ec5 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ /*----------------------------------------------------------------*/
+
+ /* makes the assumption that no two keys are the same. */
+-static int bsearch(struct node *n, uint64_t key, int want_hi)
++static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
+ {
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
+ return want_hi ? hi : lo;
+ }
+
+-int lower_bound(struct node *n, uint64_t key)
++int lower_bound(struct btree_node *n, uint64_t key)
+ {
+ return bsearch(n, key, 0);
+ }
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt)
+ {
+ unsigned i;
+@@ -78,7 +78,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ value_ptr(n, i, vt->size));
+ }
+
+-static int insert_at(size_t value_size, struct node *node, unsigned index,
++static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+ {
+@@ -123,7 +123,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
+ {
+ int r;
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
+ #define MAX_SPINE_DEPTH 64
+ struct frame {
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
+ /*----------------------------------------------------------------*/
+
+ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+- int (*search_fn)(struct node *, uint64_t),
++ int (*search_fn)(struct btree_node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+ {
+ int i, r;
+@@ -407,7 +407,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+- struct node *ln, *rn, *pn;
++ struct btree_node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+@@ -492,7 +492,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+- struct node *pn, *ln, *rn;
++ struct btree_node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+@@ -577,7 +577,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ uint64_t key, unsigned *index)
+ {
+ int r, i = *index, top = 1;
+- struct node *node;
++ struct btree_node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -644,7 +644,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 411f523..6dad2ef 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -19,6 +19,10 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+
++static struct device_type mfd_dev_type = {
++ .name = "mfd_device",
++};
++
+ int mfd_cell_enable(struct platform_device *pdev)
+ {
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+@@ -88,6 +92,7 @@ static int mfd_add_device(struct device *parent, int id,
+ goto fail_device;
+
+ pdev->dev.parent = parent;
++ pdev->dev.type = &mfd_dev_type;
+
+ if (cell->pdata_size) {
+ ret = platform_device_add_data(pdev,
+@@ -183,10 +188,16 @@ EXPORT_SYMBOL(mfd_add_devices);
+
+ static int mfd_remove_devices_fn(struct device *dev, void *c)
+ {
+- struct platform_device *pdev = to_platform_device(dev);
+- const struct mfd_cell *cell = mfd_get_cell(pdev);
++ struct platform_device *pdev;
++ const struct mfd_cell *cell;
+ atomic_t **usage_count = c;
+
++ if (dev->type != &mfd_dev_type)
++ return 0;
++
++ pdev = to_platform_device(dev);
++ cell = mfd_get_cell(pdev);
++
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
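
The mfd-core fix tags every cell it creates with a private device_type and filters on that tag during removal: the removal callback visits all children of the parent, including platform devices mfd never created, and calling mfd_get_cell() on those dereferences memory that is not a cell. The tag-then-filter idiom in miniature (structures reduced to what the check needs):

#include <stdio.h>

struct device_type { const char *name; };
struct device { const struct device_type *type; };

static const struct device_type mfd_dev_type = { .name = "mfd_device" };

/* The container_of-style downcast is only valid for devices we
 * created, so removal bails out early on anything untagged. */
static int remove_fn(struct device *dev)
{
	if (dev->type != &mfd_dev_type)
		return 0;	/* someone else's child: leave it alone */
	printf("removing %s child\n", dev->type->name);
	return 1;
}

int main(void)
{
	struct device mine = { .type = &mfd_dev_type };
	struct device other = { .type = 0 };

	remove_fn(&mine);
	remove_fn(&other);
	return 0;
}
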
+diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
+index fffc227..9c99680 100644
+--- a/drivers/misc/hpilo.c
++++ b/drivers/misc/hpilo.c
+@@ -735,7 +735,14 @@ static void ilo_remove(struct pci_dev *pdev)
+ free_irq(pdev->irq, ilo_hw);
+ ilo_unmap_device(pdev, ilo_hw);
+ pci_release_regions(pdev);
+- pci_disable_device(pdev);
++ /*
++ * pci_disable_device(pdev) used to be here. But this PCI device has
++ * two functions with interrupt lines connected to a single pin. The
++ * other one is a USB host controller. So when we disable the PIN here
++ * e.g. by rmmod hpilo, the controller stops working. It is because
++ * the interrupt link is disabled in ACPI since it is not refcounted
++ * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
++ */
+ kfree(ilo_hw);
+ ilo_hwdev[(minor / MAX_CCB)] = 0;
+ }
+@@ -820,7 +827,7 @@ unmap:
+ free_regions:
+ pci_release_regions(pdev);
+ disable:
+- pci_disable_device(pdev);
++/* pci_disable_device(pdev); see comment in ilo_remove */
+ free:
+ kfree(ilo_hw);
+ out:
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index 8d082b4..d971817 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -53,6 +53,10 @@
+ #include <linux/kthread.h>
+ #include "xpc.h"
+
++#ifdef CONFIG_X86_64
++#include <asm/traps.h>
++#endif
++
+ /* define two XPC debug device structures to be used with dev_dbg() et al */
+
+ struct device_driver xpc_dbg_name = {
+@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+ return NOTIFY_DONE;
+ }
+
++/* Used to only allow one cpu to complete disconnect */
++static unsigned int xpc_die_disconnecting;
++
+ /*
+ * Notify other partitions to deactivate from us by first disengaging from all
+ * references to our memory.
+@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
+ long keep_waiting;
+ long wait_to_print;
+
++ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
++ return;
++
+ /* keep xpc_hb_checker thread from doing anything (just in case) */
+ xpc_exiting = 1;
+
+@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
+ * about the lack of a heartbeat.
+ */
+ static int
+-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
++xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
+ {
+ #ifdef CONFIG_IA64 /* !!! temporary kludge */
+ switch (event) {
+@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+ break;
+ }
+ #else
+- xpc_die_deactivate();
++ struct die_args *die_args = _die_args;
++
++ switch (event) {
++ case DIE_TRAP:
++ if (die_args->trapnr == X86_TRAP_DF)
++ xpc_die_deactivate();
++
++ if (((die_args->trapnr == X86_TRAP_MF) ||
++ (die_args->trapnr == X86_TRAP_XF)) &&
++ !user_mode_vm(die_args->regs))
++ xpc_die_deactivate();
++
++ break;
++ case DIE_INT3:
++ case DIE_DEBUG:
++ break;
++ case DIE_OOPS:
++ case DIE_GPF:
++ default:
++ xpc_die_deactivate();
++ }
+ #endif
+
+ return NOTIFY_DONE;
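
In the xpc change, cmpxchg(&xpc_die_disconnecting, 0, 1) is a one-shot gate: on a die/panic the notifier can fire on several CPUs, but only the first to flip the flag performs the slow disconnect, and every other caller returns at once. The same gate expressed with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint die_disconnecting;

static void die_deactivate(void)
{
	unsigned int expected = 0;

	/* Only the caller that wins the 0 -> 1 transition proceeds;
	 * the kernel code gets the same effect from cmpxchg(). */
	if (!atomic_compare_exchange_strong(&die_disconnecting,
					    &expected, 1))
		return;

	puts("disconnecting (runs exactly once)");
}

int main(void)
{
	die_deactivate();	/* does the work */
	die_deactivate();	/* no-op */
	return 0;
}
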
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index 559d30d..d5505f3 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1003,10 +1003,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ }
+- if (host->state == STATE_IDLE) {
+- dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
+- return IRQ_HANDLED;
+- }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+ complete(&host->intr_wait);
+ else
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+index bb2fe60..1d02ec9 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+@@ -135,6 +135,15 @@ int gpmi_init(struct gpmi_nand_data *this)
+ if (ret)
+ goto err_out;
+
++ /*
++ * Reset BCH here, too. We got failures otherwise :(
++ * See later BCH reset for explanation of MX23 handling
++ */
++ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
++ if (ret)
++ goto err_out;
++
++
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 6c284d1..202ae34 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1366,6 +1366,8 @@ static void bond_compute_features(struct bonding *bond)
+ struct net_device *bond_dev = bond->dev;
+ u32 vlan_features = BOND_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
++ unsigned int gso_max_size = GSO_MAX_SIZE;
++ u16 gso_max_segs = GSO_MAX_SEGS;
+ int i;
+
+ read_lock(&bond->lock);
+@@ -1379,11 +1381,16 @@ static void bond_compute_features(struct bonding *bond)
+
+ if (slave->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = slave->dev->hard_header_len;
++
++ gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
++ gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
+ }
+
+ done:
+ bond_dev->vlan_features = vlan_features;
+ bond_dev->hard_header_len = max_hard_header_len;
++ bond_dev->gso_max_segs = gso_max_segs;
++ netif_set_gso_max_size(bond_dev, gso_max_size);
+
+ read_unlock(&bond->lock);
+
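
bond_compute_features() now folds min() over the slaves' GSO limits, starting from the stack-wide ceilings, so the bond never advertises a GSO size or segment count its weakest slave cannot handle. The aggregation in isolation (constants mirror GSO_MAX_SIZE/GSO_MAX_SEGS; the slave struct is pared down):

#include <stdio.h>

#define GSO_MAX_SIZE 65536u
#define GSO_MAX_SEGS 65535u
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct slave_dev {
	unsigned int gso_max_size;
	unsigned short gso_max_segs;
};

/* Start from the global ceilings and clamp to each slave in turn. */
static void compute_gso_limits(const struct slave_dev *s, int n,
			       unsigned int *size, unsigned short *segs)
{
	int i;

	*size = GSO_MAX_SIZE;
	*segs = GSO_MAX_SEGS;
	for (i = 0; i < n; i++) {
		*size = MIN(*size, s[i].gso_max_size);
		*segs = MIN(*segs, s[i].gso_max_segs);
	}
}

int main(void)
{
	struct slave_dev slaves[] = {
		{ 65536, 65535 },
		{ 16384, 64 },	/* the limiting slave */
	};
	unsigned int size;
	unsigned short segs;

	compute_gso_limits(slaves, 2, &size, &segs);
	printf("gso_max_size=%u gso_max_segs=%u\n", size, segs);
	return 0;
}
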
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 4ef7e2f..a03fde9 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -1579,6 +1579,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
+ goto out;
+ }
+
++ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave, i) {
+ if (!bond_is_active_slave(slave)) {
+ if (new_value)
+@@ -1587,6 +1588,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
+ slave->inactive = 1;
+ }
+ }
++ read_unlock(&bond->lock);
+ out:
+ return ret;
+ }
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 25695bd..a319057 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -569,8 +569,7 @@ void close_candev(struct net_device *dev)
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- if (del_timer_sync(&priv->restart_timer))
+- dev_put(dev);
++ del_timer_sync(&priv->restart_timer);
+ can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
+diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
+index 1063093..e8ee2bc 100644
+--- a/drivers/net/ethernet/8390/ne.c
++++ b/drivers/net/ethernet/8390/ne.c
+@@ -814,6 +814,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ }
++ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = do_ne_probe(dev);
+ if (err) {
+ free_netdev(dev);
+diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
+index 5039f08..43e9ab4 100644
+--- a/drivers/net/irda/sir_dev.c
++++ b/drivers/net/irda/sir_dev.c
+@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+- if (dev->dongle_drv->reset) {
++ if (dev->dongle_drv->set_speed) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 00ed9c1..ac53952 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -62,6 +62,7 @@
+ #define USB_PRODUCT_IPAD 0x129a
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
+ #define USB_PRODUCT_IPHONE_4S 0x12a0
++#define USB_PRODUCT_IPHONE_5 0x12a8
+
+ #define IPHETH_USBINTF_CLASS 255
+ #define IPHETH_USBINTF_SUBCLASS 253
+@@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = {
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, ipheth_table);
+diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
+index 6650fde..9f1e947 100644
+--- a/drivers/net/wimax/i2400m/i2400m-usb.h
++++ b/drivers/net/wimax/i2400m/i2400m-usb.h
+@@ -152,6 +152,9 @@ enum {
+ /* Device IDs */
+ USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
++ USB_DEVICE_ID_I6150 = 0x07d6,
++ USB_DEVICE_ID_I6150_2 = 0x07d7,
++ USB_DEVICE_ID_I6150_3 = 0x07d9,
+ USB_DEVICE_ID_I6250 = 0x0187,
+ };
+
+diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
+index 2c1b8b6..6bb7f3c 100644
+--- a/drivers/net/wimax/i2400m/usb.c
++++ b/drivers/net/wimax/i2400m/usb.c
+@@ -492,6 +492,9 @@ int i2400mu_probe(struct usb_interface *iface,
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
++ case USB_DEVICE_ID_I6150:
++ case USB_DEVICE_ID_I6150_2:
++ case USB_DEVICE_ID_I6150_3:
+ case USB_DEVICE_ID_I6250:
+ i2400mu->i6050 = 1;
+ break;
+@@ -741,6 +744,9 @@ static
+ struct usb_device_id i2400mu_id_table[] = {
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
+ { USB_DEVICE(0x8086, 0x0181) },
+ { USB_DEVICE(0x8086, 0x1403) },
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+index cc54153..498a3c1 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+@@ -835,98 +835,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
+
+ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+- {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+- {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+- {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+- {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+- {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+- {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+- {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+- {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+- {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+- {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+- {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+- {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+- {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+- {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+- {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+- {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+- {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+- {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+- {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
++ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
++ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
++ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
++ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
++ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
++ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
++ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
++ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
++ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
++ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
++ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
++ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
++ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
++ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
++ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
++ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
++ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
++ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
++ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
++ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
++ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
++ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
++ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
++ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
++ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
++ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
++ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
++ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
++ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
++ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
++ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
++ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
++ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
++ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
++ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
++ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
++ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
++ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
++ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
++ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
++ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
++ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
++ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
++ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
++ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
++ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
++ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
++ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
++ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
++ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
++ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+- {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+- {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+- {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+- {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+- {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+- {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
++ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
++ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
++ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
++ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
++ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
++ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
++ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+index 6335a86..69bcdb6 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+@@ -69,13 +69,13 @@
+ #define AR9300_BASE_ADDR 0x3ff
+ #define AR9300_BASE_ADDR_512 0x1ff
+
+-#define AR9300_OTP_BASE 0x14000
+-#define AR9300_OTP_STATUS 0x15f18
++#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
++#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
+ #define AR9300_OTP_STATUS_TYPE 0x7
+ #define AR9300_OTP_STATUS_VALID 0x4
+ #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
+ #define AR9300_OTP_STATUS_SM_BUSY 0x1
+-#define AR9300_OTP_READ_DATA 0x15f1c
++#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
+
+ enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index fb937ba..e9d73e7 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -34,9 +34,6 @@
+ */
+ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+ {
+-#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
+- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
+-
+ #define AR9462_BB_CTX_COEFJ(x) \
+ ar9462_##x##_baseband_core_txfir_coeff_japan_2484
+
+@@ -369,13 +366,13 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+
+ /* Awake -> Sleep Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
++ ar9462_pciephy_clkreq_disable_L1_2p0,
++ ARRAY_SIZE(ar9462_pciephy_clkreq_disable_L1_2p0),
+ 2);
+ /* Sleep -> Awake Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
++ ar9462_pciephy_clkreq_disable_L1_2p0,
++ ARRAY_SIZE(ar9462_pciephy_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index d771de5..bcabfbf 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (chan && chan->noisefloor) {
+ s8 delta = chan->noisefloor -
++ ATH9K_NF_CAL_NOISE_THRESH -
+ ath9k_hw_get_default_nf(ah, chan);
+ if (delta > 0)
+ noise += delta;
+diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
+index 05b9dbf..e300a73 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.h
++++ b/drivers/net/wireless/ath/ath9k/calib.h
+@@ -22,6 +22,9 @@
+ #define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
+ #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
++/* Internal noise floor can vary by about 6db depending on the frequency */
++#define ATH9K_NF_CAL_NOISE_THRESH 6
++
+ #define NUM_NF_READINGS 6
+ #define ATH9K_NF_CAL_HIST_MAX 5
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 17fb25d..9fefb56 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -311,6 +311,14 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+ rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
++ if (dma_mapping_error(bus(trans)->dev, rxb->page_dma)) {
++ rxb->page = NULL;
++ spin_lock_irqsave(&rxq->lock, flags);
++ list_add(&rxb->list, &rxq->rx_used);
++ spin_unlock_irqrestore(&rxq->lock, flags);
++ __free_pages(page, hw_params(trans).rx_page_order);
++ return;
++ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+@@ -489,8 +497,19 @@ static void iwl_rx_handle(struct iwl_trans *trans)
+ 0, PAGE_SIZE <<
+ hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+- list_add_tail(&rxb->list, &rxq->rx_free);
+- rxq->free_count++;
++ if (dma_mapping_error(bus(trans)->dev, rxb->page_dma)) {
++ /*
++ * free the page(s) as well to not break
++ * the invariant that the items on the used
++ * list have no page(s)
++ */
++ __free_pages(rxb->page, hw_params(trans).rx_page_order);
++ rxb->page = NULL;
++ list_add_tail(&rxb->list, &rxq->rx_used);
++ } else {
++ list_add_tail(&rxb->list, &rxq->rx_free);
++ rxq->free_count++;
++ }
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+@@ -1263,12 +1282,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
++ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(bus(trans), CSR_INT);
+
++ if (inta & (~inta_mask)) {
++ IWL_DEBUG_ISR(trans,
++ "We got a masked interrupt (0x%08x)...Ack and ignore\n",
++ inta & (~inta_mask));
++ iwl_write32(bus(trans), CSR_INT, inta & (~inta_mask));
++ inta &= inta_mask;
++ }
++
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+@@ -1349,7 +1376,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
++ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+
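
Both iwlwifi RX paths now treat dma_map_page() as fallible: dma_mapping_error() is checked before the address is used, and on failure the page is freed and the buffer parked back on rx_used, preserving the invariant that entries on that list own no page. A toy model of check-and-unwind around a fallible mapping (the mapper is a stub that can be told to fail):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DMA_ERROR ((uintptr_t)0)	/* stand-in error cookie */

static uintptr_t fake_dma_map(void *page, int fail)
{
	return fail ? DMA_ERROR : (uintptr_t)page;
}

static int fake_mapping_error(uintptr_t addr)
{
	return addr == DMA_ERROR;
}

/* Never hand an unchecked address to the device; on failure undo
 * the allocation so the buffer can be retried later. */
static int rx_allocate(int inject_failure)
{
	void *page = malloc(4096);
	uintptr_t dma;

	if (!page)
		return -1;
	dma = fake_dma_map(page, inject_failure);
	if (fake_mapping_error(dma)) {
		free(page);	/* unwind; rxb would go back to rx_used */
		return -1;
	}
	printf("mapped at %#lx\n", (unsigned long)dma);
	free(page);		/* demo only; the driver keeps the page */
	return 0;
}

int main(void)
{
	printf("ok path: %d\n", rx_allocate(0));
	printf("fail path: %d\n", rx_allocate(1));
	return 0;
}
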
+diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
+index d21e8f5..291906e 100644
+--- a/drivers/pnp/pnpacpi/core.c
++++ b/drivers/pnp/pnpacpi/core.c
+@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
+ if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \
+ return 0
+ #define TEST_ALPHA(c) \
+- if (!('@' <= (c) || (c) <= 'Z')) \
++ if (!('A' <= (c) && (c) <= 'Z')) \
+ return 0
+ static int __init ispnpidacpi(const char *id)
+ {
+@@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
+ return -ENODEV;
+ }
+
++ if (WARN_ON_ONCE(acpi_dev != dev->data))
++ dev->data = acpi_dev;
++
+ ret = pnpacpi_build_resource_template(dev, &buffer);
+ if (ret)
+ return ret;
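
The pnpacpi fix corrects a tautological predicate: in `!('@' <= (c) || (c) <= 'Z')` one side of the || holds for every byte, so TEST_ALPHA never rejected anything; with && and proper 'A'..'Z' bounds it becomes a real range check. Demonstrated:

#include <stddef.h>
#include <stdio.h>

/* Old (buggy): true for every possible character. */
static int is_alpha_old(char c)
{
	return '@' <= c || c <= 'Z';
}

/* Fixed: accepts exactly the uppercase letters a PNP ID may use. */
static int is_alpha_new(char c)
{
	return 'A' <= c && c <= 'Z';
}

int main(void)
{
	const char probes[] = { 'A', 'Z', '1', '@', 'a' };
	size_t i;

	for (i = 0; i < sizeof(probes); i++)
		printf("'%c': old=%d new=%d\n", probes[i],
		       is_alpha_old(probes[i]), is_alpha_new(probes[i]));
	return 0;
}
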
+diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
+index bd3531d..77a6faf 100644
+--- a/drivers/regulator/wm831x-dcdc.c
++++ b/drivers/regulator/wm831x-dcdc.c
+@@ -330,7 +330,7 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+- dcdc->dvs_vsel);
++ vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
+index 07a4fd2..daa6b90 100644
+--- a/drivers/s390/cio/device_pgid.c
++++ b/drivers/s390/cio/device_pgid.c
+@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+ * Determine pathgroup state from PGID data.
+ */
+ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+- int *mismatch, int *reserved, u8 *reset)
++ int *mismatch, u8 *reserved, u8 *reset)
+ {
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+- *reserved = 1;
++ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
+@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+- int reserved = 0;
++ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+- if (reserved)
++ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ }
+ out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+- "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
++ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
+index f17c92c..0fd5ea7 100644
+--- a/drivers/scsi/aha152x.c
++++ b/drivers/scsi/aha152x.c
+@@ -2985,8 +2985,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
+ char *start = pos;
+ int i;
+
+- SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
+- (unsigned int) ptr, ptr->device->id, ptr->device->lun);
++ SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
++ ptr, ptr->device->id, ptr->device->lun);
+
+ for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
+ SPRINTF("0x%02x ", ptr->cmnd[i]);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 22523aa..0f48550 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -98,6 +98,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ {0,}
+@@ -117,13 +126,22 @@ static struct board_type products[] = {
+ {0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324a103C, "Smart Array P712m", &SA5_access},
+ {0x324b103C, "Smart Array P711m", &SA5_access},
+- {0x3350103C, "Smart Array", &SA5_access},
+- {0x3351103C, "Smart Array", &SA5_access},
+- {0x3352103C, "Smart Array", &SA5_access},
+- {0x3353103C, "Smart Array", &SA5_access},
+- {0x3354103C, "Smart Array", &SA5_access},
+- {0x3355103C, "Smart Array", &SA5_access},
+- {0x3356103C, "Smart Array", &SA5_access},
++ {0x3350103C, "Smart Array P222", &SA5_access},
++ {0x3351103C, "Smart Array P420", &SA5_access},
++ {0x3352103C, "Smart Array P421", &SA5_access},
++ {0x3353103C, "Smart Array P822", &SA5_access},
++ {0x3354103C, "Smart Array P420i", &SA5_access},
++ {0x3355103C, "Smart Array P220i", &SA5_access},
++ {0x3356103C, "Smart Array P721m", &SA5_access},
++ {0x1920103C, "Smart Array", &SA5_access},
++ {0x1921103C, "Smart Array", &SA5_access},
++ {0x1922103C, "Smart Array", &SA5_access},
++ {0x1923103C, "Smart Array", &SA5_access},
++ {0x1924103C, "Smart Array", &SA5_access},
++ {0x1925103C, "Smart Array", &SA5_access},
++ {0x1926103C, "Smart Array", &SA5_access},
++ {0x1928103C, "Smart Array", &SA5_access},
++ {0x334d103C, "Smart Array P822se", &SA5_access},
+ {0xFFFF103C, "Unknown Smart Array", &SA5_access},
+ };
+
+diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
+index 8f7eb4f..487aa6f 100644
+--- a/drivers/scsi/mvsas/mv_94xx.h
++++ b/drivers/scsi/mvsas/mv_94xx.h
+@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
+ #define SPI_ADDR_VLD_94XX (1U << 1)
+ #define SPI_CTRL_SpiStart_94XX (1U << 0)
+
+-#define mv_ffc(x) ffz(x)
+-
+ static inline int
+ mv_ffc64(u64 v)
+ {
+- int i;
+- i = mv_ffc((u32)v);
+- if (i >= 0)
+- return i;
+- i = mv_ffc((u32)(v>>32));
+-
+- if (i != 0)
+- return 32 + i;
+-
+- return -1;
++ u64 x = ~v;
++ return x ? __ffs64(x) : -1;
+ }
+
+ #define r_reg_set_enable(i) \
+diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
+index c04a4f5..da24955 100644
+--- a/drivers/scsi/mvsas/mv_sas.h
++++ b/drivers/scsi/mvsas/mv_sas.h
+@@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
+ #define DEV_IS_EXPANDER(type) \
+ ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+-#define bit(n) ((u32)1 << n)
++#define bit(n) ((u64)1 << n)
+
+ #define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc) = (__lseq_mask), (__lseq) = 0; \
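
The mvsas pair of fixes is about 64-bit correctness: the old mv_ffc64() stitched two 32-bit ffz() calls together with broken glue (ffz() has no failure value on a fully-set word, and the `i != 0` test misreports a first clear bit at position 32), and `bit(n)` shifted a 32-bit 1, which is undefined for n >= 32. The rewrite inverts the word and takes find-first-set of the complement. A standalone equivalent, with a GCC builtin standing in for the kernel's __ffs64():

#include <stdint.h>
#include <stdio.h>

/* find-first-clear: invert, then find-first-set; -1 if no bit is
 * clear. __builtin_ctzll plays the role of __ffs64(). */
static int ffc64(uint64_t v)
{
	uint64_t x = ~v;

	return x ? __builtin_ctzll(x) : -1;
}

/* 64-bit-safe single-bit mask, as in the mv_sas.h fix. */
#define BIT64(n) ((uint64_t)1 << (n))

int main(void)
{
	printf("%d\n", ffc64(0));			/* 0 */
	printf("%d\n", ffc64(0x00000000ffffffffULL));	/* 32 */
	printf("%d\n", ffc64(~0ULL));			/* -1 */
	printf("%#llx\n", (unsigned long long)BIT64(40));
	return 0;
}
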
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index f9e5b85..82a5ca6 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3541,9 +3541,9 @@ qla2x00_do_dpc(void *data)
+ "ISP abort end.\n");
+ }
+
+- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
++ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
++ &base_vha->dpc_flags)) {
+ qla2x00_update_fcports(base_vha);
+- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ }
+
+ if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
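
The qla2xxx change swaps test-work-clear for test_and_clear_bit(): clearing after the work wipes out any FCPORT_UPDATE_NEEDED request raised while the update ran, whereas consuming the flag atomically up front lets a concurrent setter re-arm the next pass. Modeled with C11 atomics, where atomic_exchange plays the part of test_and_clear_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint update_needed;

static void dpc_pass(void)
{
	/* Consume the flag before the work; a request set while we
	 * run survives into the next pass instead of being cleared. */
	if (atomic_exchange(&update_needed, 0)) {
		puts("updating fcports");
		/* an atomic_store(&update_needed, 1) here would be kept */
	}
}

int main(void)
{
	atomic_store(&update_needed, 1);
	dpc_pass();	/* runs the update */
	dpc_pass();	/* nothing pending */
	return 0;
}
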
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 08d48a3..72ca515 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -246,11 +246,11 @@ show_shost_active_mode(struct device *dev,
+
+ static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+
+-static int check_reset_type(char *str)
++static int check_reset_type(const char *str)
+ {
+- if (strncmp(str, "adapter", 10) == 0)
++ if (sysfs_streq(str, "adapter"))
+ return SCSI_ADAPTER_RESET;
+- else if (strncmp(str, "firmware", 10) == 0)
++ else if (sysfs_streq(str, "firmware"))
+ return SCSI_FIRMWARE_RESET;
+ else
+ return 0;
+@@ -263,12 +263,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct scsi_host_template *sht = shost->hostt;
+ int ret = -EINVAL;
+- char str[10];
+ int type;
+
+- sscanf(buf, "%s", str);
+- type = check_reset_type(str);
+-
++ type = check_reset_type(buf);
+ if (!type)
+ goto exit_store_host_reset;
+
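
store_host_reset() previously sscanf'd the input into a 10-byte stack buffer, an overflow waiting for writes longer than nine characters, before strncmp'ing with hand-picked lengths; sysfs_streq() compares the sysfs buffer in place and tolerates the trailing newline such writes usually carry, so both the copy and the length games go away. A userspace approximation of that comparison (the kernel exports the real sysfs_streq()):

#include <stdio.h>

/* Equal if the strings match, allowing the first (the raw sysfs
 * buffer) to end in a single '\n'. Simplified from sysfs_streq(). */
static int streq_nl(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	if (*s1 == *s2)
		return 1;
	return *s1 == '\n' && s1[1] == '\0' && *s2 == '\0';
}

int main(void)
{
	printf("%d\n", streq_nl("adapter\n", "adapter"));	/* 1 */
	printf("%d\n", streq_nl("adapter", "adapter"));		/* 1 */
	printf("%d\n", streq_nl("adapters", "adapter"));	/* 0 */
	return 0;
}
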
+diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
+index a09d351..3582535 100644
+--- a/drivers/staging/bcm/InterfaceInit.c
++++ b/drivers/staging/bcm/InterfaceInit.c
+@@ -4,10 +4,12 @@ static struct usb_device_id InterfaceUsbtable[] = {
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
+- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
+diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
+index 058315a..6fa4f09 100644
+--- a/drivers/staging/bcm/InterfaceInit.h
++++ b/drivers/staging/bcm/InterfaceInit.h
+@@ -8,10 +8,11 @@
+ #define BCM_USB_PRODUCT_ID_T3 0x0300
+ #define BCM_USB_PRODUCT_ID_T3B 0x0210
+ #define BCM_USB_PRODUCT_ID_T3L 0x0220
+-#define BCM_USB_PRODUCT_ID_SM250 0xbccd
+ #define BCM_USB_PRODUCT_ID_SYM 0x15E
+ #define BCM_USB_PRODUCT_ID_1901 0xe017
+-#define BCM_USB_PRODUCT_ID_226 0x0132
++#define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */
++#define BCM_USB_PRODUCT_ID_ZTE_226 0x172
++#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */
+ #define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
+
+ #define BCM_USB_MINOR_BASE 192
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 08021f4..4664e9d 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+ } else {
+@@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
+index 27bb523..fd93e83 100644
+--- a/drivers/staging/vt6656/key.c
++++ b/drivers/staging/vt6656/key.c
+@@ -223,7 +223,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -235,7 +235,8 @@ BOOL KeybSetKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Enter KeybSetKey: %X\n", dwKeyIndex);
+
+ j = (MAX_KEY_TABLE-1);
+ for (i=0;i<(MAX_KEY_TABLE-1);i++) {
+@@ -261,7 +262,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
+@@ -302,9 +305,12 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ",
++ pKey->wTSC15_0);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -326,7 +332,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(N)[%X]: %d\n",
++ pTable->KeyTable[j].dwGTKeyIndex, j);
+ }
+ pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
+@@ -367,9 +375,11 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
++ pTable->KeyTable[i].dwGTKeyIndex);
+
+ return (TRUE);
+ }
+@@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -693,7 +704,10 @@ BOOL KeybSetDefaultKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
++ MAX_KEY_TABLE-1);
+
+ }
+ pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
+@@ -744,9 +758,11 @@ BOOL KeybSetDefaultKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -772,7 +788,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -784,7 +800,8 @@ BOOL KeybSetAllGroupKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
++ dwKeyIndex);
+
+
+ if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
+@@ -801,7 +818,9 @@ BOOL KeybSetAllGroupKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
+index f749c7a..bd35d39 100644
+--- a/drivers/staging/vt6656/key.h
++++ b/drivers/staging/vt6656/key.h
+@@ -58,7 +58,7 @@
+ typedef struct tagSKeyItem
+ {
+ BOOL bKeyValid;
+- unsigned long uKeyLength;
++ u32 uKeyLength;
+ BYTE abyKey[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ DWORD dwTSC47_16;
+@@ -107,7 +107,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
+index 26c19d1..0636d82 100644
+--- a/drivers/staging/vt6656/mac.c
++++ b/drivers/staging/vt6656/mac.c
+@@ -262,7 +262,8 @@ BYTE pbyData[24];
+ dwData1 <<= 16;
+ dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
++ " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+@@ -279,7 +280,8 @@ BYTE pbyData[24];
+ dwData2 <<= 8;
+ dwData2 |= *(pbyAddr+0);
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
++ wOffset, dwData2);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
+index 3fd0478..8cf0881 100644
+--- a/drivers/staging/vt6656/rf.c
++++ b/drivers/staging/vt6656/rf.c
+@@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr;
+ return TRUE;
+ }
+
++ if (uCH == 0)
++ return -EINVAL;
++
+ switch (uRATE) {
+ case RATE_1M:
+ case RATE_2M:
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index fe21868..3beb126 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -377,7 +377,8 @@ s_vFillTxKey (
+ *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
+ // Append IV&ExtIV after Mac Header
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
++ *pdwExtIV);
+
+ } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
+ pTransmitKey->wTSC15_0++;
+@@ -1753,7 +1754,8 @@ s_bPacketToWirelessUsb(
+ MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n",
++ dwMICKey0, dwMICKey1);
+
+ ///////////////////////////////////////////////////////////////////
+
+@@ -2635,7 +2637,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+ MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\
++ " %X, %X\n", dwMICKey0, dwMICKey1);
+
+ uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
+
+@@ -2655,7 +2658,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n",
++ *pdwMIC_L, *pdwMIC_R);
+
+ }
+
+@@ -3029,7 +3033,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
+ }
+ else {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+@@ -3043,7 +3048,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ if (pDevice->bEnableHostWEP) {
+ if ((uNodeIndex != 0) &&
+ (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
+index 8e9450e..dfbf747 100644
+--- a/drivers/staging/vt6656/ttype.h
++++ b/drivers/staging/vt6656/ttype.h
+@@ -29,6 +29,8 @@
+ #ifndef __TTYPE_H__
+ #define __TTYPE_H__
+
++#include <linux/types.h>
++
+ /******* Common definitions and typedefs ***********************************/
+
+ typedef int BOOL;
+@@ -42,17 +44,17 @@ typedef int BOOL;
+
+ /****** Simple typedefs ***************************************************/
+
+-typedef unsigned char BYTE; // 8-bit
+-typedef unsigned short WORD; // 16-bit
+-typedef unsigned long DWORD; // 32-bit
++typedef u8 BYTE;
++typedef u16 WORD;
++typedef u32 DWORD;
+
+ // QWORD is for those situation that we want
+ // an 8-byte-aligned 8 byte long structure
+ // which is NOT really a floating point number.
+ typedef union tagUQuadWord {
+ struct {
+- DWORD dwLowDword;
+- DWORD dwHighDword;
++ u32 dwLowDword;
++ u32 dwHighDword;
+ } u;
+ double DoNotUseThisField;
+ } UQuadWord;
+@@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit
+
+ /****** Common pointer types ***********************************************/
+
+-typedef unsigned long ULONG_PTR; // 32-bit
+-typedef unsigned long DWORD_PTR; // 32-bit
++typedef u32 ULONG_PTR;
++typedef u32 DWORD_PTR;
+
+ // boolean pointer
+
+diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
+index 78ea121..31fb96a 100644
+--- a/drivers/staging/vt6656/wcmd.c
++++ b/drivers/staging/vt6656/wcmd.c
+@@ -316,17 +316,19 @@ s_MgrMakeProbeRequest(
+ return pTxPacket;
+ }
+
+-void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
++void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
+ {
+- PSDevice pDevice = (PSDevice)hDeviceContext;
++ PSDevice pDevice = (PSDevice)hDeviceContext;
+
+- init_timer(&pDevice->sTimerCommand);
+- pDevice->sTimerCommand.data = (unsigned long)pDevice;
+- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
+- // RUN_AT :1 msec ~= (HZ/1024)
+- pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
+- add_timer(&pDevice->sTimerCommand);
+- return;
++ init_timer(&pDevice->sTimerCommand);
++
++ pDevice->sTimerCommand.data = (unsigned long)pDevice;
++ pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
++ pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
++
++ add_timer(&pDevice->sTimerCommand);
++
++ return;
+ }
+
+ void vRunCommand(void *hDeviceContext)
+diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
+index 46c2959..c359252 100644
+--- a/drivers/staging/vt6656/wpa2.h
++++ b/drivers/staging/vt6656/wpa2.h
+@@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo {
+ } PMKIDInfo, *PPMKIDInfo;
+
+ typedef struct tagSPMKIDCache {
+- unsigned long BSSIDInfoCount;
+- PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
++ u32 BSSIDInfoCount;
++ PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
+ } SPMKIDCache, *PSPMKIDCache;
+
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6fa7222..3effde2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2367,7 +2367,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ if (!conn_p)
+ return;
+
+- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
++ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index eb0c9fe..0df4a5f 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -130,13 +130,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+
+ initiatorname_param = iscsi_find_param_from_key(
+ INITIATORNAME, conn->param_list);
+- if (!initiatorname_param)
+- return -1;
+-
+ sessiontype_param = iscsi_find_param_from_key(
+ SESSIONTYPE, conn->param_list);
+- if (!sessiontype_param)
++ if (!initiatorname_param || !sessiontype_param) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
++ }
+
+ sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 98936cb..7d85f88 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -632,8 +632,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+- if (ret < 0)
++ if (ret < 0) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
++ }
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+@@ -648,8 +651,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+- if (ret < 0)
++ if (ret < 0) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
++ }
+
+ if (!login->auth_complete &&
+ ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cafa477..ea29eaf 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -300,7 +300,7 @@ static int fd_do_readv(struct se_task *task)
+
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+- iov[i].iov_base = sg_virt(sg);
++ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+ }
+
+ old_fs = get_fs();
+@@ -308,6 +308,8 @@ static int fd_do_readv(struct se_task *task)
+ ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+ set_fs(old_fs);
+
++ for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
++ kunmap(sg_page(sg));
+ kfree(iov);
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+@@ -353,7 +355,7 @@ static int fd_do_writev(struct se_task *task)
+
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+- iov[i].iov_base = sg_virt(sg);
++ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+ }
+
+ old_fs = get_fs();
+@@ -361,6 +363,9 @@ static int fd_do_writev(struct se_task *task)
+ ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+ set_fs(old_fs);
+
++ for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
++ kunmap(sg_page(sg));
++
+ kfree(iov);
+
+ if (ret < 0 || ret != task->task_size) {
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 64ddb63..3f28fdb 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -465,7 +465,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
+ {
+ struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
+
+- transport_deregister_session(sess->se_sess);
+ kfree(sess);
+ }
+
+@@ -473,6 +472,7 @@ static void ft_sess_free(struct kref *kref)
+ {
+ struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+
++ transport_deregister_session(sess->se_sess);
+ call_rcu(&sess->rcu, ft_sess_rcu_free);
+ }
+
+diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
+index d5f923b..e1abb45 100644
+--- a/drivers/telephony/ixj.c
++++ b/drivers/telephony/ixj.c
+@@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j)
+
+ ixj_fsk_alloc(j);
+
+- strcpy(sdmf1, j->cid_send.month);
+- strcat(sdmf1, j->cid_send.day);
+- strcat(sdmf1, j->cid_send.hour);
+- strcat(sdmf1, j->cid_send.min);
+- strcpy(sdmf2, j->cid_send.number);
+- strcpy(sdmf3, j->cid_send.name);
++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1));
++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2));
++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3));
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+@@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j)
+ ixj_pre_cid(j);
+ }
+ j->flags.cidcw_ack = 0;
+- strcpy(sdmf1, j->cid_send.month);
+- strcat(sdmf1, j->cid_send.day);
+- strcat(sdmf1, j->cid_send.hour);
+- strcat(sdmf1, j->cid_send.min);
+- strcpy(sdmf2, j->cid_send.number);
+- strcpy(sdmf3, j->cid_send.name);
++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1));
++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2));
++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3));
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 9aaed0d..97b2c55 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -301,7 +301,7 @@ static void cleanup(struct wdm_device *desc)
+ desc->sbuf,
+ desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+- desc->bMaxPacketSize0,
++ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
+ kfree(desc->orq);
+@@ -788,7 +788,7 @@ out:
+ err3:
+ usb_set_intfdata(intf, NULL);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+- desc->bMaxPacketSize0,
++ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
+ err2:
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index ef116a5..ab11ca3c 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1770,28 +1770,8 @@ free_interfaces:
+ goto free_interfaces;
+ }
+
+- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
+- NULL, 0, USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
+- /* All the old state is gone, so what else can we do?
+- * The device is probably useless now anyway.
+- */
+- cp = NULL;
+- }
+-
+- dev->actconfig = cp;
+- if (!cp) {
+- usb_set_device_state(dev, USB_STATE_ADDRESS);
+- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_autosuspend_device(dev);
+- goto free_interfaces;
+- }
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_set_device_state(dev, USB_STATE_CONFIGURED);
+-
+- /* Initialize the new interface structures and the
++ /*
++ * Initialize the new interface structures and the
+ * hc/hcd/usbcore interface/endpoint state.
+ */
+ for (i = 0; i < nintf; ++i) {
+@@ -1835,6 +1815,35 @@ free_interfaces:
+ }
+ kfree(new_interfaces);
+
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++ USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (ret < 0 && cp) {
++ /*
++ * All the old state is gone, so what else can we do?
++ * The device is probably useless now anyway.
++ */
++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
++ for (i = 0; i < nintf; ++i) {
++ usb_disable_interface(dev, cp->interface[i], true);
++ put_device(&cp->interface[i]->dev);
++ cp->interface[i] = NULL;
++ }
++ cp = NULL;
++ }
++
++ dev->actconfig = cp;
++ mutex_unlock(hcd->bandwidth_mutex);
++
++ if (!cp) {
++ usb_set_device_state(dev, USB_STATE_ADDRESS);
++
++ /* Leave LPM disabled while the device is unconfigured. */
++ usb_autosuspend_device(dev);
++ return ret;
++ }
++ usb_set_device_state(dev, USB_STATE_CONFIGURED);
++
+ if (cp->string == NULL &&
+ !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
+ cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
+diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
+index 11c07cb..72fd355 100644
+--- a/drivers/usb/gadget/f_ecm.c
++++ b/drivers/usb/gadget/f_ecm.c
+@@ -790,9 +790,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+- if (ecm->port.out_ep->desc)
++ if (ecm->port.out_ep)
+ ecm->port.out_ep->driver_data = NULL;
+- if (ecm->port.in_ep->desc)
++ if (ecm->port.in_ep)
+ ecm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
+index 1a7b2dd..a9cf2052 100644
+--- a/drivers/usb/gadget/f_eem.c
++++ b/drivers/usb/gadget/f_eem.c
+@@ -319,10 +319,9 @@ fail:
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
+- /* we might as well release our claims on endpoints */
+- if (eem->port.out_ep->desc)
++ if (eem->port.out_ep)
+ eem->port.out_ep->driver_data = NULL;
+- if (eem->port.in_ep->desc)
++ if (eem->port.in_ep)
+ eem->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
+index 3797b3d..dfd7b98 100644
+--- a/drivers/usb/gadget/f_midi.c
++++ b/drivers/usb/gadget/f_midi.c
+@@ -416,6 +416,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+ midi->id = NULL;
+
+ usb_free_descriptors(f->descriptors);
++ usb_free_descriptors(f->hs_descriptors);
+ kfree(midi);
+ }
+
+diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
+index aab8ede..d7811ae 100644
+--- a/drivers/usb/gadget/f_ncm.c
++++ b/drivers/usb/gadget/f_ncm.c
+@@ -1259,9 +1259,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ncm->notify)
+ ncm->notify->driver_data = NULL;
+- if (ncm->port.out_ep->desc)
++ if (ncm->port.out_ep)
+ ncm->port.out_ep->driver_data = NULL;
+- if (ncm->port.in_ep->desc)
++ if (ncm->port.in_ep)
+ ncm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
+index 16a509a..5431493 100644
+--- a/drivers/usb/gadget/f_phonet.c
++++ b/drivers/usb/gadget/f_phonet.c
+@@ -532,7 +532,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+
+ req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
+ if (!req)
+- goto err;
++ goto err_req;
+
+ req->complete = pn_rx_complete;
+ fp->out_reqv[i] = req;
+@@ -541,14 +541,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+ /* Outgoing USB requests */
+ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
+ if (!fp->in_req)
+- goto err;
++ goto err_req;
+
+ INFO(cdev, "USB CDC Phonet function\n");
+ INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
+ fp->out_ep->name, fp->in_ep->name);
+ return 0;
+
++err_req:
++ for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
++ usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
+ err:
++
+ if (fp->out_ep)
+ fp->out_ep->driver_data = NULL;
+ if (fp->in_ep)
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index 704d1d9..817d611 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -802,9 +802,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (rndis->notify)
+ rndis->notify->driver_data = NULL;
+- if (rndis->port.out_ep->desc)
++ if (rndis->port.out_ep)
+ rndis->port.out_ep->driver_data = NULL;
+- if (rndis->port.in_ep->desc)
++ if (rndis->port.in_ep)
+ rndis->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index 21ab474..e5bb966 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -370,9 +370,9 @@ fail:
+ usb_free_descriptors(f->hs_descriptors);
+
+ /* we might as well release our claims on endpoints */
+- if (geth->port.out_ep->desc)
++ if (geth->port.out_ep)
+ geth->port.out_ep->driver_data = NULL;
+- if (geth->port.in_ep->desc)
++ if (geth->port.in_ep)
+ geth->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
+index 2022fe49..a0abc65 100644
+--- a/drivers/usb/gadget/f_uvc.c
++++ b/drivers/usb/gadget/f_uvc.c
+@@ -335,7 +335,6 @@ uvc_register_video(struct uvc_device *uvc)
+ return -ENOMEM;
+
+ video->parent = &cdev->gadget->dev;
+- video->minor = -1;
+ video->fops = &uvc_v4l2_fops;
+ video->release = video_device_release;
+ strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+@@ -462,23 +461,12 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ INFO(cdev, "uvc_function_unbind\n");
+
+- if (uvc->vdev) {
+- if (uvc->vdev->minor == -1)
+- video_device_release(uvc->vdev);
+- else
+- video_unregister_device(uvc->vdev);
+- uvc->vdev = NULL;
+- }
+-
+- if (uvc->control_ep)
+- uvc->control_ep->driver_data = NULL;
+- if (uvc->video.ep)
+- uvc->video.ep->driver_data = NULL;
++ video_unregister_device(uvc->vdev);
++ uvc->control_ep->driver_data = NULL;
++ uvc->video.ep->driver_data = NULL;
+
+- if (uvc->control_req) {
+- usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+- kfree(uvc->control_buf);
+- }
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+@@ -563,7 +551,22 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ return 0;
+
+ error:
+- uvc_function_unbind(c, f);
++ if (uvc->vdev)
++ video_device_release(uvc->vdev);
++
++ if (uvc->control_ep)
++ uvc->control_ep->driver_data = NULL;
++ if (uvc->video.ep)
++ uvc->video.ep->driver_data = NULL;
++
++ if (uvc->control_req) {
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
++ }
++
++ kfree(f->descriptors);
++ kfree(f->hs_descriptors);
++ kfree(f->ss_descriptors);
+ return ret;
+ }
+
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index a79e64b..b71e22e 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -359,7 +359,8 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == 0x1E26 ||
+ pdev->device == 0x8C2D ||
+- pdev->device == 0x8C26);
++ pdev->device == 0x8C26 ||
++ pdev->device == 0x9C26);
+ }
+
+ static void ehci_enable_xhci_companion(void)
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 2023733..5bb2dcb 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -264,18 +264,14 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+ __releases(ehci->lock)
+ __acquires(ehci->lock)
+ {
+- if (likely (urb->hcpriv != NULL)) {
+- struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
+-
+- /* S-mask in a QH means it's an interrupt urb */
+- if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
+-
+- /* ... update hc-wide periodic stats (for usbfs) */
+- ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
+- }
+- qh_put (qh);
++ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
++ /* ... update hc-wide periodic stats */
++ ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
+ }
+
++ if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS)
++ qh_put((struct ehci_qh *) urb->hcpriv);
++
+ if (unlikely(urb->unlinked)) {
+ COUNT(ehci->stats.unlink);
+ } else {
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index a60679c..34655d0 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1684,7 +1684,7 @@ itd_link_urb (
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, iso_sched);
+- urb->hcpriv = NULL;
++ urb->hcpriv = stream;
+
+ timer_action (ehci, TIMER_IO_WATCHDOG);
+ return enable_periodic(ehci);
+@@ -2094,7 +2094,7 @@ sitd_link_urb (
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, sched);
+- urb->hcpriv = NULL;
++ urb->hcpriv = stream;
+
+ timer_action (ehci, TIMER_IO_WATCHDOG);
+ return enable_periodic(ehci);
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index 15dc51d..e0ae777 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -1130,6 +1130,25 @@ dl_done_list (struct ohci_hcd *ohci)
+
+ while (td) {
+ struct td *td_next = td->next_dl_td;
++ struct ed *ed = td->ed;
++
++ /*
++ * Some OHCI controllers (NVIDIA for sure, maybe others)
++ * occasionally forget to add TDs to the done queue. Since
++ * TDs for a given endpoint are always processed in order,
++ * if we find a TD on the donelist then all of its
++ * predecessors must be finished as well.
++ */
++ for (;;) {
++ struct td *td2;
++
++ td2 = list_first_entry(&ed->td_list, struct td,
++ td_list);
++ if (td2 == td)
++ break;
++ takeback_td(ohci, td2);
++ }
++
+ takeback_td(ohci, td);
+ td = td_next;
+ }
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index c2815a5..5cc401b 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -723,6 +723,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ }
+
+ #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31
++#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31
+
+ bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
+ {
+@@ -736,7 +737,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
+ {
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI);
+ }
+
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 4cddbfc..5719c4d 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -178,8 +178,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, flags);
+- if (!next)
++ if (!next) {
++ prev = ring->first_seg;
++ while (prev) {
++ next = prev->next;
++ xhci_segment_free(xhci, prev);
++ prev = next;
++ }
+ goto fail;
++ }
+ xhci_link_segments(xhci, prev, next, link_trbs, isoc);
+
+ prev = next;
+@@ -199,7 +206,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ return ring;
+
+ fail:
+- xhci_ring_free(xhci, ring);
++ kfree(ring);
+ return NULL;
+ }
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4ed7572..aca647a 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -29,6 +29,7 @@
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
+
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_ASROCK_P67 0x7023
+@@ -58,8 +59,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+
+ /* Look for vendor-specific quirks */
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
+- if (pdev->revision == 0x0) {
++ (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
++ pdev->revision == 0x0) {
+ xhci->quirks |= XHCI_RESET_EP_QUIRK;
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7de9993..1ba98f5 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2995,11 +2995,11 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ }
+
+ /*
+- * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
+- * the TD (*not* including this TRB).
++ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
++ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+- * roundup(TD size in bytes / wMaxPacketSize)
++ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+@@ -3007,15 +3007,16 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
++ * The last TRB in a TD must have the TD size set to zero.
+ */
+-
+ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+- unsigned int total_packet_count, struct urb *urb)
++ unsigned int total_packet_count, struct urb *urb,
++ unsigned int num_trbs_left)
+ {
+ int packets_transferred;
+
+ /* One TRB with a zero-length data packet. */
+- if (running_total == 0 && trb_buff_len == 0)
++ if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+ return 0;
+
+ /* All the TRB queueing functions don't count the current TRB in
+@@ -3024,7 +3025,9 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ packets_transferred = (running_total + trb_buff_len) /
+ usb_endpoint_maxp(&urb->ep->desc);
+
+- return xhci_td_remainder(total_packet_count - packets_transferred);
++ if ((total_packet_count - packets_transferred) > 31)
++ return 31 << 17;
++ return (total_packet_count - packets_transferred) << 17;
+ }
+
+ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+@@ -3051,7 +3054,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+ num_sgs = urb->num_mapped_sgs;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+
+ trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+@@ -3141,7 +3144,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3258,7 +3262,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+@@ -3304,7 +3308,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3579,7 +3584,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ addr = start_addr + urb->iso_frame_desc[i].offset;
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+- total_packet_count = roundup(td_len,
++ total_packet_count = DIV_ROUND_UP(td_len,
+ usb_endpoint_maxp(&urb->ep->desc));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+@@ -3659,7 +3664,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ remainder = xhci_v1_0_td_remainder(
+ running_total, trb_buff_len,
+- total_packet_count, urb);
++ total_packet_count, urb,
++ (trbs_per_td - j - 1));
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index dab05d1..9dc5870 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -472,7 +472,7 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+ strstr(dmi_product_name, "Z820") ||
+- strstr(dmi_product_name, "Z1"))
++ strstr(dmi_product_name, "Z1 Workstation"))
+ return true;
+
+ return false;
+@@ -2241,7 +2241,7 @@ static bool xhci_is_async_ep(unsigned int ep_type)
+
+ static bool xhci_is_sync_in_ep(unsigned int ep_type)
+ {
+- return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP);
++ return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+ }
+
+ static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
+index 318fb4e..64d7b38 100644
+--- a/drivers/usb/musb/cppi_dma.c
++++ b/drivers/usb/musb/cppi_dma.c
+@@ -1313,6 +1313,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(cppi_interrupt);
+
+ /* Instantiate a software object representing a DMA controller. */
+ struct dma_controller *__init
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 35e6b5f..381d00d 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
++ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e29a664..3f989d6 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -197,6 +197,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+@@ -1807,7 +1808,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ dbg("%s", __func__);
+
+ if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100")))
++ (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+ return ftdi_jtag_probe(serial);
+
+ return 0;
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 7b5eb74..aedf65f 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -752,6 +752,12 @@
+ #define TTI_VID 0x103E /* Vendor Id */
+ #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
+
++/*
++ * Newport Cooperation (www.newport.com)
++ */
++#define NEWPORT_VID 0x104D
++#define NEWPORT_AGILIS_PID 0x3000
++
+ /* Interbiometrics USB I/O Board */
+ /* Developed for Interbiometrics by Rudolf Gugler */
+ #define INTERBIOMETRICS_VID 0x1209
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a5f875d..872807b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -80,6 +80,7 @@ static void option_instat_callback(struct urb *urb);
+ #define OPTION_PRODUCT_GTM380_MODEM 0x7201
+
+ #define HUAWEI_VENDOR_ID 0x12D1
++#define HUAWEI_PRODUCT_E173 0x140C
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+@@ -552,6 +553,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -883,6 +886,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
+@@ -903,20 +910,34 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+@@ -1096,6 +1117,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
+index fe2d803..303c34b 100644
+--- a/drivers/usb/storage/Kconfig
++++ b/drivers/usb/storage/Kconfig
+@@ -203,7 +203,7 @@ config USB_STORAGE_ENE_UB6250
+
+ config USB_UAS
+ tristate "USB Attached SCSI"
+- depends on USB && SCSI
++ depends on USB && SCSI && BROKEN
+ help
+ The USB Attached SCSI protocol is supported by some USB
+ storage devices. It permits higher performance by supporting
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index c7a2c20..dc2eed1 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -121,6 +121,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
+ unsigned head;
+ int i;
+
++ /*
++ * We require lowmem mappings for the descriptors because
++ * otherwise virt_to_phys will give us bogus addresses in the
++ * virtqueue.
++ */
++ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
++
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
+ if (!desc)
+ return -ENOMEM;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 1e9edbd..ca52e92 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -175,7 +175,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto _error;
+ bprm->argc ++;
+
+- bprm->interp = iname; /* for binfmt_script */
++ /* Update interp in case binfmt_script needs it. */
++ retval = bprm_change_interp(iname, bprm);
++ if (retval < 0)
++ goto _error;
+
+ interp_file = open_exec (iname);
+ retval = PTR_ERR (interp_file);
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 396a988..e39c18a 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ retval = copy_strings_kernel(1, &i_name, bprm);
+ if (retval) return retval;
+ bprm->argc++;
+- bprm->interp = interp;
++ retval = bprm_change_interp(interp, bprm);
++ if (retval < 0)
++ return retval;
+
+ /*
+ * OK, now restart the process with the interpreter's dentry.
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 63c0c6b..bb7f4cc 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1492,7 +1492,7 @@ static struct dentry * d_find_any_alias(struct inode *inode)
+ */
+ struct dentry *d_obtain_alias(struct inode *inode)
+ {
+- static const struct qstr anonstring = { .name = "" };
++ static const struct qstr anonstring = { .name = "/", .len = 1 };
+ struct dentry *tmp;
+ struct dentry *res;
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 121ccae..c27fa0d 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1095,7 +1095,8 @@ int flush_old_exec(struct linux_binprm * bprm)
+ bprm->mm = NULL; /* We're using it now */
+
+ set_fs(USER_DS);
+- current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE);
++ current->flags &=
++ ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE | PF_FREEZER_NOSIG);
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+@@ -1201,9 +1202,24 @@ void free_bprm(struct linux_binprm *bprm)
+ mutex_unlock(&current->signal->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
++ /* If a binfmt changed the interp, free it. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
+ kfree(bprm);
+ }
+
++int bprm_change_interp(char *interp, struct linux_binprm *bprm)
++{
++ /* If a binfmt changed the interp, free it first. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
++ bprm->interp = kstrdup(interp, GFP_KERNEL);
++ if (!bprm->interp)
++ return -ENOMEM;
++ return 0;
++}
++EXPORT_SYMBOL(bprm_change_interp);
++
+ /*
+ * install the new credentials for this executable
+ */
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index a5c29bb..8535c45 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -410,8 +410,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+
+ retry:
+ handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
++ if (IS_ERR(handle)) {
++ error = PTR_ERR(handle);
++ goto release_and_out;
++ }
+ error = ext4_set_acl(handle, inode, type, acl);
+ ext4_journal_stop(handle);
+ if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bac2330..8424dda 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1422,6 +1422,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
++ pagevec_init(&pvec, 0);
+ while (index <= end) {
+ nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+ if (nr_pages == 0)
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 873bf00..d03a400 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -672,8 +672,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
+ */
+ static void nfs_destroy_server(struct nfs_server *server)
+ {
+- if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
+- !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
++ if (server->nlm_host)
+ nlmclnt_done(server->nlm_host);
+ }
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 864b831..2f98c53 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5544,13 +5544,26 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+ rpc_call_start(task);
+ }
+
++static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
++{
++ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
++ nfs41_sequence_prepare(task, data);
++}
++
+ static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+ .rpc_release = nfs41_sequence_release,
+ };
+
+-static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
++static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
++ .rpc_call_done = nfs41_sequence_call_done,
++ .rpc_call_prepare = nfs41_sequence_prepare_privileged,
++ .rpc_release = nfs41_sequence_release,
++};
++
++static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
++ const struct rpc_call_ops *seq_ops)
+ {
+ struct nfs4_sequence_data *calldata;
+ struct rpc_message msg = {
+@@ -5560,7 +5573,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+- .callback_ops = &nfs41_sequence_ops,
++ .callback_ops = seq_ops,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+ };
+
+@@ -5586,7 +5599,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+ return 0;
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else
+@@ -5600,7 +5613,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+ struct rpc_task *task;
+ int ret;
+
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b8c5538..fe5c5fb 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -193,6 +193,7 @@ static __be32
+ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ struct svc_fh resfh;
++ int accmode;
+ __be32 status;
+
+ fh_init(&resfh, NFS4_FHSIZE);
+@@ -252,9 +253,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
+ /* set reply cache */
+ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
+ &resfh.fh_handle);
+- if (!open->op_created)
+- status = do_open_permission(rqstp, current_fh, open,
+- NFSD_MAY_NOP);
++ accmode = NFSD_MAY_NOP;
++ if (open->op_created)
++ accmode |= NFSD_MAY_OWNER_OVERRIDE;
++ status = do_open_permission(rqstp, current_fh, open, accmode);
+
+ out:
+ fh_put(&resfh);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index d225b51..8b197d2 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2309,7 +2309,7 @@ nfsd4_init_slabs(void)
+ if (openowner_slab == NULL)
+ goto out_nomem;
+ lockowner_slab = kmem_cache_create("nfsd4_lockowners",
+- sizeof(struct nfs4_openowner), 0, 0, NULL);
++ sizeof(struct nfs4_lockowner), 0, 0, NULL);
+ if (lockowner_slab == NULL)
+ goto out_nomem;
+ file_slab = kmem_cache_create("nfsd4_files",
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 87a1746..800c215 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2909,11 +2909,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ len = maxcount;
+ v = 0;
+ while (len > 0) {
+- pn = resp->rqstp->rq_resused++;
++ pn = resp->rqstp->rq_resused;
++ if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */
++ maxcount -= len;
++ break;
++ }
+ resp->rqstp->rq_vec[v].iov_base =
+ page_address(resp->rqstp->rq_respages[pn]);
+ resp->rqstp->rq_vec[v].iov_len =
+ len < PAGE_SIZE ? len : PAGE_SIZE;
++ resp->rqstp->rq_resused++;
+ v++;
+ len -= PAGE_SIZE;
+ }
+@@ -2959,6 +2964,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
+
+@@ -3008,6 +3015,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ RESERVE_SPACE(8); /* verifier */
+ savep = p;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index eda7d7e..7595582 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -633,7 +633,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ }
+
+ /* Store reply in cache. */
+- nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
++ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
+ return 1;
+ }
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 5c3cd82..1ec1fde 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1458,13 +1458,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ case NFS3_CREATE_EXCLUSIVE:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ break;
++ }
+ case NFS4_CREATE_EXCLUSIVE4_1:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ goto set_attr;
++ }
+ /* fallthru */
+ case NFS3_CREATE_GUARDED:
+ err = nfserr_exist;
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 3a1dafd..439b5a1 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -204,7 +204,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+ group_info = cred->group_info;
+ task_unlock(p);
+
+- for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
++ for (g = 0; g < group_info->ngroups; g++)
+ seq_printf(m, "%d ", GROUP_AT(group_info, g));
+ put_cred(cred);
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index e2787d0..15df1a4 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -744,6 +744,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ goal, err);
+ if (!newblocknum) {
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+ *err = -ENOSPC;
+ return NULL;
+ }
+@@ -774,6 +776,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
+
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+
+ newblock = udf_get_pblock(inode->i_sb, newblocknum,
+ iinfo->i_location.partitionReferenceNum, 0);
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index fd88a39..f606406 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -127,6 +127,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
+ unsigned long stack_top,
+ int executable_stack);
+ extern int bprm_mm_init(struct linux_binprm *bprm);
++extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+ extern int copy_strings_kernel(int argc, const char *const *argv,
+ struct linux_binprm *bprm);
+ extern int prepare_bprm_creds(struct linux_binprm *bprm);
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 1b7f9d5..9bab75f 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -32,7 +32,6 @@ extern int cgroup_lock_is_held(void);
+ extern bool cgroup_lock_live_group(struct cgroup *cgrp);
+ extern void cgroup_unlock(void);
+ extern void cgroup_fork(struct task_struct *p);
+-extern void cgroup_fork_callbacks(struct task_struct *p);
+ extern void cgroup_post_fork(struct task_struct *p);
+ extern void cgroup_exit(struct task_struct *p, int run_callbacks);
+ extern int cgroupstats_build(struct cgroupstats *stats,
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index b5d6b6a..862e67b 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -88,9 +88,16 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+ * parent.
+ */
+
+-/*
+- * If the current task is a user space one, tell the freezer not to count it as
+- * freezable.
++/**
++ * freezer_do_not_count - tell freezer to ignore %current if a user space task
++ *
++ * Tell freezers to ignore the current task when determining whether the
++ * target frozen state is reached. IOW, the current task will be
++ * considered frozen enough by freezers.
++ *
++ * The caller shouldn't do anything which isn't allowed for a frozen task
++ * until freezer_count() is called. Usually, freezer[_do_not]_count() pair
++ * wrap a scheduling operation and nothing much else.
+ */
+ static inline void freezer_do_not_count(void)
+ {
+@@ -98,24 +105,48 @@ static inline void freezer_do_not_count(void)
+ current->flags |= PF_FREEZER_SKIP;
+ }
+
+-/*
+- * If the current task is a user space one, tell the freezer to count it as
+- * freezable again and try to freeze it.
++/**
++ * freezer_count - tell freezer to stop ignoring %current if a user space task
++ *
++ * Undo freezer_do_not_count(). It tells freezers that %current should be
++ * considered again and tries to freeze if freezing condition is already in
++ * effect.
+ */
+ static inline void freezer_count(void)
+ {
+ if (current->mm) {
+ current->flags &= ~PF_FREEZER_SKIP;
++ /*
++ * If freezing is in progress, the following paired with smp_mb()
++ * in freezer_should_skip() ensures that either we see %true
++ * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
++ */
++ smp_mb();
+ try_to_freeze();
+ }
+ }
+
+-/*
+- * Check if the task should be counted as freezable by the freezer
++/**
++ * freezer_should_skip - whether to skip a task when determining frozen
++ * state is reached
++ * @p: task in question
++ *
++ * This function is used by freezers after establishing %true freezing() to
++ * test whether a task should be skipped when determining the target frozen
++ * state is reached. IOW, if this function returns %true, @p is considered
++ * frozen enough.
+ */
+-static inline int freezer_should_skip(struct task_struct *p)
++static inline bool freezer_should_skip(struct task_struct *p)
+ {
+- return !!(p->flags & PF_FREEZER_SKIP);
++ /*
++ * The following smp_mb() paired with the one in freezer_count()
++ * ensures that either freezer_count() sees %true freezing() or we
++ * see cleared %PF_FREEZER_SKIP and return %false. This makes it
++ * impossible for a task to slip frozen state testing after
++ * clearing %PF_FREEZER_SKIP.
++ */
++ smp_mb();
++ return p->flags & PF_FREEZER_SKIP;
+ }
+
+ /*
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 3a93f73..52e9620 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -38,10 +38,17 @@ extern unsigned long totalhigh_pages;
+
+ void kmap_flush_unused(void);
+
++struct page *kmap_to_page(void *addr);
++
+ #else /* CONFIG_HIGHMEM */
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
+
++static inline struct page *kmap_to_page(void *addr)
++{
++ return virt_to_page(addr);
++}
++
+ #define totalhigh_pages 0UL
+
+ #ifndef ARCH_HAS_KMAP
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 3e8f2f7..f85c5ab 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
+ __mpol_put(pol);
+ }
+
+-extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol);
+-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol)
+-{
+- if (!frompol)
+- return frompol;
+- return __mpol_cond_copy(tompol, frompol);
+-}
+-
+ extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
+ {
+@@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
+ {
+ }
+
+-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+- struct mempolicy *from)
+-{
+- return from;
+-}
+-
+ static inline void mpol_get(struct mempolicy *pol)
+ {
+ }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 1874c5e..5776609 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -752,6 +752,7 @@
+ #define PCI_DEVICE_ID_HP_CISSD 0x3238
+ #define PCI_DEVICE_ID_HP_CISSE 0x323a
+ #define PCI_DEVICE_ID_HP_CISSF 0x323b
++#define PCI_DEVICE_ID_HP_CISSH 0x323c
+ #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
+
+ #define PCI_VENDOR_ID_PCTECH 0x1042
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 6337535..b6cacf1 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2636,9 +2636,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
+ dentry->d_fsdata = cgrp;
+ inc_nlink(parent->d_inode);
+ rcu_assign_pointer(cgrp->dentry, dentry);
+- dget(dentry);
+ }
+- dput(dentry);
+
+ return error;
+ }
+@@ -4508,41 +4506,19 @@ void cgroup_fork(struct task_struct *child)
+ }
+
+ /**
+- * cgroup_fork_callbacks - run fork callbacks
+- * @child: the new task
+- *
+- * Called on a new task very soon before adding it to the
+- * tasklist. No need to take any locks since no-one can
+- * be operating on this task.
+- */
+-void cgroup_fork_callbacks(struct task_struct *child)
+-{
+- if (need_forkexit_callback) {
+- int i;
+- /*
+- * forkexit callbacks are only supported for builtin
+- * subsystems, and the builtin section of the subsys array is
+- * immutable, so we don't need to lock the subsys array here.
+- */
+- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+- struct cgroup_subsys *ss = subsys[i];
+- if (ss->fork)
+- ss->fork(ss, child);
+- }
+- }
+-}
+-
+-/**
+ * cgroup_post_fork - called on a new task after adding it to the task list
+ * @child: the task in question
+ *
+- * Adds the task to the list running through its css_set if necessary.
+- * Has to be after the task is visible on the task list in case we race
+- * with the first call to cgroup_iter_start() - to guarantee that the
+- * new task ends up on its list.
++ * Adds the task to the list running through its css_set if necessary and
++ * calls the subsystem fork() callbacks. Has to be after the task is
++ * visible on the task list in case we race with the first call to
++ * cgroup_iter_start() - to guarantee that the new task ends up on its
++ * list.
+ */
+ void cgroup_post_fork(struct task_struct *child)
+ {
++ int i;
++
+ if (use_task_css_set_links) {
+ write_lock(&css_set_lock);
+ task_lock(child);
+@@ -4551,7 +4527,21 @@ void cgroup_post_fork(struct task_struct *child)
+ task_unlock(child);
+ write_unlock(&css_set_lock);
+ }
++
++ /*
++ * Call ss->fork(). This must happen after @child is linked on
++ * css_set; otherwise, @child might change state between ->fork()
++ * and addition to css_set.
++ */
++ if (need_forkexit_callback) {
++ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
++ struct cgroup_subsys *ss = subsys[i];
++ if (ss->fork)
++ ss->fork(ss, child);
++ }
++ }
+ }
++
+ /**
+ * cgroup_exit - detach cgroup from exiting task
+ * @tsk: pointer to task_struct of exiting process
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index 213c035..6c132a4 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -197,23 +197,15 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+ {
+ struct freezer *freezer;
+
+- /*
+- * No lock is needed, since the task isn't on tasklist yet,
+- * so it can't be moved to another cgroup, which means the
+- * freezer won't be removed and will be valid during this
+- * function call. Nevertheless, apply RCU read-side critical
+- * section to suppress RCU lockdep false positives.
+- */
+ rcu_read_lock();
+ freezer = task_freezer(task);
+- rcu_read_unlock();
+
+ /*
+ * The root cgroup is non-freezable, so we can skip the
+ * following check.
+ */
+ if (!freezer->css.cgroup->parent)
+- return;
++ goto out;
+
+ spin_lock_irq(&freezer->lock);
+ BUG_ON(freezer->state == CGROUP_FROZEN);
+@@ -221,7 +213,10 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+ /* Locking avoids race with FREEZING -> THAWED transitions. */
+ if (freezer->state == CGROUP_FREEZING)
+ freeze_task(task, true);
++
+ spin_unlock_irq(&freezer->lock);
++out:
++ rcu_read_unlock();
+ }
+
+ /*
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 222457a..ce0c182 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1057,7 +1057,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ {
+ int retval;
+ struct task_struct *p;
+- int cgroup_callbacks_done = 0;
+
+ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
+ return ERR_PTR(-EINVAL);
+@@ -1312,12 +1311,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ p->group_leader = p;
+ INIT_LIST_HEAD(&p->thread_group);
+
+- /* Now that the task is set up, run cgroup callbacks if
+- * necessary. We need to run them before the task is visible
+- * on the tasklist. */
+- cgroup_fork_callbacks(p);
+- cgroup_callbacks_done = 1;
+-
+ /* Need tasklist lock for parent etc handling! */
+ write_lock_irq(&tasklist_lock);
+
+@@ -1419,7 +1412,7 @@ bad_fork_cleanup_cgroup:
+ #endif
+ if (clone_flags & CLONE_THREAD)
+ threadgroup_fork_read_unlock(current);
+- cgroup_exit(p, cgroup_callbacks_done);
++ cgroup_exit(p, 0);
+ delayacct_tsk_free(p);
+ module_put(task_thread_info(p)->exec_domain->module);
+ bad_fork_cleanup_count:
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 7600092..382a6bd 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -701,6 +701,7 @@ static void
+ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ {
+ cpumask_var_t mask;
++ bool valid = true;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+@@ -715,10 +716,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ }
+
+ raw_spin_lock_irq(&desc->lock);
+- cpumask_copy(mask, desc->irq_data.affinity);
++ /*
++ * This code is triggered unconditionally. Check the affinity
++ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
++ */
++ if (desc->irq_data.affinity)
++ cpumask_copy(mask, desc->irq_data.affinity);
++ else
++ valid = false;
+ raw_spin_unlock_irq(&desc->lock);
+
+- set_cpus_allowed_ptr(current, mask);
++ if (valid)
++ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+ }
+ #else
+@@ -950,6 +959,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ */
+ get_task_struct(t);
+ new->thread = t;
++ /*
++ * Tell the thread to set its affinity. This is
++ * important for shared interrupt handlers as we do
++ * not invoke setup_affinity() for the secondary
++ * handlers as everything is already set up. Even for
++ * interrupts marked with IRQF_NO_BALANCE this is
++ * correct as we want the thread to move to the cpu(s)
++ * on which the requesting code placed the interrupt.
++ */
++ set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ }
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index a122196..1aa52af 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -202,13 +202,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ };
+ #endif /* #ifdef CONFIG_NO_HZ */
+
+-static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
+-static int qhimark = 10000; /* If this many pending, ignore blimit. */
+-static int qlowmark = 100; /* Once only this many pending, use blimit. */
++static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
++static long qhimark = 10000; /* If this many pending, ignore blimit. */
++static long qlowmark = 100; /* Once only this many pending, use blimit. */
+
+-module_param(blimit, int, 0);
+-module_param(qhimark, int, 0);
+-module_param(qlowmark, int, 0);
++module_param(blimit, long, 0);
++module_param(qhimark, long, 0);
++module_param(qlowmark, long, 0);
+
+ int rcu_cpu_stall_suppress __read_mostly;
+ module_param(rcu_cpu_stall_suppress, int, 0644);
+@@ -1260,7 +1260,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+ unsigned long flags;
+ struct rcu_head *next, *list, **tail;
+- int bl, count;
++ long bl, count;
+
+ /* If no callbacks are ready, just return.*/
+ if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
+index 429242f..f280df1 100644
+--- a/kernel/sched_autogroup.c
++++ b/kernel/sched_autogroup.c
+@@ -160,15 +160,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+
+ p->signal->autogroup = autogroup_kref_get(ag);
+
+- if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+- goto out;
+-
+ t = p;
+ do {
+ sched_move_task(t);
+ } while_each_thread(p, t);
+
+-out:
+ unlock_task_sighand(p, &flags);
+ autogroup_kref_put(prev);
+ }
+diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
+index c2f0e72..3d7a50e 100644
+--- a/kernel/sched_autogroup.h
++++ b/kernel/sched_autogroup.h
+@@ -1,11 +1,6 @@
+ #ifdef CONFIG_SCHED_AUTOGROUP
+
+ struct autogroup {
+- /*
+- * reference doesn't mean how many thread attach to this
+- * autogroup now. It just stands for the number of task
+- * could use this autogroup.
+- */
+ struct kref kref;
+ struct task_group *tg;
+ struct rw_semaphore lock;
+diff --git a/kernel/sys.c b/kernel/sys.c
+index d7c4ab0..f5939c2 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1190,7 +1190,7 @@ static int override_release(char __user *release, size_t len)
+ rest++;
+ }
+ v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+- copy = min(sizeof(buf), max_t(size_t, 1, len));
++ copy = clamp_t(size_t, len, 1, sizeof(buf));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 25b4f4d..54dba59 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2074,7 +2074,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
+ {
+ iter->pos = 0;
+ iter->func_pos = 0;
+- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
++ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
+ }
+
+ static void *t_start(struct seq_file *m, loff_t *pos)
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index f5b7b5c..6fdc629 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2683,7 +2683,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ unsigned long flags;
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *bpage;
+- unsigned long ret;
++ unsigned long ret = 0;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+@@ -2698,7 +2698,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ bpage = cpu_buffer->reader_page;
+ else
+ bpage = rb_set_head_page(cpu_buffer);
+- ret = bpage->page->time_stamp;
++ if (bpage)
++ ret = bpage->page->time_stamp;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return ret;
+@@ -3005,6 +3006,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ * Splice the empty reader page into the list around the head.
+ */
+ reader = rb_set_head_page(cpu_buffer);
++ if (!reader)
++ goto out;
+ cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
+ cpu_buffer->reader_page->list.prev = reader->list.prev;
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d551d5f..7bf068a 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1146,8 +1146,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ unsigned int lcpu;
+
+- BUG_ON(timer_pending(timer));
+- BUG_ON(!list_empty(&work->entry));
++ WARN_ON_ONCE(timer_pending(timer));
++ WARN_ON_ONCE(!list_empty(&work->entry));
+
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+diff --git a/mm/dmapool.c b/mm/dmapool.c
+index c5ab33b..da1b0f0 100644
+--- a/mm/dmapool.c
++++ b/mm/dmapool.c
+@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
+ size_t allocation;
+ size_t boundary;
+ char name[32];
+- wait_queue_head_t waitq;
+ struct list_head pools;
+ };
+
+@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
+ unsigned int offset;
+ };
+
+-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
+-
+ static DEFINE_MUTEX(pools_lock);
+
+ static ssize_t
+@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+ retval->size = size;
+ retval->boundary = boundary;
+ retval->allocation = allocation;
+- init_waitqueue_head(&retval->waitq);
+
+ if (dev) {
+ int ret;
+@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
+ memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+ #endif
+ pool_initialise_page(pool, page);
+- list_add(&page->page_list, &pool->page_list);
+ page->in_use = 0;
+ page->offset = 0;
+ } else {
+@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ might_sleep_if(mem_flags & __GFP_WAIT);
+
+ spin_lock_irqsave(&pool->lock, flags);
+- restart:
+ list_for_each_entry(page, &pool->page_list, page_list) {
+ if (page->offset < pool->allocation)
+ goto ready;
+ }
+- page = pool_alloc_page(pool, GFP_ATOMIC);
+- if (!page) {
+- if (mem_flags & __GFP_WAIT) {
+- DECLARE_WAITQUEUE(wait, current);
+
+- __set_current_state(TASK_UNINTERRUPTIBLE);
+- __add_wait_queue(&pool->waitq, &wait);
+- spin_unlock_irqrestore(&pool->lock, flags);
++ /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
++ spin_unlock_irqrestore(&pool->lock, flags);
+
+- schedule_timeout(POOL_TIMEOUT_JIFFIES);
++ page = pool_alloc_page(pool, mem_flags);
++ if (!page)
++ return NULL;
+
+- spin_lock_irqsave(&pool->lock, flags);
+- __remove_wait_queue(&pool->waitq, &wait);
+- goto restart;
+- }
+- retval = NULL;
+- goto done;
+- }
++ spin_lock_irqsave(&pool->lock, flags);
+
++ list_add(&page->page_list, &pool->page_list);
+ ready:
+ page->in_use++;
+ offset = page->offset;
+@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ #ifdef DMAPOOL_DEBUG
+ memset(retval, POOL_POISON_ALLOCATED, pool->size);
+ #endif
+- done:
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return retval;
+ }
+@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+ page->in_use--;
+ *(int *)vaddr = page->offset;
+ page->offset = offset;
+- if (waitqueue_active(&pool->waitq))
+- wake_up_locked(&pool->waitq);
+ /*
+ * Resist a temptation to do
+ * if (!is_page_busy(page)) pool_free_page(pool, page);
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 57d82c6..2a07f97 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -94,6 +94,19 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+ #endif
+
++struct page *kmap_to_page(void *vaddr)
++{
++ unsigned long addr = (unsigned long)vaddr;
++
++ if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
++ int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
++ return pte_page(pkmap_page_table[i]);
++ }
++
++ return virt_to_page(addr);
++}
++EXPORT_SYMBOL(kmap_to_page);
++
+ static void flush_all_zero_pkmaps(void)
+ {
+ int i;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 8f005e9..470cbb4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -921,6 +921,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ count_vm_event(THP_FAULT_FALLBACK);
+ ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+ pmd, orig_pmd, page, haddr);
++ if (ret & VM_FAULT_OOM)
++ split_huge_page(page);
+ put_page(page);
+ goto out;
+ }
+@@ -928,6 +930,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+
+ if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+ put_page(new_page);
++ split_huge_page(page);
+ put_page(page);
+ ret |= VM_FAULT_OOM;
+ goto out;
+diff --git a/mm/memory.c b/mm/memory.c
+index 70f5daf..15e686a 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3469,6 +3469,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++retry:
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3482,13 +3483,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ pmd, flags);
+ } else {
+ pmd_t orig_pmd = *pmd;
++ int ret;
++
+ barrier();
+ if (pmd_trans_huge(orig_pmd)) {
+ if (flags & FAULT_FLAG_WRITE &&
+ !pmd_write(orig_pmd) &&
+- !pmd_trans_splitting(orig_pmd))
+- return do_huge_pmd_wp_page(mm, vma, address,
+- pmd, orig_pmd);
++ !pmd_trans_splitting(orig_pmd)) {
++ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
++ orig_pmd);
++ /*
++ * If COW results in an oom, the huge pmd will
++ * have been split, so retry the fault on the
++ * pte for a smaller charge.
++ */
++ if (unlikely(ret & VM_FAULT_OOM))
++ goto retry;
++ return ret;
++ }
+ return 0;
+ }
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4c82c21..c59d44b 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1999,28 +1999,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ return new;
+ }
+
+-/*
+- * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
+- * eliminate the * MPOL_F_* flags that require conditional ref and
+- * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
+- * after return. Use the returned value.
+- *
+- * Allows use of a mempolicy for, e.g., multiple allocations with a single
+- * policy lookup, even if the policy needs/has extra ref on lookup.
+- * shmem_readahead needs this.
+- */
+-struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol)
+-{
+- if (!mpol_needs_cond_ref(frompol))
+- return frompol;
+-
+- *tompol = *frompol;
+- tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
+- __mpol_put(frompol);
+- return tompol;
+-}
+-
+ /* Slow path of a mempolicy comparison */
+ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ {
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 2d46e23..12b9e80 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -798,24 +798,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+ {
+- struct mempolicy mpol, *spol;
+ struct vm_area_struct pvma;
+-
+- spol = mpol_cond_copy(&mpol,
+- mpol_shared_policy_lookup(&info->policy, index));
++ struct page *page;
+
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
+ pvma.vm_pgoff = index;
+ pvma.vm_ops = NULL;
+- pvma.vm_policy = spol;
+- return swapin_readahead(swap, gfp, &pvma, 0);
++ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
++
++ page = swapin_readahead(swap, gfp, &pvma, 0);
++
++ /* Drop reference taken by mpol_shared_policy_lookup() */
++ mpol_cond_put(pvma.vm_policy);
++
++ return page;
+ }
+
+ static struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+ {
+ struct vm_area_struct pvma;
++ struct page *page;
+
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
+@@ -823,10 +827,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+- /*
+- * alloc_page_vma() will drop the shared policy reference
+- */
+- return alloc_page_vma(gfp, &pvma, 0);
++ page = alloc_page_vma(gfp, &pvma, 0);
++
++ /* Drop reference taken by mpol_shared_policy_lookup() */
++ mpol_cond_put(pvma.vm_policy);
++
++ return page;
+ }
+ #else /* !CONFIG_NUMA */
+ #ifdef CONFIG_TMPFS
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 1e4ee1a..313381c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2492,19 +2492,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+ }
+ #endif
+
+-static bool zone_balanced(struct zone *zone, int order,
+- unsigned long balance_gap, int classzone_idx)
+-{
+- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+- balance_gap, classzone_idx, 0))
+- return false;
+-
+- if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+- return false;
+-
+- return true;
+-}
+-
+ /*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+@@ -2564,7 +2551,8 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+ continue;
+ }
+
+- if (!zone_balanced(zone, order, 0, i))
++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
++ i, 0))
+ all_zones_ok = false;
+ else
+ balanced += zone->present_pages;
+@@ -2667,7 +2655,8 @@ loop_again:
+ shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ &sc, priority, 0);
+
+- if (!zone_balanced(zone, order, 0, 0)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone), 0, 0)) {
+ end_zone = i;
+ break;
+ } else {
+@@ -2728,8 +2717,9 @@ loop_again:
+ (zone->present_pages +
+ KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+- if (!zone_balanced(zone, order,
+- balance_gap, end_zone)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone) + balance_gap,
++ end_zone, 0)) {
+ shrink_zone(priority, zone, &sc);
+
+ reclaim_state->reclaimed_slab = 0;
+@@ -2756,7 +2746,8 @@ loop_again:
+ continue;
+ }
+
+- if (!zone_balanced(zone, order, 0, end_zone)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone), end_zone, 0)) {
+ all_zones_ok = 0;
+ /*
+ * We are still under min water mark. This
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 32aa983..55f0c09 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -37,6 +37,7 @@
+ #include <linux/inet.h>
+ #include <linux/idr.h>
+ #include <linux/file.h>
++#include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <net/9p/9p.h>
+ #include <linux/parser.h>
+@@ -323,7 +324,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+- pages[index++] = virt_to_page(data);
++ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
+ }
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 7ee4ead..14c4864 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -486,7 +486,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+ long timeo;
+ int err = 0;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+@@ -523,7 +523,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index fdaabf2..a4e7131 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -684,28 +684,27 @@ EXPORT_SYMBOL(ip_defrag);
+
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+- const struct iphdr *iph;
++ struct iphdr iph;
+ u32 len;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return skb;
+
+- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+ return skb;
+
+- iph = ip_hdr(skb);
+- if (iph->ihl < 5 || iph->version != 4)
++ if (iph.ihl < 5 || iph.version != 4)
+ return skb;
+- if (!pskb_may_pull(skb, iph->ihl*4))
+- return skb;
+- iph = ip_hdr(skb);
+- len = ntohs(iph->tot_len);
+- if (skb->len < len || len < (iph->ihl * 4))
++
++ len = ntohs(iph.tot_len);
++ if (skb->len < len || len < (iph.ihl * 4))
+ return skb;
+
+- if (ip_is_fragment(ip_hdr(skb))) {
++ if (ip_is_fragment(&iph)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
++ if (!pskb_may_pull(skb, iph.ihl*4))
++ return skb;
+ if (pskb_trim_rcsum(skb, len))
+ return skb;
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
+index 6c85564..0018b65 100644
+--- a/net/sctp/chunk.c
++++ b/net/sctp/chunk.c
+@@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ msg = sctp_datamsg_new(GFP_KERNEL);
+ if (!msg)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ /* Note: Calculate this outside of the loop, so that all fragments
+ * have the same expiration.
+@@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
+
+- if (!chunk)
++ if (!chunk) {
++ err = -ENOMEM;
+ goto errout;
++ }
++
+ err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
+ if (err < 0)
+- goto errout;
++ goto errout_chunk_free;
+
+ offset += len;
+
+@@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
+
+- if (!chunk)
++ if (!chunk) {
++ err = -ENOMEM;
+ goto errout;
++ }
+
+ err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
+
+@@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+ __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
+ - (__u8 *)chunk->skb->data);
+ if (err < 0)
+- goto errout;
++ goto errout_chunk_free;
+
+ sctp_datamsg_assign(msg, chunk);
+ list_add_tail(&chunk->frag_list, &msg->chunks);
+@@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ return msg;
+
++errout_chunk_free:
++ sctp_chunk_free(chunk);
++
+ errout:
+ list_for_each_safe(pos, temp, &msg->chunks) {
+ list_del_init(pos);
+@@ -339,7 +347,7 @@ errout:
+ sctp_chunk_free(chunk);
+ }
+ sctp_datamsg_put(msg);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ /* Check whether this message has expired. */
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8e49d76..fa8333b 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1908,8 +1908,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+
+ /* Break the message into multiple chunks of maximum size. */
+ datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
+- if (!datamsg) {
+- err = -ENOMEM;
++ if (IS_ERR(datamsg)) {
++ err = PTR_ERR(datamsg);
+ goto out_free;
+ }
+
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 92e24bb..4450fbe 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -202,8 +202,8 @@ static void devcgroup_destroy(struct cgroup_subsys *ss,
+
+ dev_cgroup = cgroup_to_devcgroup(cgroup);
+ list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+- list_del_rcu(&wh->list);
+- kfree_rcu(wh, rcu);
++ list_del(&wh->list);
++ kfree(wh);
+ }
+ kfree(dev_cgroup);
+ }
+@@ -278,7 +278,7 @@ static int may_access_whitelist(struct dev_cgroup *c,
+ {
+ struct dev_whitelist_item *whitem;
+
+- list_for_each_entry_rcu(whitem, &c->whitelist, list) {
++ list_for_each_entry(whitem, &c->whitelist, list) {
+ if (whitem->type & DEV_ALL)
+ return 1;
+ if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 53345bc..a1e312f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -148,6 +148,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ "{Intel, PCH},"
+ "{Intel, CPT},"
+ "{Intel, PPT},"
++ "{Intel, LPT},"
+ "{Intel, PBG},"
+ "{Intel, SCH},"
+ "{ATI, SB450},"
+@@ -2973,6 +2974,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ { PCI_DEVICE(0x8086, 0x1e20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+ AZX_DCAPS_BUFSIZE},
++ /* Lynx Point */
++ { PCI_DEVICE(0x8086, 0x8c20),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
++ AZX_DCAPS_BUFSIZE},
+ /* SCH */
+ { PCI_DEVICE(0x8086, 0x811b),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 58c287b..498b62e 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4505,6 +4505,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
+ };
+
+ static const struct snd_pci_quirk cxt5066_fixups[] = {
++ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index c119f33..bde2615 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -424,9 +424,11 @@ static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+- /* Disable pin out until stream is active*/
++ /* Enable pin out: some machines with GM965 get broken output when
++ * the pin is disabled or changed while in use with HDMI
++ */
+ snd_hda_codec_write(codec, pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ }
+
+ static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid)
+@@ -1141,17 +1143,11 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx = hinfo_to_pin_index(spec, hinfo);
+ hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
+- int pinctl;
+
+ hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+
+ hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+
+- pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
+-
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+@@ -1163,7 +1159,6 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ int cvt_idx, pin_idx;
+ struct hdmi_spec_per_cvt *per_cvt;
+ struct hdmi_spec_per_pin *per_pin;
+- int pinctl;
+
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+
+@@ -1182,11 +1177,6 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ return -EINVAL;
+ per_pin = &spec->pins[pin_idx];
+
+- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pinctl & ~PIN_OUT);
+ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ }
+
+@@ -1911,6 +1901,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862804, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
++{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -1957,6 +1948,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862803");
+ MODULE_ALIAS("snd-hda-codec-id:80862804");
+ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
++MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+
+ MODULE_LICENSE("GPL");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1a09fbf..f3e0b24 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3854,6 +3854,7 @@ static void alc_auto_init_std(struct hda_codec *codec)
+ ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
+
+ static const struct snd_pci_quirk beep_white_list[] = {
++ SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+ SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
+@@ -5468,8 +5469,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
+- snd_hda_override_pin_caps(codec, 0x18, 0x00001714);
+- snd_hda_override_pin_caps(codec, 0x19, 0x0000171c);
++ snd_hda_override_pin_caps(codec, 0x18, 0x00000734);
++ snd_hda_override_pin_caps(codec, 0x19, 0x0000073c);
+ }
+ }
+
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 2b973f5..467a73b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1716,7 +1716,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
+- "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
++ "HP Pavilion dv7", STAC_HP_DV7_4000),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index eeefbce..34b9bb7 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -116,6 +116,7 @@ struct snd_usb_midi {
+ struct list_head list;
+ struct timer_list error_timer;
+ spinlock_t disc_lock;
++ struct rw_semaphore disc_rwsem;
+ struct mutex mutex;
+ u32 usb_id;
+ int next_midi_device;
+@@ -125,8 +126,10 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- unsigned int opened;
++ bool autopm_reference;
++ unsigned int opened[2];
+ unsigned char disconnected;
++ unsigned char input_running;
+
+ struct snd_kcontrol *roland_load_ctl;
+ };
+@@ -148,7 +151,6 @@ struct snd_usb_midi_out_endpoint {
+ struct snd_usb_midi_out_endpoint* ep;
+ struct snd_rawmidi_substream *substream;
+ int active;
+- bool autopm_reference;
+ uint8_t cable; /* cable number << 4 */
+ uint8_t state;
+ #define STATE_UNKNOWN 0
+@@ -1033,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi)
+ snd_usbmidi_input_start(&umidi->list);
+ }
+
+-static void substream_open(struct snd_rawmidi_substream *substream, int open)
++static int substream_open(struct snd_rawmidi_substream *substream, int dir,
++ int open)
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
++ int err;
++
++ down_read(&umidi->disc_rwsem);
++ if (umidi->disconnected) {
++ up_read(&umidi->disc_rwsem);
++ return open ? -ENODEV : 0;
++ }
+
+ mutex_lock(&umidi->mutex);
+ if (open) {
+- if (umidi->opened++ == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ err = usb_autopm_get_interface(umidi->iface);
++ umidi->autopm_reference = err >= 0;
++ if (err < 0 && err != -EACCES) {
++ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return -EIO;
++ }
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+- update_roland_altsetting(umidi);
++ update_roland_altsetting(umidi);
++ }
+ }
++ umidi->opened[dir]++;
++ if (umidi->opened[1])
++ snd_usbmidi_input_start(&umidi->list);
+ } else {
+- if (--umidi->opened == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ umidi->opened[dir]--;
++ if (!umidi->opened[1])
++ snd_usbmidi_input_stop(&umidi->list);
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
++ }
++ if (umidi->autopm_reference)
++ usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return 0;
+ }
+
+ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+@@ -1063,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct usbmidi_out_port* port = NULL;
+ int i, j;
+- int err;
+
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ if (umidi->endpoints[i].out)
+@@ -1076,25 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ snd_BUG();
+ return -ENXIO;
+ }
+- err = usb_autopm_get_interface(umidi->iface);
+- port->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES)
+- return -EIO;
++
+ substream->runtime->private_data = port;
+ port->state = STATE_UNKNOWN;
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 0, 1);
+ }
+
+ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+- struct snd_usb_midi* umidi = substream->rmidi->private_data;
+- struct usbmidi_out_port *port = substream->runtime->private_data;
+-
+- substream_open(substream, 0);
+- if (port->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+- return 0;
++ return substream_open(substream, 0, 0);
+ }
+
+ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1147,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+
+ static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 1, 1);
+ }
+
+ static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 0);
+- return 0;
++ return substream_open(substream, 1, 0);
+ }
+
+ static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1403,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p)
+ * a timer may submit an URB. To reliably break the cycle
+ * a flag under lock must be used
+ */
++ down_write(&umidi->disc_rwsem);
+ spin_lock_irq(&umidi->disc_lock);
+ umidi->disconnected = 1;
+ spin_unlock_irq(&umidi->disc_lock);
++ up_write(&umidi->disc_rwsem);
++
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->out)
+@@ -2060,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p)
+ unsigned int i, j;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (!umidi->input_running)
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->in)
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
+ }
++ umidi->input_running = 0;
+ }
+
+ static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
+@@ -2090,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p)
+ int i;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (umidi->input_running || !umidi->opened[1])
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
++ umidi->input_running = 1;
+ }
+
+ /*
+@@ -2117,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card,
+ umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
+ init_timer(&umidi->error_timer);
+ spin_lock_init(&umidi->disc_lock);
++ init_rwsem(&umidi->disc_rwsem);
+ mutex_init(&umidi->mutex);
+ umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
+ le16_to_cpu(umidi->dev->descriptor.idProduct));
+@@ -2229,9 +2257,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ }
+
+ list_add_tail(&umidi->list, midi_list);
+-
+- for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+- snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+ return 0;
+ }
+
diff --git a/3.2.54/1036_linux-3.2.37.patch b/3.2.54/1036_linux-3.2.37.patch
new file mode 100644
index 0000000..ad13251
--- /dev/null
+++ b/3.2.54/1036_linux-3.2.37.patch
@@ -0,0 +1,1689 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index a4399f5..3b979c6 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -524,6 +524,11 @@ tcp_thin_dupack - BOOLEAN
+ Documentation/networking/tcp-thin.txt
+ Default: 0
+
++tcp_challenge_ack_limit - INTEGER
++ Limits number of Challenge ACK sent per second, as recommended
++ in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
++ Default: 100
++
+ UDP variables:
+
+ udp_mem - vector of 3 INTEGERs: min, pressure, max
+diff --git a/Makefile b/Makefile
+index 2052c29..21c77e2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index ec8affe..e74f86e 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -859,13 +859,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+
+ void update_vsyscall_tz(void)
+ {
+- /* Make userspace gettimeofday spin until we're done. */
+- ++vdso_data->tb_update_count;
+- smp_mb();
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+- smp_mb();
+- ++vdso_data->tb_update_count;
+ }
+
+ static void __init clocksource_init(void)
+diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
+index e8dd5c5..d10c123 100644
+--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
++++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
+@@ -55,7 +55,8 @@ static const char *board[] __initdata = {
+ "amcc,haleakala",
+ "amcc,kilauea",
+ "amcc,makalu",
+- "est,hotfoot"
++ "est,hotfoot",
++ NULL
+ };
+
+ static int __init ppc40x_probe(void)
+diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
+index 1770610..f368cef 100644
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
+ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+ {
+- ptep_set_wrprotect(mm, addr, ptep);
++ pte_t old_pte = *ptep;
++ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ }
+
+ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+ {
+- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
++ int changed = !pte_same(*ptep, pte);
++ if (changed) {
++ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
++ flush_tlb_page(vma, addr);
++ }
++ return changed;
+ }
+
+ static inline pte_t huge_ptep_get(pte_t *ptep)
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 8ab80ba..792b66f 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -789,8 +789,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
+ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ {
+ struct acpi_device_id button_device_ids[] = {
+- {"PNP0C0D", 0},
+ {"PNP0C0C", 0},
++ {"PNP0C0D", 0},
+ {"PNP0C0E", 0},
+ {"", 0},
+ };
+@@ -802,6 +802,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ /* Power button, Lid switch always enable wakeup */
+ if (!acpi_match_device_ids(device, button_device_ids)) {
+ device->wakeup.flags.run_wake = 1;
++ if (!acpi_match_device_ids(device, &button_device_ids[1])) {
++ /* Do not use Lid/sleep button for S5 wakeup */
++ if (device->wakeup.sleep_state == ACPI_STATE_S5)
++ device->wakeup.sleep_state = ACPI_STATE_S4;
++ }
+ device_set_wakeup_capable(&device->dev, true);
+ return;
+ }
+@@ -1152,7 +1157,7 @@ static void acpi_device_set_id(struct acpi_device *device)
+ acpi_add_id(device, ACPI_DOCK_HID);
+ else if (!acpi_ibm_smbus_match(device))
+ acpi_add_id(device, ACPI_SMBUS_IBM_HID);
+- else if (!acpi_device_hid(device) &&
++ else if (list_empty(&device->pnp.ids) &&
+ ACPI_IS_ROOT_DEVICE(device->parent)) {
+ acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
+index db195ab..e49ddd0 100644
+--- a/drivers/block/aoe/aoe.h
++++ b/drivers/block/aoe/aoe.h
+@@ -1,5 +1,5 @@
+ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
+-#define VERSION "47"
++#define VERSION "47q"
+ #define AOE_MAJOR 152
+ #define DEVICE_NAME "aoe"
+
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index 321de7b..7eca463 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -276,8 +276,6 @@ aoeblk_gdalloc(void *vp)
+ goto err_mempool;
+ blk_queue_make_request(d->blkq, aoeblk_make_request);
+ d->blkq->backing_dev_info.name = "aoe";
+- if (bdi_init(&d->blkq->backing_dev_info))
+- goto err_blkq;
+ spin_lock_irqsave(&d->lock, flags);
+ gd->major = AOE_MAJOR;
+ gd->first_minor = d->sysminor * AOE_PARTITIONS;
+@@ -298,9 +296,6 @@ aoeblk_gdalloc(void *vp)
+ aoedisk_add_sysfs(d);
+ return;
+
+-err_blkq:
+- blk_cleanup_queue(d->blkq);
+- d->blkq = NULL;
+ err_mempool:
+ mempool_destroy(d->bufpool);
+ err_disk:
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 791df46..012a9d2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1305,6 +1305,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
+ #endif /* CONFIG_ACPI */
+
+ /* modesetting */
++extern void i915_redisable_vga(struct drm_device *dev);
+ extern void intel_modeset_init(struct drm_device *dev);
+ extern void intel_modeset_gem_init(struct drm_device *dev);
+ extern void intel_modeset_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 54acad3..fa9639b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8898,6 +8898,23 @@ static void i915_disable_vga(struct drm_device *dev)
+ POSTING_READ(vga_reg);
+ }
+
++void i915_redisable_vga(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 vga_reg;
++
++ if (HAS_PCH_SPLIT(dev))
++ vga_reg = CPU_VGACNTRL;
++ else
++ vga_reg = VGACNTRL;
++
++ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
++ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
++ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
++ POSTING_READ(vga_reg);
++ }
++}
++
+ void intel_modeset_init(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index cf5ea3d..c6d0966 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -535,6 +535,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
++ i915_redisable_vga(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return NOTIFY_OK;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index d5af089..2bb29c9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -940,7 +940,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+- mem->bus.is_iomem = true;
++ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+ }
+ #endif
+ break;
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 81fc100..1b98338 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1536,6 +1536,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ of_machine_is_compatible("PowerBook6,7")) {
+ /* ibook */
+ rdev->mode_info.connector_table = CT_IBOOK;
++ } else if (of_machine_is_compatible("PowerMac3,5")) {
++ /* PowerMac G4 Silver radeon 7500 */
++ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+ } else if (of_machine_is_compatible("PowerMac4,4")) {
+ /* emac */
+ rdev->mode_info.connector_table = CT_EMAC;
+@@ -1561,6 +1564,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ (rdev->pdev->subsystem_device == 0x4150)) {
+ /* Mac G5 tower 9600 */
+ rdev->mode_info.connector_table = CT_MAC_G5_9600;
++ } else if ((rdev->pdev->device == 0x4c66) &&
++ (rdev->pdev->subsystem_vendor == 0x1002) &&
++ (rdev->pdev->subsystem_device == 0x4c66)) {
++ /* SAM440ep RV250 embedded board */
++ rdev->mode_info.connector_table = CT_SAM440EP;
+ } else
+ #endif /* CONFIG_PPC_PMAC */
+ #ifdef CONFIG_PPC64
+@@ -2134,6 +2142,115 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
++ case CT_SAM440EP:
++ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
++ rdev->mode_info.connector_table);
++ /* LVDS */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_LCD1_SUPPORT,
++ 0),
++ ATOM_DEVICE_LCD1_SUPPORT);
++ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
++ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_LVDS,
++ &hpd);
++ /* DVI-I - secondary dac, int tmds */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
++ hpd.hpd = RADEON_HPD_1; /* ??? */
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_DFP1_SUPPORT,
++ 0),
++ ATOM_DEVICE_DFP1_SUPPORT);
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT2_SUPPORT,
++ 2),
++ ATOM_DEVICE_CRT2_SUPPORT);
++ radeon_add_legacy_connector(dev, 1,
++ ATOM_DEVICE_DFP1_SUPPORT |
++ ATOM_DEVICE_CRT2_SUPPORT,
++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++ &hpd);
++ /* VGA - primary dac */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ 1),
++ ATOM_DEVICE_CRT1_SUPPORT);
++ radeon_add_legacy_connector(dev, 2,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_VGA,
++ &hpd);
++ /* TV - TV DAC */
++ ddc_i2c.valid = false;
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_TV1_SUPPORT,
++ 2),
++ ATOM_DEVICE_TV1_SUPPORT);
++ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
++ DRM_MODE_CONNECTOR_SVIDEO,
++ &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SVIDEO,
++ &hpd);
++ break;
++ case CT_MAC_G4_SILVER:
++ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
++ rdev->mode_info.connector_table);
++ /* DVI-I - tv dac, int tmds */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
++ hpd.hpd = RADEON_HPD_1; /* ??? */
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_DFP1_SUPPORT,
++ 0),
++ ATOM_DEVICE_DFP1_SUPPORT);
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT2_SUPPORT,
++ 2),
++ ATOM_DEVICE_CRT2_SUPPORT);
++ radeon_add_legacy_connector(dev, 0,
++ ATOM_DEVICE_DFP1_SUPPORT |
++ ATOM_DEVICE_CRT2_SUPPORT,
++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++ &hpd);
++ /* VGA - primary dac */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ 1),
++ ATOM_DEVICE_CRT1_SUPPORT);
++ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
++ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_VGA,
++ &hpd);
++ /* TV - TV DAC */
++ ddc_i2c.valid = false;
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_TV1_SUPPORT,
++ 2),
++ ATOM_DEVICE_TV1_SUPPORT);
++ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
++ DRM_MODE_CONNECTOR_SVIDEO,
++ &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SVIDEO,
++ &hpd);
++ break;
+ default:
+ DRM_INFO("Connector table: %d (invalid)\n",
+ rdev->mode_info.connector_table);
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 87d494d..6fd53b6 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -689,7 +689,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+ ret = connector_status_disconnected;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -871,7 +871,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ bool dret = false;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -1299,7 +1299,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
++ /* bridge chips are always aux */
++ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+ ret = connector_status_connected;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -1317,7 +1318,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+- if (radeon_ddc_probe(radeon_connector))
++		/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
++ if (radeon_ddc_probe(radeon_connector, false))
+ ret = connector_status_connected;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index a22d6e6..aec8e0c 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -701,10 +701,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+- ENCODER_OBJECT_ID_NONE)) {
++ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
++ ENCODER_OBJECT_ID_NONE) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++
++ if (dig->dp_i2c_bus)
++ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
++ &dig->dp_i2c_bus->adapter);
++ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index 1441b00..cf20351 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -34,7 +34,7 @@
+ * radeon_ddc_probe
+ *
+ */
+-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
++bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+ {
+ u8 out = 0x0;
+ u8 buf[8];
+@@ -58,7 +58,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ if (use_aux) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
++ } else {
++ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ }
++
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index 8254d5a..bb42df4 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -210,6 +210,8 @@ enum radeon_connector_table {
+ CT_RN50_POWER,
+ CT_MAC_X800,
+ CT_MAC_G5_9600,
++ CT_SAM440EP,
++ CT_MAC_G4_SILVER
+ };
+
+ enum radeon_dvo_chip {
+@@ -521,7 +523,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 val);
+ extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+ extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
++extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+ extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
+index 9e64d96..376d9d9 100644
+--- a/drivers/hwmon/lm73.c
++++ b/drivers/hwmon/lm73.c
+@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ struct i2c_client *client = to_i2c_client(dev);
+ long temp;
+ short value;
++ s32 err;
+
+ int status = strict_strtol(buf, 10, &temp);
+ if (status < 0)
+@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ /* Write value */
+ value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
+ (LM73_TEMP_MAX*4)) << 5;
+- i2c_smbus_write_word_swapped(client, attr->index, value);
+- return count;
++ err = i2c_smbus_write_word_swapped(client, attr->index, value);
++ return (err < 0) ? err : count;
+ }
+
+ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
++ int temp;
++
++ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
++ if (err < 0)
++ return err;
++
+ /* use integer division instead of equivalent right shift to
+ guarantee arithmetic shift and preserve the sign */
+- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
+- attr->index))*250) / 32;
+- return sprintf(buf, "%d\n", temp);
++ temp = (((s16) err) * 250) / 32;
++ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
+ }
+
+
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index 568b4f1..3ade373 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -524,6 +524,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+ int nes_destroy_cqp(struct nes_device *);
+ int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+ void nes_recheck_link_status(struct work_struct *work);
++void nes_terminate_timeout(unsigned long context);
+
+ /* nes_nic.c */
+ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 7c0ff19..4cd1bf7 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+ static void process_critical_error(struct nes_device *nesdev);
+ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+-static void nes_terminate_timeout(unsigned long context);
+ static void nes_terminate_start_timer(struct nes_qp *nesqp);
+
+ #ifdef CONFIG_INFINIBAND_NES_DEBUG
+@@ -3522,7 +3521,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
+ }
+
+ /* Timeout routine in case terminate fails to complete */
+-static void nes_terminate_timeout(unsigned long context)
++void nes_terminate_timeout(unsigned long context)
+ {
+ struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+@@ -3532,11 +3531,7 @@ static void nes_terminate_timeout(unsigned long context)
+ /* Set a timer in case hw cannot complete the terminate sequence */
+ static void nes_terminate_start_timer(struct nes_qp *nesqp)
+ {
+- init_timer(&nesqp->terminate_timer);
+- nesqp->terminate_timer.function = nes_terminate_timeout;
+- nesqp->terminate_timer.expires = jiffies + HZ;
+- nesqp->terminate_timer.data = (unsigned long)nesqp;
+- add_timer(&nesqp->terminate_timer);
++ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 5095bc4..b0471b4 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ }
+
+ nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
++ init_timer(&nesqp->terminate_timer);
++ nesqp->terminate_timer.function = nes_terminate_timeout;
++ nesqp->terminate_timer.data = (unsigned long)nesqp;
+
+ /* update the QP table */
+ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
+@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ return &nesqp->ibqp;
+ }
+
+-
+ /**
+ * nes_clean_cq
+ */
+@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ return ibmr;
+ case IWNES_MEMREG_TYPE_QP:
+ case IWNES_MEMREG_TYPE_CQ:
++ if (!region->length) {
++ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
++ ib_umem_release(region);
++ return ERR_PTR(-EINVAL);
++ }
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
+ if (!nespbl) {
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index e9d73e7..979d225 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -701,8 +701,8 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
+ 2);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+- ar9485Common_wo_xlna_rx_gain_1_1,
+- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
++ ar9485_common_rx_gain_1_1,
++ ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 8a009bc..7ca84c3 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -47,6 +47,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
++ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */
+ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
+ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
+@@ -82,6 +83,8 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
++ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
++ {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+@@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
+ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
+ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
++ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */
+ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
+ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index fb19447..67cbe5a 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -4208,7 +4208,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+- IEEE80211_HW_AMPDU_AGGREGATION;
++ IEEE80211_HW_AMPDU_AGGREGATION |
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
+ /*
+ * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
+ * unless we are capable of sending the buffered frames out after the
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 78fda9c..cab24f7 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2747,7 +2747,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ if (PCI_FUNC(dev->devfn))
+ return;
+ /*
+- * RICOH 0xe823 SD/MMC card reader fails to recognize
++ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+ * clock frequency from 200Mhz to 50Mhz fixes this issue.
+ *
+@@ -2758,7 +2758,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ * 0xf9 - Key register for 0x150
+ * 0xfc - key register for 0xe1
+ */
+- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
++ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
++ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+@@ -2785,6 +2786,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ #endif /*CONFIG_MMC_RICOH_MMC*/
+diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
+index f93f412..f818dae 100644
+--- a/drivers/rtc/rtc-vt8500.c
++++ b/drivers/rtc/rtc-vt8500.c
+@@ -69,7 +69,7 @@
+ | ALARM_SEC_BIT)
+
+ #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
+-#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
++#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */
+ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
+ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
+ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
+@@ -118,7 +118,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
+ tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
+ tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
+- tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
++ tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1;
+ tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
+ + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
+ tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
+@@ -137,8 +137,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ }
+
+ writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+- | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
+- | (bin2bcd(tm->tm_mday)),
++ | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
++ | (bin2bcd(tm->tm_mday))
++ | ((tm->tm_year >= 200) << DATE_CENTURY_S),
+ vt8500_rtc->regbase + VT8500_RTC_DS);
+ writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
+ | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
+@@ -248,7 +249,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
+ }
+
+ /* Enable RTC and set it to 24-hour mode */
+- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
++ writel(VT8500_RTC_CR_ENABLE,
+ vt8500_rtc->regbase + VT8500_RTC_CR);
+
+ vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
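
The month arithmetic fixed above is the usual struct tm convention clash: tm_mon counts months 0-11 while the RTC's BCD date field stores 1-12, so the driver must add one when writing and subtract one when reading. A minimal userspace round trip, with bin2bcd/bcd2bin written out (the kernel takes them from linux/bcd.h):

#include <stdio.h>

static unsigned bin2bcd(unsigned val) { return ((val / 10) << 4) | (val % 10); }
static unsigned bcd2bin(unsigned val) { return ((val >> 4) * 10) + (val & 0x0f); }

int main(void)
{
	int tm_mon = 11;                        /* December in struct tm */
	unsigned reg = bin2bcd(tm_mon + 1);     /* 0x12 is what hardware stores */

	printf("reg=0x%02x tm_mon=%u\n", reg, bcd2bin(reg) - 1);  /* 0x12, 11 */
	return 0;
}
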
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index a4884a5..c6ad694 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -1635,7 +1635,7 @@ int mvs_abort_task(struct sas_task *task)
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+- mvs_tmf_timedout((unsigned long)task);
++ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
+index d837d63..03cb95a 100644
+--- a/drivers/video/mxsfb.c
++++ b/drivers/video/mxsfb.c
+@@ -366,7 +366,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
+ loop--;
+ }
+
+- writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
++ reg = readl(host->base + LCDC_VDCTRL4);
++ writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
+
+ clk_disable(host->clk);
+
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 99a27cf..4e5dfb7 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -485,6 +485,13 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
++
++ /*
++ * The response to this call was already factored into the sequence
++ * number when the call went out, so we must adjust it back downward
++ * after signing here.
++ */
++ --server->sequence_number;
+ rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+ mutex_unlock(&server->srv_mutex);
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index a6f3763..451b9b8 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1197,10 +1197,30 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
+ * otherwise we might miss an event that happens between the
+ * f_op->poll() call and the new event set registering.
+ */
+- epi->event.events = event->events;
++ epi->event.events = event->events; /* need barrier below */
+ epi->event.data = event->data; /* protected by mtx */
+
+ /*
++ * The following barrier has two effects:
++ *
++ * 1) Flush epi changes above to other CPUs. This ensures
++ * we do not miss events from ep_poll_callback if an
++ * event occurs immediately after we call f_op->poll().
++ * We need this because we did not take ep->lock while
++ * changing epi above (but ep_poll_callback does take
++ * ep->lock).
++ *
++ * 2) We also need to ensure we do not miss _past_ events
++ * when calling f_op->poll(). This barrier also
++ * pairs with the barrier in wq_has_sleeper (see
++ * comments for wq_has_sleeper).
++ *
++ * This barrier will now guarantee ep_poll_callback or f_op->poll
++ * (or both) will notice the readiness of an item.
++ */
++ smp_mb();
++
++ /*
+ * Get current event bits. We can safely use the file* here because
+ * its usage count has been increased by the caller of this function.
+ */
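
The barrier reasoning in the comment above is the classic store-buffering pattern: each side publishes its own store with a full barrier before loading the other side's state, which makes it impossible for both sides to miss each other. A minimal sketch of that pairing, using C11 fences in place of smp_mb() (hypothetical names, not the epoll data structures):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int event_mask;   /* stands in for epi->event.events */
static atomic_int item_ready;   /* stands in for the item's ready state */

/* ep_modify() side: publish the new mask, then re-check readiness. */
static bool modify_side(int new_mask)
{
	atomic_store_explicit(&event_mask, new_mask, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() above */
	return atomic_load_explicit(&item_ready, memory_order_relaxed) != 0;
}

/* ep_poll_callback() side: publish readiness, then re-check the mask. */
static bool callback_side(void)
{
	atomic_store_explicit(&item_ready, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* wq_has_sleeper() pair */
	return atomic_load_explicit(&event_mask, memory_order_relaxed) != 0;
}

With both fences in place, at least one of the two loads is guaranteed to observe the other thread's store, so a ready event cannot be lost by both paths.
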
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 191580a..fbb92e6 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2093,13 +2093,14 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
+ * removes index from the index block.
+ */
+ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path *path)
++ struct ext4_ext_path *path, int depth)
+ {
+ int err;
+ ext4_fsblk_t leaf;
+
+ /* free index block */
+- path--;
++ depth--;
++ path = path + depth;
+ leaf = ext4_idx_pblock(path->p_idx);
+ if (unlikely(path->p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+@@ -2124,6 +2125,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
++
++ while (--depth >= 0) {
++ if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
++ break;
++ path--;
++ err = ext4_ext_get_access(handle, inode, path);
++ if (err)
++ break;
++ path->p_idx->ei_block = (path+1)->p_idx->ei_block;
++ err = ext4_ext_dirty(handle, inode, path);
++ if (err)
++ break;
++ }
+ return err;
+ }
+
+@@ -2454,7 +2468,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ /* if this leaf is free, then we should
+ * remove it from index block above */
+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
+- err = ext4_ext_rm_idx(handle, inode, path + depth);
++ err = ext4_ext_rm_idx(handle, inode, path, depth);
+
+ out:
+ return err;
+@@ -2587,7 +2601,7 @@ again:
+ /* index is empty, remove it;
+ * handle must be already prepared by the
+ * truncatei_leaf() */
+- err = ext4_ext_rm_idx(handle, inode, path + i);
++ err = ext4_ext_rm_idx(handle, inode, path, i);
+ }
+ /* root level has p_bh == NULL, brelse() eats this */
+ brelse(path[i].p_bh);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f8d5fce..24ac7a2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1942,6 +1942,16 @@ set_qf_format:
+ }
+ }
+ #endif
++ if (test_opt(sb, DIOREAD_NOLOCK)) {
++ int blocksize =
++ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
++
++ if (blocksize < PAGE_CACHE_SIZE) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "dioread_nolock if block size != PAGE_SIZE");
++ return 0;
++ }
++ }
+ return 1;
+ }
+
+@@ -3367,15 +3377,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ clear_opt(sb, DELALLOC);
+ }
+
+- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+- if (test_opt(sb, DIOREAD_NOLOCK)) {
+- if (blocksize < PAGE_SIZE) {
+- ext4_msg(sb, KERN_ERR, "can't mount with "
+- "dioread_nolock if block size != PAGE_SIZE");
+- goto failed_mount;
+- }
+- }
+-
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+@@ -3417,6 +3418,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
+ goto failed_mount;
+
++ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+@@ -4652,7 +4654,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ }
+
+ ext4_setup_system_zone(sb);
+- if (sbi->s_journal == NULL)
++ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+ ext4_commit_super(sb, 1);
+
+ #ifdef CONFIG_QUOTA
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 8267de5..d7dd774 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -179,7 +179,8 @@ repeat:
+ if (!new_transaction)
+ goto alloc_transaction;
+ write_lock(&journal->j_state_lock);
+- if (!journal->j_running_transaction) {
++ if (!journal->j_running_transaction &&
++ !journal->j_barrier_count) {
+ jbd2_get_transaction(journal, new_transaction);
+ new_transaction = NULL;
+ }
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 8150344..1943898 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1057,7 +1057,7 @@ static int nfs_get_option_str(substring_t args[], char **option)
+ {
+ kfree(*option);
+ *option = match_strdup(args);
+- return !option;
++ return !*option;
+ }
+
+ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 15df1a4..af37ce3 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -581,6 +581,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ int lastblock = 0;
++ bool isBeyondEOF;
+
+ prev_epos.offset = udf_file_entry_alloc_offset(inode);
+ prev_epos.block = iinfo->i_location;
+@@ -659,7 +660,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ /* Are we beyond EOF? */
+ if (etype == -1) {
+ int ret;
+-
++ isBeyondEOF = 1;
+ if (count) {
+ if (c)
+ laarr[0] = laarr[1];
+@@ -702,6 +703,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ endnum = c + 1;
+ lastblock = 1;
+ } else {
++ isBeyondEOF = 0;
+ endnum = startnum = ((count > 2) ? 2 : count);
+
+ /* if the current extent is in position 0,
+@@ -749,7 +751,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ *err = -ENOSPC;
+ return NULL;
+ }
+- iinfo->i_lenExtents += inode->i_sb->s_blocksize;
++ if (isBeyondEOF)
++ iinfo->i_lenExtents += inode->i_sb->s_blocksize;
+ }
+
+ /* if the extent the requested block is located in contains multiple
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index e58fa77..34e0274 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -78,6 +78,14 @@ struct mmu_gather_batch {
+ #define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+
++/*
++ * Limit the maximum number of mmu_gather batches to reduce the risk of soft
++ * lockups for non-preemptible kernels on huge machines when a lot of memory
++ * is zapped during unmapping.
++ * 10K pages freed at once should be safe even without a preemption point.
++ */
++#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
++
+ /* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+@@ -94,6 +102,7 @@ struct mmu_gather {
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
++ unsigned int batch_count;
+ };
+
+ #define HAVE_GENERIC_MMU_GATHER
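
For a sense of scale (assuming 4 KiB pages and a 16-byte batch header on a 64-bit build; both vary by configuration): each batch page holds about 510 page pointers, so the cap above works out to 19 batches, just under the 10K-page target. A throwaway check:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define BATCH_HEADER 16UL   /* next pointer + two unsigned ints, assumed */
#define MAX_GATHER_BATCH ((PAGE_SIZE - BATCH_HEADER) / sizeof(void *))
#define MAX_GATHER_BATCH_COUNT (10000UL / MAX_GATHER_BATCH)

int main(void)
{
	printf("%lu ptrs/batch, %lu batches, %lu pages max\n",
	       (unsigned long)MAX_GATHER_BATCH,
	       (unsigned long)MAX_GATHER_BATCH_COUNT,
	       (unsigned long)(MAX_GATHER_BATCH * MAX_GATHER_BATCH_COUNT));
	return 0;       /* prints: 510 ptrs/batch, 19 batches, 9690 pages max */
}
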
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 59e4028..3fd17c2 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -50,6 +50,11 @@
+ # define inline inline __attribute__((always_inline))
+ # define __inline__ __inline__ __attribute__((always_inline))
+ # define __inline __inline __attribute__((always_inline))
++#else
++/* A lot of inline functions can cause havoc with function tracing */
++# define inline inline notrace
++# define __inline__ __inline__ notrace
++# define __inline __inline notrace
+ #endif
+
+ #define __deprecated __attribute__((deprecated))
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index e90a673..8d9b903 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -360,7 +360,7 @@ static inline void ClearPageCompound(struct page *page)
+ * pages on the LRU and/or pagecache.
+ */
+ TESTPAGEFLAG(Compound, compound)
+-__PAGEFLAG(Head, compound)
++__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
+
+ /*
+ * PG_reclaim is used in combination with PG_compound to mark the
+@@ -372,8 +372,14 @@ __PAGEFLAG(Head, compound)
+ * PG_compound & PG_reclaim => Tail page
+ * PG_compound & ~PG_reclaim => Head page
+ */
++#define PG_head_mask ((1L << PG_compound))
+ #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
++static inline int PageHead(struct page *page)
++{
++ return ((page->flags & PG_head_tail_mask) == PG_head_mask);
++}
++
+ static inline int PageTail(struct page *page)
+ {
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
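
The two flag bits above form a small truth table: PG_compound alone marks a head page, PG_compound together with PG_reclaim marks a tail page, and PG_reclaim on its own keeps its ordinary meaning for normal pages. A sketch of the decoding, with bit positions chosen arbitrarily for illustration:

#define PG_compound (1UL << 0)  /* placeholder positions, not the real ones */
#define PG_reclaim  (1UL << 1)

#define PG_head_mask      (PG_compound)
#define PG_head_tail_mask (PG_compound | PG_reclaim)

static inline int is_head(unsigned long flags)
{
	return (flags & PG_head_tail_mask) == PG_head_mask;
}

static inline int is_tail(unsigned long flags)
{
	return (flags & PG_head_tail_mask) == PG_head_tail_mask;
}
/* flags == PG_reclaim matches neither test: an ordinary page under reclaim. */
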
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5776609..3db3da1 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1543,6 +1543,7 @@
+ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+ #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+ #define PCI_DEVICE_ID_RICOH_R5C822 0x0822
++#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822
+ #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
+ #define PCI_DEVICE_ID_RICOH_R5C832 0x0832
+ #define PCI_DEVICE_ID_RICOH_R5C843 0x0843
+diff --git a/include/linux/snmp.h b/include/linux/snmp.h
+index e16557a..64f5ca7 100644
+--- a/include/linux/snmp.h
++++ b/include/linux/snmp.h
+@@ -209,7 +209,6 @@ enum
+ LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
+ LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */
+ LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */
+- LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */
+ LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */
+ LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */
+ LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */
+@@ -233,6 +232,8 @@ enum
+ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
+ LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
+ LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
++ LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
++ LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
+ __LINUX_MIB_MAX
+ };
+
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index e6db62e..ca2755f 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -317,6 +317,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent,
+ const unsigned long max_rto);
+
+ extern void inet_csk_destroy_sock(struct sock *sk);
++extern void inet_csk_prepare_forced_close(struct sock *sk);
+
+ /*
+ * LISTEN is a special case for poll..
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 72eddd1..1a6201a 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1128,6 +1128,10 @@ enum sta_notify_cmd {
+ * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
+ * setup strictly in HW. mac80211 should not attempt to do this in
+ * software.
++ *
++ * @IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL: On this hardware the TX BA
++ * session should be torn down once a BAR frame is not acked.
++ *
+ */
+ enum ieee80211_hw_flags {
+ IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
+@@ -1154,6 +1158,7 @@ enum ieee80211_hw_flags {
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
+ IEEE80211_HW_AP_LINK_PS = 1<<22,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL = 1<<26,
+ };
+
+ /**
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index bb18c4d..0768715 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -251,6 +251,7 @@ extern int sysctl_tcp_max_ssthresh;
+ extern int sysctl_tcp_cookie_size;
+ extern int sysctl_tcp_thin_linear_timeouts;
+ extern int sysctl_tcp_thin_dupack;
++extern int sysctl_tcp_challenge_ack_limit;
+
+ extern atomic_long_t tcp_memory_allocated;
+ extern struct percpu_counter tcp_sockets_allocated;
+diff --git a/mm/memory.c b/mm/memory.c
+index 15e686a..4f2add1 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -205,10 +205,14 @@ static int tlb_next_batch(struct mmu_gather *tlb)
+ return 1;
+ }
+
++ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
++ return 0;
++
+ batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+ if (!batch)
+ return 0;
+
++ tlb->batch_count++;
+ batch->next = NULL;
+ batch->nr = 0;
+ batch->max = MAX_GATHER_BATCH;
+@@ -235,6 +239,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
++ tlb->batch_count = 0;
+
+ #ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index c59d44b..4d1e637 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2334,8 +2334,7 @@ void numa_default_policy(void)
+ */
+
+ /*
+- * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
+- * Used only for mpol_parse_str() and mpol_to_str()
++ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
+ */
+ #define MPOL_LOCAL MPOL_MAX
+ static const char * const policy_modes[] =
+@@ -2350,28 +2349,21 @@ static const char * const policy_modes[] =
+
+ #ifdef CONFIG_TMPFS
+ /**
+- * mpol_parse_str - parse string to mempolicy
++ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
+ * @str: string containing mempolicy to parse
+ * @mpol: pointer to struct mempolicy pointer, returned on success.
+- * @no_context: flag whether to "contextualize" the mempolicy
++ * @unused: redundant argument, to be removed later.
+ *
+ * Format of input:
+ * <mode>[=<flags>][:<nodelist>]
+ *
+- * if @no_context is true, save the input nodemask in w.user_nodemask in
+- * the returned mempolicy. This will be used to "clone" the mempolicy in
+- * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
+- * mount option. Note that if 'static' or 'relative' mode flags were
+- * specified, the input nodemask will already have been saved. Saving
+- * it again is redundant, but safe.
+- *
+ * On success, returns 0, else 1
+ */
+-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
++int mpol_parse_str(char *str, struct mempolicy **mpol, int unused)
+ {
+ struct mempolicy *new = NULL;
+ unsigned short mode;
+- unsigned short uninitialized_var(mode_flags);
++ unsigned short mode_flags;
+ nodemask_t nodes;
+ char *nodelist = strchr(str, ':');
+ char *flags = strchr(str, '=');
+@@ -2459,24 +2451,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+ if (IS_ERR(new))
+ goto out;
+
+- if (no_context) {
+- /* save for contextualization */
+- new->w.user_nodemask = nodes;
+- } else {
+- int ret;
+- NODEMASK_SCRATCH(scratch);
+- if (scratch) {
+- task_lock(current);
+- ret = mpol_set_nodemask(new, &nodes, scratch);
+- task_unlock(current);
+- } else
+- ret = -ENOMEM;
+- NODEMASK_SCRATCH_FREE(scratch);
+- if (ret) {
+- mpol_put(new);
+- goto out;
+- }
+- }
++ /*
++ * Save nodes for mpol_to_str() to show the tmpfs mount options
++ * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
++ */
++ if (mode != MPOL_PREFERRED)
++ new->v.nodes = nodes;
++ else if (nodelist)
++ new->v.preferred_node = first_node(nodes);
++ else
++ new->flags |= MPOL_F_LOCAL;
++
++ /*
++ * Save nodes for contextualization: this will be used to "clone"
++ * the mempolicy in a specific context [cpuset] at a later time.
++ */
++ new->w.user_nodemask = nodes;
++
+ err = 0;
+
+ out:
+@@ -2496,13 +2487,13 @@ out:
+ * @buffer: to contain formatted mempolicy string
+ * @maxlen: length of @buffer
+ * @pol: pointer to mempolicy to be formatted
+- * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
++ * @unused: redundant argument, to be removed later.
+ *
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
++int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused)
+ {
+ char *p = buffer;
+ int l;
+@@ -2528,7 +2519,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_PREFERRED:
+ nodes_clear(nodes);
+ if (flags & MPOL_F_LOCAL)
+- mode = MPOL_LOCAL; /* pseudo-policy */
++ mode = MPOL_LOCAL;
+ else
+ node_set(pol->v.preferred_node, nodes);
+ break;
+@@ -2536,10 +2527,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+- if (no_context)
+- nodes = pol->w.user_nodemask;
+- else
+- nodes = pol->v.nodes;
++ nodes = pol->v.nodes;
+ break;
+
+ default:
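
For reference, tmpfs mount options in the format documented above look like the following (illustrative nodelists; static/relative are the mode flags):

	mpol=interleave:0-3
	mpol=prefer=static:1
	mpol=bind:0,2
	mpol=local

mpol_parse_str() receives the part after "mpol=" and splits it at '=' and ':' into mode, flags and nodelist, exactly as the hunks above show.
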
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 3f4e541..72416c8 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -434,8 +434,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto exit;
+ }
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 17ee85c..592b78c 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -609,7 +609,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index c14d88a..907ef2c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -647,6 +647,22 @@ void inet_csk_destroy_sock(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_destroy_sock);
+
++/* This function forces the closure of a socket after the call to
++ * tcp/dccp_create_openreq_child().
++ */
++void inet_csk_prepare_forced_close(struct sock *sk)
++{
++ /* sk_clone_lock locked the socket and set refcnt to 2 */
++ bh_unlock_sock(sk);
++ sock_put(sk);
++
++ /* The below has to be done to allow calling inet_csk_destroy_sock */
++ sock_set_flag(sk, SOCK_DEAD);
++ percpu_counter_inc(sk->sk_prot->orphan_count);
++ inet_sk(sk)->inet_num = 0;
++}
++EXPORT_SYMBOL(inet_csk_prepare_forced_close);
++
+ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index 466ea8b..f7fdbe9 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -233,7 +233,6 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
+ SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
+ SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
+- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
+ SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
+ SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
+ SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
+@@ -257,6 +256,8 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+ SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+ SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
++ SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
++ SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 69fd720..5485077 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -552,6 +552,13 @@ static struct ctl_table ipv4_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "tcp_challenge_ack_limit",
++ .data = &sysctl_tcp_challenge_ack_limit,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
+ #ifdef CONFIG_NET_DMA
+ {
+ .procname = "tcp_dma_copybreak",
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a08a621..aab8f08 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -86,6 +86,9 @@ int sysctl_tcp_app_win __read_mostly = 31;
+ int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
++/* rfc5961 challenge ack rate limiting */
++int sysctl_tcp_challenge_ack_limit = 100;
++
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
+@@ -3700,6 +3703,24 @@ static int tcp_process_frto(struct sock *sk, int flag)
+ return 0;
+ }
+
++/* RFC 5961 7 [ACK Throttling] */
++static void tcp_send_challenge_ack(struct sock *sk)
++{
++	/* unprotected vars; we don't care about overwrites */
++ static u32 challenge_timestamp;
++ static unsigned int challenge_count;
++ u32 now = jiffies / HZ;
++
++ if (now != challenge_timestamp) {
++ challenge_timestamp = now;
++ challenge_count = 0;
++ }
++ if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
++ tcp_send_ack(sk);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3718,8 +3739,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ /* If the ack is older than previous acks
+ * then we can probably ignore it.
+ */
+- if (before(ack, prior_snd_una))
++ if (before(ack, prior_snd_una)) {
++ /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
++ if (before(ack, prior_snd_una - tp->max_window)) {
++ tcp_send_challenge_ack(sk);
++ return -1;
++ }
+ goto old_ack;
++ }
+
+ /* If the ack includes data we haven't sent yet, discard
+ * this segment (RFC793 Section 3.9).
+@@ -5243,8 +5270,8 @@ out:
+ /* Does PAWS and seqno based validation of an incoming segment, flags will
+ * play significant role here.
+ */
+-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+- const struct tcphdr *th, int syn_inerr)
++static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
++ const struct tcphdr *th, int syn_inerr)
+ {
+ const u8 *hash_location;
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -5269,38 +5296,48 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ * an acknowledgment should be sent in reply (unless the RST
+ * bit is set, if so drop the segment and return)".
+ */
+- if (!th->rst)
++ if (!th->rst) {
++ if (th->syn)
++ goto syn_challenge;
+ tcp_send_dupack(sk, skb);
++ }
+ goto discard;
+ }
+
+ /* Step 2: check RST bit */
+ if (th->rst) {
+- tcp_reset(sk);
++ /* RFC 5961 3.2 :
++ * If sequence number exactly matches RCV.NXT, then
++ * RESET the connection
++ * else
++ * Send a challenge ACK
++ */
++ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
++ tcp_reset(sk);
++ else
++ tcp_send_challenge_ack(sk);
+ goto discard;
+ }
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 3: check security and precedence [ignored] */
+
+- /* step 4: Check for a SYN in window. */
+- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
++ /* step 4: Check for a SYN
++	 * RFC 5961 4.2 : Send a challenge ack
++ */
++ if (th->syn) {
++syn_challenge:
+ if (syn_inerr)
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
+- tcp_reset(sk);
+- return -1;
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
++ tcp_send_challenge_ack(sk);
++ goto discard;
+ }
+
+- return 1;
++ return true;
+
+ discard:
+ __kfree_skb(skb);
+- return 0;
++ return false;
+ }
+
+ /*
+@@ -5330,7 +5367,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+- int res;
+
+ /*
+ * Header prediction.
+@@ -5510,14 +5546,18 @@ slow_path:
+ * Standard slow path.
+ */
+
+- res = tcp_validate_incoming(sk, skb, th, 1);
+- if (res <= 0)
+- return -res;
++ if (!tcp_validate_incoming(sk, skb, th, 1))
++ return 0;
+
+ step5:
+ if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -5822,7 +5862,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ int queued = 0;
+- int res;
+
+ tp->rx_opt.saw_tstamp = 0;
+
+@@ -5877,9 +5916,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ return 0;
+ }
+
+- res = tcp_validate_incoming(sk, skb, th, 0);
+- if (res <= 0)
+- return -res;
++ if (!tcp_validate_incoming(sk, skb, th, 0))
++ return 0;
+
+ /* step 5: check the ACK field */
+ if (th->ack) {
+@@ -5990,6 +6028,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 58c09a0..a97c9ad 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1520,9 +1520,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- tcp_clear_xmit_timers(newsk);
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto exit;
+ }
+ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index ccab3c8..db10805 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1524,7 +1524,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ #endif
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 16518f3..67df50e 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -429,7 +429,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+
+- ieee80211_set_bar_pending(sta, tid, ssn);
++ if (local->hw.flags &
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
++ ieee80211_stop_tx_ba_session(&sta->sta, tid);
++ else
++ ieee80211_set_bar_pending(sta, tid, ssn);
+ }
+ }
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 29b942c..f08b9166 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -876,7 +876,7 @@ ok:
+ q->now = psched_get_time();
+ start_at = jiffies;
+
+- next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
++ next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC;
+
+ for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
+ /* common case optimization - skip event handler quickly */
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index c90b832..56c3f85 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -880,16 +880,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
+ return task;
+ }
+
++/*
++ * rpc_free_task - release rpc task and perform cleanups
++ *
++ * Note that we free up the rpc_task _after_ rpc_release_calldata()
++ * in order to work around a workqueue dependency issue.
++ *
++ * Tejun Heo states:
++ * "Workqueue currently considers two work items to be the same if they're
++ * on the same address and won't execute them concurrently - ie. it
++ * makes a work item which is queued again while being executed wait
++ * for the previous execution to complete.
++ *
++ * If a work function frees the work item, and then waits for an event
++ * which should be performed by another work item and *that* work item
++ * recycles the freed work item, it can create a false dependency loop.
++ * There really is no reliable way to detect this short of verifying
++ * every memory free."
++ *
++ */
+ static void rpc_free_task(struct rpc_task *task)
+ {
+- const struct rpc_call_ops *tk_ops = task->tk_ops;
+- void *calldata = task->tk_calldata;
++ unsigned short tk_flags = task->tk_flags;
++
++ rpc_release_calldata(task->tk_ops, task->tk_calldata);
+
+- if (task->tk_flags & RPC_TASK_DYNAMIC) {
++ if (tk_flags & RPC_TASK_DYNAMIC) {
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
+ mempool_free(task, rpc_task_mempool);
+ }
+- rpc_release_calldata(tk_ops, calldata);
+ }
+
+ static void rpc_async_release(struct work_struct *work)
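
The rule in the comment above generalizes: when a callback may block on work that could recycle the current allocation, snapshot any fields you still need, run the callback, and free the memory strictly last. A hedged userspace sketch of that shape (invented types, not the RPC structures):

#include <stdlib.h>

struct task {
	unsigned short flags;           /* e.g. TASK_DYNAMIC below */
	void (*release)(void *data);    /* may block and reuse memory */
	void *data;
};

#define TASK_DYNAMIC 0x0001

static void free_task(struct task *t)
{
	unsigned short flags = t->flags;    /* copy before t can vanish */

	t->release(t->data);                /* all uses of t finish here */

	if (flags & TASK_DYNAMIC)
		free(t);                    /* free strictly last */
}
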
diff --git a/3.2.54/1037_linux-3.2.38.patch b/3.2.54/1037_linux-3.2.38.patch
new file mode 100644
index 0000000..a3c106f
--- /dev/null
+++ b/3.2.54/1037_linux-3.2.38.patch
@@ -0,0 +1,4587 @@
+diff --git a/Makefile b/Makefile
+index 21c77e2..c8c9d02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 08c82fd..3606e85 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -221,6 +221,7 @@ __create_page_tables:
+ /*
+ * Then map boot params address in r2 or
+ * the first 1MB of ram if boot params address is not specified.
++ * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
+ */
+ mov r0, r2, lsr #SECTION_SHIFT
+ movs r0, r0, lsl #SECTION_SHIFT
+@@ -229,6 +230,8 @@ __create_page_tables:
+ add r3, r3, #PAGE_OFFSET
+ add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+ orr r6, r7, r0
++ str r6, [r3], #1 << PMD_ORDER
++ add r6, r6, #1 << SECTION_SHIFT
+ str r6, [r3]
+
+ #ifdef CONFIG_DEBUG_LL
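
The two-entry mapping added above matters because a boot-params blob rarely starts section-aligned: with 1 MiB sections (SECTION_SHIFT == 20 in the classic ARM short-descriptor format, assumed here), a blob that begins near the top of one section spills into the next. A quick illustration:

#include <stdio.h>

#define SECTION_SHIFT 20                        /* 1 MiB sections */

int main(void)
{
	unsigned long atags = 0x000FFF00;       /* hypothetical blob start */
	unsigned long size  = 0x400;            /* 1 KiB blob */

	unsigned long first = atags >> SECTION_SHIFT;
	unsigned long last  = (atags + size - 1) >> SECTION_SHIFT;

	printf("sections %lu..%lu\n", first, last);     /* 0..1: two entries */
	return 0;
}
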
+diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+index ec0f0b0..18c083e 100644
+--- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
++++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+@@ -460,6 +460,9 @@
+ GPIO76_LCD_PCLK, \
+ GPIO77_LCD_BIAS
+
++/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
++#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
++#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
+
+ extern int keypad_set_wake(unsigned int on);
+ #endif /* __ASM_ARCH_MFP_PXA27X_H */
+diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
+index bc5a98e..a9447f9 100644
+--- a/arch/arm/mach-pxa/pxa27x.c
++++ b/arch/arm/mach-pxa/pxa27x.c
+@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
+ EXPORT_SYMBOL(pxa27x_clear_otgph);
+
+ static unsigned long ac97_reset_config[] = {
+- GPIO113_GPIO,
++ GPIO113_AC97_nRESET_GPIO_HIGH,
+ GPIO113_AC97_nRESET,
+- GPIO95_GPIO,
++ GPIO95_AC97_nRESET_GPIO_HIGH,
+ GPIO95_AC97_nRESET,
+ };
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 1aa664a..e1dd92c 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -500,25 +500,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+ {
++ unsigned long pfn;
++ size_t left = size;
++
++ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
++ offset %= PAGE_SIZE;
++
+ /*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+- size_t left = size;
+ do {
+ size_t len = left;
+ void *vaddr;
+
++ page = pfn_to_page(pfn);
++
+ if (PageHighMem(page)) {
+- if (len + offset > PAGE_SIZE) {
+- if (offset >= PAGE_SIZE) {
+- page += offset / PAGE_SIZE;
+- offset %= PAGE_SIZE;
+- }
++ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+- }
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ vaddr += offset;
+@@ -535,7 +537,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ op(vaddr, len, dir);
+ }
+ offset = 0;
+- page++;
++ pfn++;
+ left -= len;
+ } while (left);
+ }
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index cc926c9..323ce1a 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -22,7 +22,7 @@
+ @ IRQs disabled.
+ @
+ ENTRY(do_vfp)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ add r11, r4, #1 @ increment it
+ str r11, [r10, #TI_PREEMPT]
+@@ -35,7 +35,7 @@ ENTRY(do_vfp)
+ ENDPROC(do_vfp)
+
+ ENTRY(vfp_null_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
+
+ __INIT
+ ENTRY(vfp_testing_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index 3a0efaa..6ff903e 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -167,7 +167,7 @@ vfp_hw_state_valid:
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -191,7 +191,7 @@ look_for_VFP_exceptions:
+ @ not recognised by VFP
+
+ DBGSTR "not VFP"
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
+index 141dce3..2a73d82 100644
+--- a/arch/powerpc/kvm/emulate.c
++++ b/arch/powerpc/kvm/emulate.c
+@@ -35,6 +35,7 @@
+ #define OP_TRAP_64 2
+
+ #define OP_31_XOP_LWZX 23
++#define OP_31_XOP_DCBF 86
+ #define OP_31_XOP_LBZX 87
+ #define OP_31_XOP_STWX 151
+ #define OP_31_XOP_STBX 215
+@@ -370,6 +371,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+ break;
+
++ case OP_31_XOP_DCBF:
+ case OP_31_XOP_DCBI:
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index c447a27..945b7cd 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -137,4 +137,32 @@ static inline unsigned long long get_clock_monotonic(void)
+ return get_clock_xt() - sched_clock_base_cc;
+ }
+
++/**
++ * tod_to_ns - convert a TOD format value to nanoseconds
++ * @todval: to be converted TOD format value
++ * Returns: number of nanoseconds that correspond to the TOD format value
++ *
++ * Converting a 64-bit TOD format value to nanoseconds means that the value
++ * must be divided by 4.096. In order to achieve that we multiply with 125
++ * and divide by 512:
++ *
++ * ns = (todval * 125) >> 9;
++ *
++ * In order to avoid an overflow with the multiplication we can rewrite this.
++ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
++ * we end up with
++ *
++ * ns = ((2^32 * th + tl) * 125 ) >> 9;
++ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
++ *
++ */
++static inline unsigned long long tod_to_ns(unsigned long long todval)
++{
++ unsigned long long ns;
++
++ ns = ((todval >> 32) << 23) * 125;
++ ns += ((todval & 0xffffffff) * 125) >> 9;
++ return ns;
++}
++
+ #endif
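[Editor's note: the tod_to_ns() comment above derives an overflow-free split
of (todval * 125) >> 9; the split is exact because the high term is a
multiple of 2^9, so no carry is lost in the low shift. A quick userspace
check of that identity (sketch only), comparing against the naive form on
values small enough that the naive multiplication still fits in 64 bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the tod_to_ns() added above */
static uint64_t tod_to_ns_split(uint64_t todval)
{
    uint64_t ns = ((todval >> 32) << 23) * 125;     /* 2^23 * th * 125 */

    ns += ((todval & 0xffffffff) * 125) >> 9;       /* (tl * 125) >> 9 */
    return ns;
}

int main(void)
{
    /* naive form is safe while todval * 125 fits in 64 bits */
    const uint64_t samples[] = {
        0, 1, 4096, 0x123456789abcULL, (1ULL << 57) - 1
    };
    unsigned i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        assert(tod_to_ns_split(samples[i]) == (samples[i] * 125) >> 9);
    printf("split matches naive form on all samples\n");
    return 0;
}
]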
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index e03c555..8644366 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -64,7 +64,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
+ */
+ unsigned long long notrace __kprobes sched_clock(void)
+ {
+- return (get_clock_monotonic() * 125) >> 9;
++ return tod_to_ns(get_clock_monotonic());
+ }
+
+ /*
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 278ee00..5482d1e 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -391,7 +391,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+- sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+ hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index f38112b..978b7fd 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -202,9 +202,9 @@ extern void __kernel_vsyscall;
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+ else \
+- NEW_AUX_ENT(AT_IGNORE, 0);
++ NEW_AUX_ENT(AT_IGNORE, 0)
+ #else
+-#define VSYSCALL_AUX_ENT
++#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif /* CONFIG_VSYSCALL */
+
+ #ifdef CONFIG_SH_FPU
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 7093e4a..035cd81 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+ #endif /* CONFIG_X86_32 */
+
+ extern int add_efi_memmap;
++extern unsigned long x86_efi_facility;
+ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
+ extern void efi_memblock_x86_reserve_range(void);
+ extern void efi_call_phys_prelog(void);
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 0012d09..88eae2a 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
+ asmlinkage void mce_threshold_interrupt(void);
+ #endif
+
++/* Interrupts/Exceptions */
++enum {
++ X86_TRAP_DE = 0, /* 0, Divide-by-zero */
++ X86_TRAP_DB, /* 1, Debug */
++ X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
++ X86_TRAP_BP, /* 3, Breakpoint */
++ X86_TRAP_OF, /* 4, Overflow */
++ X86_TRAP_BR, /* 5, Bound Range Exceeded */
++ X86_TRAP_UD, /* 6, Invalid Opcode */
++ X86_TRAP_NM, /* 7, Device Not Available */
++ X86_TRAP_DF, /* 8, Double Fault */
++ X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
++ X86_TRAP_TS, /* 10, Invalid TSS */
++ X86_TRAP_NP, /* 11, Segment Not Present */
++ X86_TRAP_SS, /* 12, Stack Segment Fault */
++ X86_TRAP_GP, /* 13, General Protection Fault */
++ X86_TRAP_PF, /* 14, Page Fault */
++ X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
++ X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
++ X86_TRAP_AC, /* 17, Alignment Check */
++ X86_TRAP_MC, /* 18, Machine Check */
++ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
++ X86_TRAP_IRET = 32, /* 32, IRET Exception */
++};
++
+ #endif /* _ASM_X86_TRAPS_H */
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 4893d58..d2d488b8 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1074,7 +1074,6 @@ ENTRY(xen_failsafe_callback)
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+- addl $16,%esp
+ jmp iret_exc
+ 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index b3300e6..e328f69 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -61,7 +61,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
+ outb(0, 0xF0);
+ if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+ return IRQ_NONE;
+- math_error(get_irq_regs(), 0, 16);
++ math_error(get_irq_regs(), 0, X86_TRAP_MF);
+ return IRQ_HANDLED;
+ }
+
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 12fcbe2..f7d1a64 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -175,6 +175,9 @@ static int msr_open(struct inode *inode, struct file *file)
+ unsigned int cpu;
+ struct cpuinfo_x86 *c;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ cpu = iminor(file->f_path.dentry->d_inode);
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO; /* No such CPU */
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index e61f79c..47f4e5f 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -603,7 +603,7 @@ static void native_machine_emergency_restart(void)
+ break;
+
+ case BOOT_EFI:
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.reset_system(reboot_mode ?
+ EFI_RESET_WARM :
+ EFI_RESET_COLD,
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 0d403aa..b506f41 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -631,6 +631,83 @@ static __init void reserve_ibft_region(void)
+
+ static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+
++static bool __init snb_gfx_workaround_needed(void)
++{
++#ifdef CONFIG_PCI
++ int i;
++ u16 vendor, devid;
++ static const __initconst u16 snb_ids[] = {
++ 0x0102,
++ 0x0112,
++ 0x0122,
++ 0x0106,
++ 0x0116,
++ 0x0126,
++ 0x010a,
++ };
++
++ /* Assume no if something weird is going on with PCI */
++ if (!early_pci_allowed())
++ return false;
++
++ vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
++ if (vendor != 0x8086)
++ return false;
++
++ devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
++ for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
++ if (devid == snb_ids[i])
++ return true;
++#endif
++
++ return false;
++}
++
++/*
++ * Sandy Bridge graphics has trouble with certain ranges, exclude
++ * them from allocation.
++ */
++static void __init trim_snb_memory(void)
++{
++ static const __initconst unsigned long bad_pages[] = {
++ 0x20050000,
++ 0x20110000,
++ 0x20130000,
++ 0x20138000,
++ 0x40004000,
++ };
++ int i;
++
++ if (!snb_gfx_workaround_needed())
++ return;
++
++ printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
++
++ /*
++ * Reserve all memory below the 1 MB mark that has not
++ * already been reserved.
++ */
++ memblock_reserve(0, 1<<20);
++
++ for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
++ if (memblock_reserve(bad_pages[i], PAGE_SIZE))
++ printk(KERN_WARNING "failed to reserve 0x%08lx\n",
++ bad_pages[i]);
++ }
++}
++
++/*
++ * Here we put platform-specific memory range workarounds, i.e.
++ * memory known to be corrupt or otherwise in need to be reserved on
++ * specific platforms.
++ *
++ * If this gets used more widely it could use a real dispatch mechanism.
++ */
++static void __init trim_platform_memory_ranges(void)
++{
++ trim_snb_memory();
++}
++
+ static void __init trim_bios_range(void)
+ {
+ /*
+@@ -651,6 +728,7 @@ static void __init trim_bios_range(void)
+ * take them out.
+ */
+ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -750,15 +828,16 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ #ifdef CONFIG_EFI
+ if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+-#ifdef CONFIG_X86_32
+- "EL32",
+-#else
+- "EL64",
+-#endif
+- 4)) {
+- efi_enabled = 1;
+- efi_memblock_x86_reserve_range();
++ "EL32", 4)) {
++ set_bit(EFI_BOOT, &x86_efi_facility);
++ } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
++ "EL64", 4)) {
++ set_bit(EFI_BOOT, &x86_efi_facility);
++ set_bit(EFI_64BIT, &x86_efi_facility);
+ }
++
++ if (efi_enabled(EFI_BOOT))
++ efi_memblock_x86_reserve_range();
+ #endif
+
+ x86_init.oem.arch_setup();
+@@ -831,7 +910,7 @@ void __init setup_arch(char **cmdline_p)
+
+ finish_e820_parsing();
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_BOOT))
+ efi_init();
+
+ dmi_scan_machine();
+@@ -914,7 +993,7 @@ void __init setup_arch(char **cmdline_p)
+ * The EFI specification says that boot service code won't be called
+ * after ExitBootServices(). This is, in fact, a lie.
+ */
+- if (efi_enabled)
++ if (efi_enabled(EFI_MEMMAP))
+ efi_reserve_boot_services();
+
+ /* preallocate 4k for mptable mpc */
+@@ -929,6 +1008,8 @@ void __init setup_arch(char **cmdline_p)
+
+ setup_trampolines();
+
++ trim_platform_memory_ranges();
++
+ init_gbpages();
+
+ /* max_pfn_mapped is updated here */
+@@ -1048,7 +1129,7 @@ void __init setup_arch(char **cmdline_p)
+
+ #ifdef CONFIG_VT
+ #if defined(CONFIG_VGA_CONSOLE)
+- if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+ #elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 31d9d0f..e6fbb94 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+ */
+- if (trapnr < 6)
++ if (trapnr < X86_TRAP_UD)
+ goto vm86_trap;
+ goto trap_signal;
+ }
+@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+ }
+
+-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
+-DO_ERROR(4, SIGSEGV, "overflow", overflow)
+-DO_ERROR(5, SIGSEGV, "bounds", bounds)
+-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
+-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
++ regs->ip)
++DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
++DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
++ regs->ip)
++DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
++ coprocessor_segment_overrun)
++DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+ #ifdef CONFIG_X86_32
+-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+ #endif
+-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
++ BUS_ADRALN, 0)
+
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+ {
+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+- 12, SIGBUS) == NOTIFY_STOP)
++ X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
+ return;
+ preempt_conditional_sti(regs);
+- do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++ do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+ }
+
+@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ struct task_struct *tsk = current;
+
+ /* Return not checked because double fault cannot be ignored */
+- notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 8;
++ tsk->thread.trap_no = X86_TRAP_DF;
+
+ /*
+ * This is always a kernel trap and never fixable (and thus must
+@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ goto gp_in_kernel;
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 13;
++ tsk->thread.trap_no = X86_TRAP_GP;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+ printk_ratelimit()) {
+@@ -291,9 +295,9 @@ gp_in_kernel:
+ return;
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 13;
+- if (notify_die(DIE_GPF, "general protection fault", regs,
+- error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ tsk->thread.trap_no = X86_TRAP_GP;
++ if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
++ X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+@@ -302,13 +306,14 @@ gp_in_kernel:
+ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ {
+ #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+- if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+- == NOTIFY_STOP)
++ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
++ SIGTRAP) == NOTIFY_STOP)
+ return;
+ #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+ #ifdef CONFIG_KPROBES
+- if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+- == NOTIFY_STOP)
++
++ if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
++ SIGTRAP) == NOTIFY_STOP)
+ return;
+ #else
+ if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
+@@ -317,7 +322,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ #endif
+
+ preempt_conditional_sti(regs);
+- do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+ }
+
+@@ -415,8 +420,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ preempt_conditional_sti(regs);
+
+ if (regs->flags & X86_VM_MASK) {
+- handle_vm86_trap((struct kernel_vm86_regs *) regs,
+- error_code, 1);
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
++ X86_TRAP_DB);
+ preempt_conditional_cli(regs);
+ return;
+ }
+@@ -451,7 +456,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ struct task_struct *task = current;
+ siginfo_t info;
+ unsigned short err;
+- char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
++ char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
++ "simd exception";
+
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+ return;
+@@ -476,7 +482,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_addr = (void __user *)regs->ip;
+- if (trapnr == 16) {
++ if (trapnr == X86_TRAP_MF) {
+ unsigned short cwd, swd;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+@@ -520,10 +526,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ info.si_code = FPE_FLTRES;
+ } else {
+ /*
+- * If we're using IRQ 13, or supposedly even some trap 16
+- * implementations, it's possible we get a spurious trap...
++ * If we're using IRQ 13, or supposedly even some trap
++ * X86_TRAP_MF implementations, it's possible
++ * we get a spurious trap, which is not an error.
+ */
+- return; /* Spurious trap, no error */
++ return;
+ }
+ force_sig_info(SIGFPE, &info, task);
+ }
+@@ -534,13 +541,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
+ ignore_fpu_irq = 1;
+ #endif
+
+- math_error(regs, error_code, 16);
++ math_error(regs, error_code, X86_TRAP_MF);
+ }
+
+ dotraplinkage void
+ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
+ {
+- math_error(regs, error_code, 19);
++ math_error(regs, error_code, X86_TRAP_XF);
+ }
+
+ dotraplinkage void
+@@ -658,20 +665,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+ info.si_errno = 0;
+ info.si_code = ILL_BADSTK;
+ info.si_addr = NULL;
+- if (notify_die(DIE_TRAP, "iret exception",
+- regs, error_code, 32, SIGILL) == NOTIFY_STOP)
++ if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
++ X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
+ return;
+- do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
++ do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
++ &info);
+ }
+ #endif
+
+ /* Set of traps needed for early debugging. */
+ void __init early_trap_init(void)
+ {
+- set_intr_gate_ist(1, &debug, DEBUG_STACK);
++ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+ /* int3 can be called from all */
+- set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+- set_intr_gate(14, &page_fault);
++ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
++ set_intr_gate(X86_TRAP_PF, &page_fault);
+ load_idt(&idt_descr);
+ }
+
+@@ -687,30 +695,30 @@ void __init trap_init(void)
+ early_iounmap(p, 4);
+ #endif
+
+- set_intr_gate(0, &divide_error);
+- set_intr_gate_ist(2, &nmi, NMI_STACK);
++ set_intr_gate(X86_TRAP_DE, &divide_error);
++ set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
+ /* int4 can be called from all */
+- set_system_intr_gate(4, &overflow);
+- set_intr_gate(5, &bounds);
+- set_intr_gate(6, &invalid_op);
+- set_intr_gate(7, &device_not_available);
++ set_system_intr_gate(X86_TRAP_OF, &overflow);
++ set_intr_gate(X86_TRAP_BR, &bounds);
++ set_intr_gate(X86_TRAP_UD, &invalid_op);
++ set_intr_gate(X86_TRAP_NM, &device_not_available);
+ #ifdef CONFIG_X86_32
+- set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
++ set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
+ #else
+- set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
++ set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
+ #endif
+- set_intr_gate(9, &coprocessor_segment_overrun);
+- set_intr_gate(10, &invalid_TSS);
+- set_intr_gate(11, &segment_not_present);
+- set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
+- set_intr_gate(13, &general_protection);
+- set_intr_gate(15, &spurious_interrupt_bug);
+- set_intr_gate(16, &coprocessor_error);
+- set_intr_gate(17, &alignment_check);
++ set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
++ set_intr_gate(X86_TRAP_TS, &invalid_TSS);
++ set_intr_gate(X86_TRAP_NP, &segment_not_present);
++ set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++ set_intr_gate(X86_TRAP_GP, &general_protection);
++ set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
++ set_intr_gate(X86_TRAP_MF, &coprocessor_error);
++ set_intr_gate(X86_TRAP_AC, &alignment_check);
+ #ifdef CONFIG_X86_MCE
+- set_intr_gate_ist(18, &machine_check, MCE_STACK);
++ set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
+ #endif
+- set_intr_gate(19, &simd_coprocessor_error);
++ set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
+
+ /* Reserve all the builtin and the syscall vector: */
+ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 4d320b2..bef9991 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -49,9 +49,6 @@
+ #define EFI_DEBUG 1
+ #define PFX "EFI: "
+
+-int efi_enabled;
+-EXPORT_SYMBOL(efi_enabled);
+-
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+@@ -70,9 +67,25 @@ struct efi_memory_map memmap;
+ static struct efi efi_phys __initdata;
+ static efi_system_table_t efi_systab __initdata;
+
++static inline bool efi_is_native(void)
++{
++ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
++}
++
++unsigned long x86_efi_facility;
++
++/*
++ * Returns 1 if 'facility' is enabled, 0 otherwise.
++ */
++int efi_enabled(int facility)
++{
++ return test_bit(facility, &x86_efi_facility) != 0;
++}
++EXPORT_SYMBOL(efi_enabled);
++
+ static int __init setup_noefi(char *arg)
+ {
+- efi_enabled = 0;
++ clear_bit(EFI_BOOT, &x86_efi_facility);
+ return 0;
+ }
+ early_param("noefi", setup_noefi);
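[Editor's note: the hunks above and below replace the single boolean
efi_enabled with a bitmask of independent facilities (boot, 64-bit, system
and config tables, runtime services, memmap), so each caller can test
exactly the capability it depends on. A standalone sketch of the pattern,
with invented helper names and plain C bit operations standing in for the
kernel's set_bit/clear_bit/test_bit:

#include <stdio.h>

enum {
    EFI_BOOT, EFI_64BIT, EFI_SYSTEM_TABLES, EFI_CONFIG_TABLES,
    EFI_RUNTIME_SERVICES, EFI_MEMMAP
};

static unsigned long facility;

static void facility_set(int bit)   { facility |= 1UL << bit; }
static void facility_clear(int bit) { facility &= ~(1UL << bit); }
static int  facility_test(int bit)  { return (facility >> bit) & 1; }

int main(void)
{
    facility_set(EFI_BOOT);
    facility_set(EFI_RUNTIME_SERVICES);
    facility_set(EFI_MEMMAP);
    facility_clear(EFI_MEMMAP);    /* e.g. once the memmap is unmapped */

    /* each caller asks for the one capability it actually needs */
    printf("runtime services: %d\n", facility_test(EFI_RUNTIME_SERVICES));
    printf("memmap:           %d\n", facility_test(EFI_MEMMAP));
    return 0;
}
]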
+@@ -440,6 +453,9 @@ void __init efi_init(void)
+ int i = 0;
+ void *tmp;
+
++ if (!efi_is_native())
++ return;
++
+ #ifdef CONFIG_X86_32
+ efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
+ #else
+@@ -467,6 +483,8 @@ void __init efi_init(void)
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
++ set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
++
+ /*
+ * Show what we know for posterity
+ */
+@@ -529,6 +547,8 @@ void __init efi_init(void)
+ early_iounmap(config_tables,
+ efi.systab->nr_tables * sizeof(efi_config_table_t));
+
++ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
++
+ /*
+ * Check out the runtime services table. We need to map
+ * the runtime services table so that we can grab the physical
+@@ -552,6 +572,8 @@ void __init efi_init(void)
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
++
++ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ } else
+ printk(KERN_ERR "Could not map the EFI runtime service "
+ "table!\n");
+@@ -571,6 +593,8 @@ void __init efi_init(void)
+ if (add_efi_memmap)
+ do_add_efi_memmap();
+
++ set_bit(EFI_MEMMAP, &x86_efi_facility);
++
+ #ifdef CONFIG_X86_32
+ x86_platform.get_wallclock = efi_get_time;
+ x86_platform.set_wallclock = efi_set_rtc_mmss;
+@@ -731,7 +755,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
+- efi.runtime_version = efi_systab.fw_revision;
++ efi.runtime_version = efi_systab.hdr.revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+@@ -747,6 +771,7 @@ void __init efi_enter_virtual_mode(void)
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
++ clear_bit(EFI_MEMMAP, &x86_efi_facility);
+ early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
+ memmap.map = NULL;
+ kfree(new_memmap);
+@@ -760,6 +785,9 @@ u32 efi_mem_type(unsigned long phys_addr)
+ efi_memory_desc_t *md;
+ void *p;
+
++ if (!efi_enabled(EFI_MEMMAP))
++ return 0;
++
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ if ((md->phys_addr <= phys_addr) &&
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index ac3aa54..0fba86d 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -38,7 +38,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+
+-static pgd_t save_pgd __initdata;
++static pgd_t *save_pgd __initdata;
+ static unsigned long efi_flags __initdata;
+
+ static void __init early_code_mapping_set_exec(int executable)
+@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
+ void __init efi_call_phys_prelog(void)
+ {
+ unsigned long vaddress;
++ int pgd;
++ int n_pgds;
+
+ early_code_mapping_set_exec(1);
+ local_irq_save(efi_flags);
+- vaddress = (unsigned long)__va(0x0UL);
+- save_pgd = *pgd_offset_k(0x0UL);
+- set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
++
++ n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
++ save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
++
++ for (pgd = 0; pgd < n_pgds; pgd++) {
++ save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
++ vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
++ }
+ __flush_tlb_all();
+ }
+
+@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+- set_pgd(pgd_offset_k(0x0UL), save_pgd);
++ int pgd;
++ int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
++ for (pgd = 0; pgd < n_pgds; pgd++)
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
++ kfree(save_pgd);
+ __flush_tlb_all();
+ local_irq_restore(efi_flags);
+ early_code_mapping_set_exec(0);
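[Editor's note: the efi_64.c fix saves and restores one kernel PGD entry per
PGDIR_SIZE slice of physical memory instead of only the first entry. A quick
userspace check of the n_pgds sizing (sketch only; PGDIR_SIZE assumed
2^39 = 512 GiB as on x86-64):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define PGDIR_SIZE          (1ULL << 39)    /* 512 GiB per PGD entry */

int main(void)
{
    /* 16 GiB of RAM still fits in a single PGD entry... */
    printf("n_pgds = %llu\n", DIV_ROUND_UP(16ULL << 30, PGDIR_SIZE));
    /* ...while 600 GiB needs two entries saved and restored */
    printf("n_pgds = %llu\n", DIV_ROUND_UP(600ULL << 30, PGDIR_SIZE));
    return 0;
}
]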
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index f31c5c5..a6664d2 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -255,7 +255,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
+ return acpi_rsdp;
+ #endif
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 0e8e2de..de0791c 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -989,6 +989,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+ return -EINVAL;
+ }
+
++ if (!dev)
++ return -EINVAL;
++
+ dev->cpu = pr->id;
+
+ if (max_cstate == 0)
+@@ -1175,6 +1178,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ }
+
+ /* Populate Updated C-state information */
++ acpi_processor_get_power_info(pr);
+ acpi_processor_setup_cpuidle_states(pr);
+
+ /* Enable all cpuidle devices */
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 608257a..b07edc4 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -395,7 +395,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+
+ /* Asmedia */
+- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
+index 4a0f314..be984e0 100644
+--- a/drivers/block/drbd/drbd_req.c
++++ b/drivers/block/drbd/drbd_req.c
+@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
+ const int rw = bio_data_dir(bio);
+ int cpu;
+ cpu = part_stat_lock();
++ part_round_stats(cpu, &mdev->vdisk->part0);
+ part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
+ part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ part_inc_in_flight(&mdev->vdisk->part0, rw);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index de9c800..166cb36 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -546,6 +546,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+ {
+ struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
++ int refc;
+
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+@@ -560,11 +561,15 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+
+ flush_work(&vblk->config_work);
+
++ refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
+ put_disk(vblk->disk);
+ mempool_destroy(vblk->pool);
+ vdev->config->del_vqs(vdev);
+ kfree(vblk);
+- ida_simple_remove(&vd_index_ida, index);
++
++ /* Only free device id if we don't have any users */
++ if (refc == 1)
++ ida_simple_remove(&vd_index_ida, index);
+ }
+
+ static const struct virtio_device_id id_table[] = {
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 2dbf32b..714560f 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -949,7 +949,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+ goto free_resources;
+ }
+ }
+- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* skip validate if the capability is not present */
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 495198a..8cc8676 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -257,7 +257,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+- if (edac_pci_dev->show)
++ if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+ }
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index b298158..982f1f5 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -16,6 +16,7 @@
+ */
+ static char dmi_empty_string[] = " ";
+
++static u16 __initdata dmi_ver;
+ /*
+ * Catch too early calls to dmi_check_system():
+ */
+@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+ return 0;
+ }
+
+-static int __init dmi_checksum(const u8 *buf)
++static int __init dmi_checksum(const u8 *buf, u8 len)
+ {
+ u8 sum = 0;
+ int a;
+
+- for (a = 0; a < 15; a++)
++ for (a = 0; a < len; a++)
+ sum += buf[a];
+
+ return sum == 0;
+@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ return;
+
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+- if(d[i] != 0x00) is_ff = 0;
+- if(d[i] != 0xFF) is_00 = 0;
++ if (d[i] != 0x00)
++ is_00 = 0;
++ if (d[i] != 0xFF)
++ is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ if (!s)
+ return;
+
+- sprintf(s, "%pUB", d);
++ /*
++ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
++ * the UUID are supposed to be little-endian encoded. The specification
++ * says that this is the de facto standard.
++ */
++ if (dmi_ver >= 0x0206)
++ sprintf(s, "%pUL", d);
++ else
++ sprintf(s, "%pUB", d);
+
+ dmi_ident[slot] = s;
+ }
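[Editor's note: the dmi_save_uuid() change above prints the UUID with %pUL
on SMBIOS >= 2.6 and with %pUB otherwise. The sketch below, not kernel code,
shows how the same 16 stored bytes render differently because the first
three UUID fields are byte-swapped in the little-endian form:

#include <stdio.h>
#include <stdint.h>

/* prints the 16 stored bytes as 8-4-4-4-12; le swaps fields 1-3 */
static void print_uuid(const uint8_t *d, int le)
{
    if (le)     /* %pUL: first three fields byte-swapped */
        printf("%02X%02X%02X%02X-%02X%02X-%02X%02X-",
               d[3], d[2], d[1], d[0], d[5], d[4], d[7], d[6]);
    else        /* %pUB: bytes emitted exactly as stored */
        printf("%02X%02X%02X%02X-%02X%02X-%02X%02X-",
               d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7]);
    printf("%02X%02X-%02X%02X%02X%02X%02X%02X\n",
           d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
}

int main(void)
{
    const uint8_t d[16] = { 0x33, 0x22, 0x11, 0x00, 0x55, 0x44,
                            0x77, 0x66, 0x88, 0x99, 0xaa, 0xbb,
                            0xcc, 0xdd, 0xee, 0xff };

    print_uuid(d, 0);   /* 33221100-5544-7766-8899-AABBCCDDEEFF */
    print_uuid(d, 1);   /* 00112233-4455-6677-8899-AABBCCDDEEFF */
    return 0;
}
]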
+@@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p)
+ u8 buf[15];
+
+ memcpy_fromio(buf, p, 15);
+- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
++ if (dmi_checksum(buf, 15)) {
+ dmi_num = (buf[13] << 8) | buf[12];
+ dmi_len = (buf[7] << 8) | buf[6];
+ dmi_base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
+
+- /*
+- * DMI version 0.0 means that the real version is taken from
+- * the SMBIOS version, which we don't know at this point.
+- */
+- if (buf[14] != 0)
+- printk(KERN_INFO "DMI %d.%d present.\n",
+- buf[14] >> 4, buf[14] & 0xF);
+- else
+- printk(KERN_INFO "DMI present.\n");
+ if (dmi_walk_early(dmi_decode) == 0) {
++ if (dmi_ver)
++ pr_info("SMBIOS %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ else {
++ dmi_ver = (buf[14] & 0xF0) << 4 |
++ (buf[14] & 0x0F);
++ pr_info("Legacy DMI %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ }
+ dmi_dump_ids();
+ return 0;
+ }
+ }
++ dmi_ver = 0;
+ return 1;
+ }
+
++static int __init smbios_present(const char __iomem *p)
++{
++ u8 buf[32];
++ int offset = 0;
++
++ memcpy_fromio(buf, p, 32);
++ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
++ dmi_ver = (buf[6] << 8) + buf[7];
++
++ /* Some BIOS report weird SMBIOS version, fix that up */
++ switch (dmi_ver) {
++ case 0x021F:
++ case 0x0221:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
++ dmi_ver & 0xFF, 3);
++ dmi_ver = 0x0203;
++ break;
++ case 0x0233:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
++ dmi_ver = 0x0206;
++ break;
++ }
++ offset = 16;
++ }
++ return dmi_present(buf + offset);
++}
++
+ void __init dmi_scan_machine(void)
+ {
+ char __iomem *p, *q;
+ int rc;
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto error;
+
+@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
+ if (p == NULL)
+ goto error;
+
+- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
++ rc = smbios_present(p);
+ dmi_iounmap(p, 32);
+ if (!rc) {
+ dmi_available = 1;
+@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
+ goto error;
+
+ for (q = p; q < p + 0x10000; q += 16) {
+- rc = dmi_present(q);
++ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
++ rc = smbios_present(q);
++ else if (memcmp(q, "_DMI_", 5) == 0)
++ rc = dmi_present(q);
++ else
++ continue;
+ if (!rc) {
+ dmi_available = 1;
+ dmi_iounmap(p, 0x10000);
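[Editor's note: dmi_scan_machine() above now walks the 64 KiB BIOS window at
0xF0000 in 16-byte steps, preferring an "_SM_" (SMBIOS) entry point over the
legacy "_DMI_" anchor. A simplified userspace model of that scan (assumed
buffer and function name; the q - p <= 0xFFE0 guard keeps the 32-byte SMBIOS
entry point inside the mapped window):

#include <stdio.h>
#include <string.h>

static const unsigned char *find_anchor(const unsigned char *p, size_t len)
{
    const unsigned char *q;

    for (q = p; q < p + len; q += 16) {
        /* 32-byte SMBIOS entry point must fit inside the window */
        if (!memcmp(q, "_SM_", 4) && (size_t)(q - p) <= len - 32)
            return q;
        if (!memcmp(q, "_DMI_", 5))     /* 15-byte legacy entry */
            return q;
    }
    return NULL;
}

int main(void)
{
    static unsigned char area[0x10000];     /* stand-in BIOS window */

    memcpy(area + 0x7890, "_SM_", 4);       /* plant a fake anchor */
    printf("anchor at offset 0x%lx\n",
           (unsigned long)(find_anchor(area, sizeof(area)) - area));
    return 0;
}
]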
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 3e60e8d..5d5a868 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -1222,7 +1222,7 @@ efivars_init(void)
+ printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ EFIVARS_DATE);
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ /* For now we'll register the efi directory at /sys/firmware/efi */
+@@ -1260,7 +1260,7 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
+- if (efi_enabled) {
++ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
+ }
+diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
+index 4da4eb9..2224f1d 100644
+--- a/drivers/firmware/iscsi_ibft_find.c
++++ b/drivers/firmware/iscsi_ibft_find.c
+@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this */
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_BOOT))
+ find_ibft_in_mem();
+
+ if (ibft_addr) {
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index d00f905..10fe480 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -30,6 +30,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <generated/utsrelease.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "intel_drv.h"
+@@ -755,6 +756,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+
+ seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
++ seq_printf(m, "Kernel: " UTS_RELEASE);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 5950ba3..b0186b8 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3456,14 +3456,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ goto out;
+ }
+
+- obj->user_pin_count++;
+- obj->pin_filp = file;
+- if (obj->user_pin_count == 1) {
++ if (obj->user_pin_count == 0) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
+ if (ret)
+ goto out;
+ }
+
++ obj->user_pin_count++;
++ obj->pin_filp = file;
++
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 1202198..878b989 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -657,6 +657,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
++ u64 invalid_offset = (u64)-1;
++ int j;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+@@ -667,6 +669,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ goto err;
+ }
+
++ /* As we do not update the known relocation offsets after
++ * relocating (due to the complexities in lock handling),
++ * we need to mark them as invalid now so that we force the
++ * relocation processing next time. Just in case the target
++ * object is evicted and then rebound into its old
++ * presumed_offset before the next execbuffer - if that
++ * happened we would make the mistake of assuming that the
++ * relocations were valid.
++ */
++ for (j = 0; j < exec[i].relocation_count; j++) {
++ if (copy_to_user(&user_relocs[j].presumed_offset,
++ &invalid_offset,
++ sizeof(invalid_offset))) {
++ ret = -EFAULT;
++ mutex_lock(&dev->struct_mutex);
++ goto err;
++ }
++ }
++
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
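[Editor's note: the execbuffer comment above explains why stale
presumed_offset values are dangerous. The toy below, not driver code, shows
the sentinel trick the hunk adds: (u64)-1 can never match a real GTT offset,
so writing it back to userspace forces relocation processing next time:

#include <stdint.h>
#include <stdio.h>

struct reloc { uint64_t presumed_offset; };

/* -1 is never a valid offset, so it always forces reprocessing */
static int needs_reloc(const struct reloc *r, uint64_t actual_offset)
{
    return r->presumed_offset != actual_offset;
}

int main(void)
{
    struct reloc r = { .presumed_offset = 0x10000 };

    printf("before invalidation: %d\n", needs_reloc(&r, 0x10000));
    r.presumed_offset = (uint64_t)-1;   /* what the hunk writes back */
    printf("after invalidation:  %d\n", needs_reloc(&r, 0x10000));
    return 0;
}
]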
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 7a10f5f..124dd87 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -27,6 +27,8 @@
+
+ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
++#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
++
+ /*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+@@ -389,6 +391,7 @@
+ * the enables for writing to the corresponding low bit.
+ */
+ #define _3D_CHICKEN 0x02084
++#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+ #define _3D_CHICKEN2 0x0208c
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+@@ -399,7 +402,8 @@
+
+ #define MI_MODE 0x0209c
+ # define VS_TIMER_DISPATCH (1 << 6)
+-# define MI_FLUSH_ENABLE (1 << 11)
++# define MI_FLUSH_ENABLE (1 << 12)
++# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+
+ #define GEN6_GT_MODE 0x20d0
+ #define GEN6_GT_MODE_HI (1 << 9)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index fa9639b..c05e825 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8279,6 +8279,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
++ /* WaDisableHiZPlanesWhenMSAAEnabled */
++ I915_WRITE(_3D_CHICKEN,
++ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
++
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index c6d0966..6601d21 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+- .ident = "ZOTAC ZBOXSD-ID12/ID13",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+- },
+- },
+- {
+- .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 19085c0..4fddd21 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -398,15 +398,26 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+
+ if (INTEL_INFO(dev)->gen > 3) {
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+- if (IS_GEN6(dev) || IS_GEN7(dev))
+- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ I915_WRITE(MI_MODE, mode);
+- if (IS_GEN7(dev))
+- I915_WRITE(GFX_MODE_GEN7,
+- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ }
+
++ /* We need to disable the AsyncFlip performance optimisations in order
++ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
++ * programmed to '1' on all products.
++ */
++ if (INTEL_INFO(dev)->gen >= 6)
++ I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));
++
++ /* Required for the hardware to program scanline values for waiting */
++ if (INTEL_INFO(dev)->gen == 6)
++ I915_WRITE(GFX_MODE,
++ GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
++
++ if (IS_GEN7(dev))
++ I915_WRITE(GFX_MODE_GEN7,
++ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
++ GFX_MODE_ENABLE(GFX_REPLAY_MODE));
++
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 29afd71..1f32557 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -168,6 +168,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
++ p->chunks[i].kpage[0] = NULL;
++ p->chunks[i].kpage[1] = NULL;
+ return -ENOMEM;
+ }
+ p->chunks[i].kpage_idx[0] = -1;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c5762e3..bd959c1 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -354,7 +354,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ uint32_t reg;
+
+- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
++ if (efi_enabled(EFI_BOOT) &&
++ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ return false;
+
+ /* first check CRTCs */
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 8165953..a906803 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
++ /* just don't bother on RN50; those chips are often connected to remote
++ * console hw, and load detection often fails on them. So to make
++ * everyone happy, report the encoder as always connected.
++ */
++ if (ASIC_IS_RN50(rdev)) {
++ return connector_status_connected;
++ }
++
+ /* save the regs we need */
+ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 3f28290..4fa2b11 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -431,10 +431,8 @@ static int intel_idle_probe(void)
+
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+- else {
++ else
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+- register_cpu_notifier(&setup_broadcast_notifier);
+- }
+
+ pr_debug(PREFIX "v" INTEL_IDLE_VERSION
+ " model 0x%X\n", boot_cpu_data.x86_model);
+@@ -597,6 +595,9 @@ static int __init intel_idle_init(void)
+ return retval;
+ }
+
++ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
++ register_cpu_notifier(&setup_broadcast_notifier);
++
+ return 0;
+ }
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index ef2d493..62a4d5c 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -916,6 +916,38 @@ static void __init free_iommu_all(void)
+ }
+
+ /*
++ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
++ * Workaround:
++ * BIOS should disable L2B micellaneous clock gating by setting
++ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
++ */
++static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
++{
++ u32 value;
++
++ if ((boot_cpu_data.x86 != 0x15) ||
++ (boot_cpu_data.x86_model < 0x10) ||
++ (boot_cpu_data.x86_model > 0x1f))
++ return;
++
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++ pci_read_config_dword(iommu->dev, 0xf4, &value);
++
++ if (value & BIT(2))
++ return;
++
++ /* Select NB indirect register 0x90 and enable writing */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
++
++ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
++ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
++ dev_name(&iommu->dev->dev));
++
++ /* Clear the enable writing bit */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++}
++
++/*
+ * This function glues the initialization function for one IOMMU
+ * together and also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+@@ -970,6 +1002,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ amd_iommu_np_cache = true;
+
++ amd_iommu_erratum_746_workaround(iommu);
++
+ return pci_enable_device(iommu->dev);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 9a6cc92..dffdca8 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2302,8 +2302,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ return 0;
+ }
+
++static bool device_has_rmrr(struct pci_dev *dev)
++{
++ struct dmar_rmrr_unit *rmrr;
++ int i;
++
++ for_each_rmrr_units(rmrr) {
++ for (i = 0; i < rmrr->devices_cnt; i++) {
++ /*
++ * Return TRUE if this RMRR contains the device that
++ * is passed in.
++ */
++ if (rmrr->devices[i] == dev)
++ return true;
++ }
++ }
++ return false;
++}
++
+ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+ {
++
++ /*
++ * We want to prevent any device associated with an RMRR from
++ * getting placed into the SI Domain. This is done because
++ * problems exist when devices are moved in and out of domains
++ * and their respective RMRR info is lost. We exempt USB devices
++ * from this process due to their usage of RMRRs that are known
++ * to not be needed after BIOS hand-off to OS.
++ */
++ if (device_has_rmrr(pdev) &&
++ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
++ return 0;
++
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+@@ -4090,6 +4121,21 @@ static struct iommu_ops intel_iommu_ops = {
+ .domain_has_cap = intel_iommu_domain_has_cap,
+ };
+
++static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
++{
++ /* G4x/GM45 integrated gfx dmar support is totally busted. */
++ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
++ dmar_map_gfx = 0;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
++
+ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ /*
+@@ -4098,12 +4144,6 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+-
+- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
+- if (dev->revision == 0x07) {
+- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+- dmar_map_gfx = 0;
+- }
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 86cd532..21a3d77 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -914,7 +914,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+@@ -927,7 +927,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
+index d11fbb2..b508a63 100644
+--- a/drivers/net/can/pch_can.c
++++ b/drivers/net/can/pch_can.c
+@@ -559,7 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
+ stats->rx_errors++;
+ break;
+ case PCH_CRC_ERR:
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 79c70ae..1ef9df8 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -735,12 +735,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
+ }
+ if (err_status & HECC_CANES_CRCE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ }
+ if (err_status & HECC_CANES_ACKE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ }
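[Editor's note: the three CAN fixes above (c_can, pch_can, ti_hecc) move the
CAN_ERR_PROT_LOC_* values from data[2] to data[3]. Per
include/linux/can/error.h, data[2] describes the protocol-violation type
while data[3] carries its location, as this abridged sketch illustrates
(constants copied from that header):

#include <stdint.h>
#include <stdio.h>

/* abridged from include/linux/can/error.h */
#define CAN_ERR_PROT_LOC_CRC_SEQ    0x08    /* CRC sequence */
#define CAN_ERR_PROT_LOC_ACK        0x19    /* ACK slot */

struct can_frame_sketch {
    uint8_t data[8];    /* data[2]: violation type, data[3]: location */
};

int main(void)
{
    struct can_frame_sketch cf = { { 0 } };

    cf.data[3] |= CAN_ERR_PROT_LOC_ACK;     /* location byte, not type */
    printf("type=0x%02x location=0x%02x\n", cf.data[2], cf.data[3]);
    return 0;
}
]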
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 222954d..cf177b8 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -939,17 +939,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ {
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+- int i, err = 0, vector = 0;
++ int i, err = 0, vector = 0, free_vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+- goto out;
+- vector++;
++ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
++ vector++;
++
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+@@ -968,13 +969,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+- goto out;
+- vector++;
++ goto err_free;
+ }
+
+ igb_configure_msix(adapter);
+ return 0;
+-out:
++
++err_free:
++ /* free already assigned IRQs */
++ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
++
++ vector--;
++ for (i = 0; i < vector; i++) {
++ free_irq(adapter->msix_entries[free_vector++].vector,
++ adapter->q_vector[i]);
++ }
++err_out:
+ return err;
+ }
+
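[Editor's note: the igb hunk above repairs the MSI-X error path so that
every IRQ requested before the failure is freed again. A generic sketch of
that unwind pattern (invented names, stub request/free functions; not the
driver code itself):

#include <stdio.h>

static int request_irq_stub(int vector)
{
    return vector == 3 ? -1 : 0;    /* pretend vector 3 fails */
}

static void free_irq_stub(int vector)
{
    printf("freed vector %d\n", vector);
}

static int request_all(int n)
{
    int i, err;

    for (i = 0; i < n; i++) {
        err = request_irq_stub(i);
        if (err)
            goto err_free;
    }
    return 0;

err_free:
    /* unwind exactly the vectors that were granted, none twice */
    while (--i >= 0)
        free_irq_stub(i);
    return err;
}

int main(void)
{
    return request_all(5) ? 1 : 0;  /* frees vectors 2, 1, 0 */
}
]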
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index 12a730d..ae750f9 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -946,6 +946,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+
++ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
++
+ if (rtt) {
+ if (!ar9003_hw_rtt_restore(ah, chan))
+ run_rtt_cal = true;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 2330e7e..73be7ff 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -541,35 +541,22 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ }
+
+-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+ {
+- switch (rx) {
+- case 0x5:
++ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+- case 0x3:
+- case 0x1:
+- case 0x2:
+- case 0x7:
+- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+- break;
+- default:
+- break;
+- }
++
++ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
++ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
+- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
++ tx = 3;
+ else if (AR_SREV_9462(ah))
+ /* xxx only when MCI support is enabled */
+- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
+- else
+- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
++ tx = 3;
+
+- if (tx == 0x5) {
+- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+- AR_PHY_SWAP_ALT_CHAIN);
+- }
++ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ }
+
+ /*
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index a13cabb..2bbc83e 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -155,6 +155,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ bf->bf_buf_addr = 0;
++ bf->bf_mpdu = NULL;
+ }
+
+ /* Get a new beacon from mac80211 */
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index 1b90ed8..4f7843a 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
++ } else {
++ kfree_skb(skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index a5c4ba8..0c65a09 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -1016,6 +1016,7 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+ int ar9003_paprd_init_table(struct ath_hw *ah);
+ bool ar9003_paprd_is_done(struct ath_hw *ah);
+ void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+ /* Hardware family op attach helpers */
+ void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index b4cbc82..d171a72 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -786,6 +786,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ return NULL;
+ }
+
++ list_del(&bf->list);
+ if (!bf->bf_mpdu)
+ return bf;
+
+@@ -1966,14 +1967,15 @@ requeue_drop_frag:
+ sc->rx.frag = NULL;
+ }
+ requeue:
++ list_add_tail(&bf->list, &sc->rx.rxbuf);
++ if (flush)
++ continue;
++
+ if (edma) {
+- list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+- list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+- if (!flush)
+- ath9k_hw_rxena(ah);
++ ath9k_hw_rxena(ah);
+ }
+ } while (1);
+
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+index 0d8a9cd..78c16eb 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+@@ -1484,9 +1484,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
+ #endif
+ t->ms = ms;
+ t->periodic = (bool) periodic;
+- t->set = true;
+-
+- atomic_inc(&t->wl->callbacks);
++ if (!t->set) {
++ t->set = true;
++ atomic_inc(&t->wl->callbacks);
++ }
+
+ ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
+ }
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index d34acf0..de94244 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -160,7 +160,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+- if (!card || card->adapter) {
++ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index 1679c25..56e1c4a 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -53,8 +53,7 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
+ */
+ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ {
+- bool cancel_flag = false;
+- int status = adapter->cmd_wait_q.status;
++ int status;
+ struct cmd_ctrl_node *cmd_queued;
+
+ if (!adapter->cmd_queued)
+@@ -70,15 +69,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ queue_work(adapter->workqueue, &adapter->main_work);
+
+ /* Wait for completion */
+- wait_event_interruptible(adapter->cmd_wait_q.wait,
+- *(cmd_queued->condition));
+- if (!*(cmd_queued->condition))
+- cancel_flag = true;
+-
+- if (cancel_flag) {
+- mwifiex_cancel_pending_ioctl(adapter);
+- dev_dbg(adapter->dev, "cmd cancel\n");
++ status = wait_event_interruptible(adapter->cmd_wait_q.wait,
++ *(cmd_queued->condition));
++ if (status) {
++ dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
++ return status;
+ }
++
++ status = adapter->cmd_wait_q.status;
+ adapter->cmd_wait_q.status = 0;
+
+ return status;
+@@ -240,6 +238,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
++ if (netif_carrier_ok(priv->netdev))
++ netif_carrier_off(priv->netdev);
+
+ /* Clear any past association response stored for
+ * application retrieval */
+@@ -271,6 +271,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
++ if (netif_carrier_ok(priv->netdev))
++ netif_carrier_off(priv->netdev);
+
+ if (!ret) {
+ dev_dbg(adapter->dev, "info: network found in scan"
+@@ -421,8 +423,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
+ return false;
+ }
+
+- wait_event_interruptible(adapter->hs_activate_wait_q,
+- adapter->hs_activate_wait_q_woken);
++ if (wait_event_interruptible(adapter->hs_activate_wait_q,
++ adapter->hs_activate_wait_q_woken)) {
++ dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
++ return false;
++ }
+
+ return true;
+ }
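+
+The sta_ioctl.c hunks above stop discarding the return value of
+wait_event_interruptible(): if a signal cuts the sleep short, the caller
+now sees the error instead of reading a command status that was never
+filled in. A minimal sketch of the pattern, using a hypothetical
+waitqueue/flag pair rather than the driver's own fields:
+
+	int ret;
+
+	ret = wait_event_interruptible(waitq, done);
+	if (ret)		/* -ERESTARTSYS: woken by a signal, not by "done" */
+		return ret;	/* propagate; any result read now would be stale */
+
+	return result;		/* safe: the condition really became true */
+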
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 838f571..4fff912 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -44,8 +44,6 @@ extern int pciehp_poll_mode;
+ extern int pciehp_poll_time;
+ extern int pciehp_debug;
+ extern int pciehp_force;
+-extern struct workqueue_struct *pciehp_wq;
+-extern struct workqueue_struct *pciehp_ordered_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+@@ -79,6 +77,7 @@ struct slot {
+ struct hotplug_slot *hotplug_slot;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
++ struct workqueue_struct *wq;
+ };
+
+ struct event_info {
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 7ac8358..9350af9 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -42,8 +42,6 @@ int pciehp_debug;
+ int pciehp_poll_mode;
+ int pciehp_poll_time;
+ int pciehp_force;
+-struct workqueue_struct *pciehp_wq;
+-struct workqueue_struct *pciehp_ordered_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -341,33 +339,19 @@ static int __init pcied_init(void)
+ {
+ int retval = 0;
+
+- pciehp_wq = alloc_workqueue("pciehp", 0, 0);
+- if (!pciehp_wq)
+- return -ENOMEM;
+-
+- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
+- if (!pciehp_ordered_wq) {
+- destroy_workqueue(pciehp_wq);
+- return -ENOMEM;
+- }
+-
+ pciehp_firmware_init();
+ retval = pcie_port_service_register(&hpdriver_portdrv);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+- if (retval) {
+- destroy_workqueue(pciehp_ordered_wq);
+- destroy_workqueue(pciehp_wq);
++ if (retval)
+ dbg("Failure to register service\n");
+- }
++
+ return retval;
+ }
+
+ static void __exit pcied_cleanup(void)
+ {
+ dbg("unload_pciehpd()\n");
+- destroy_workqueue(pciehp_ordered_wq);
+- destroy_workqueue(pciehp_wq);
+ pcie_port_service_unregister(&hpdriver_portdrv);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 085dbb5..38f0186 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+- queue_work(pciehp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+ }
+@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(pciehp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
+ if (ATTN_LED(ctrl))
+ pciehp_set_attention_status(p_slot, 0);
+
+- queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
++ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
+ else
+ p_slot->state = POWERON_STATE;
+
+- queue_work(pciehp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ }
+
+ static void interrupt_event_handler(struct work_struct *work)
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 7b14148..fef1748 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -789,24 +789,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
+ static int pcie_init_slot(struct controller *ctrl)
+ {
+ struct slot *slot;
++ char name[32];
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
++ snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
++ slot->wq = alloc_workqueue(name, 0, 0);
++ if (!slot->wq)
++ goto abort;
++
+ slot->ctrl = ctrl;
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+ ctrl->slot = slot;
+ return 0;
++abort:
++ kfree(slot);
++ return -ENOMEM;
+ }
+
+ static void pcie_cleanup_slot(struct controller *ctrl)
+ {
+ struct slot *slot = ctrl->slot;
+ cancel_delayed_work(&slot->work);
+- flush_workqueue(pciehp_wq);
+- flush_workqueue(pciehp_ordered_wq);
++ destroy_workqueue(slot->wq);
+ kfree(slot);
+ }
+
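+
+The pciehp hunks above replace the two driver-global workqueues with one
+workqueue per slot, so events on one slot no longer serialize behind
+another slot's work and teardown only flushes its own queue. A minimal
+sketch of the per-device pattern, with a hypothetical struct my_slot:
+
+	static int my_slot_init(struct my_slot *slot, unsigned int id)
+	{
+		char name[32];
+
+		snprintf(name, sizeof(name), "myhp-%u", id);
+		slot->wq = alloc_workqueue(name, 0, 0);
+		return slot->wq ? 0 : -ENOMEM;
+	}
+
+	static void my_slot_fini(struct my_slot *slot)
+	{
+		cancel_delayed_work(&slot->work);	/* stop pending button work */
+		destroy_workqueue(slot->wq);		/* flushes, then frees */
+	}
+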
+diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
+index e0c90e6..2c2ac80 100644
+--- a/drivers/pci/hotplug/shpchp.h
++++ b/drivers/pci/hotplug/shpchp.h
+@@ -46,8 +46,6 @@
+ extern int shpchp_poll_mode;
+ extern int shpchp_poll_time;
+ extern int shpchp_debug;
+-extern struct workqueue_struct *shpchp_wq;
+-extern struct workqueue_struct *shpchp_ordered_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+@@ -91,6 +89,7 @@ struct slot {
+ struct list_head slot_list;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
++ struct workqueue_struct *wq;
+ u8 hp_slot;
+ };
+
+diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
+index dd7e0c5..754a7cd 100644
+--- a/drivers/pci/hotplug/shpchp_core.c
++++ b/drivers/pci/hotplug/shpchp_core.c
+@@ -39,8 +39,6 @@
+ int shpchp_debug;
+ int shpchp_poll_mode;
+ int shpchp_poll_time;
+-struct workqueue_struct *shpchp_wq;
+-struct workqueue_struct *shpchp_ordered_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -123,6 +121,14 @@ static int init_slots(struct controller *ctrl)
+ slot->device = ctrl->slot_device_offset + i;
+ slot->hpc_ops = ctrl->hpc_ops;
+ slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
++
++ snprintf(name, sizeof(name), "shpchp-%d", slot->number);
++ slot->wq = alloc_workqueue(name, 0, 0);
++ if (!slot->wq) {
++ retval = -ENOMEM;
++ goto error_info;
++ }
++
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
+
+@@ -142,7 +148,7 @@ static int init_slots(struct controller *ctrl)
+ if (retval) {
+ ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
+ retval);
+- goto error_info;
++ goto error_slotwq;
+ }
+
+ get_power_status(hotplug_slot, &info->power_status);
+@@ -154,6 +160,8 @@ static int init_slots(struct controller *ctrl)
+ }
+
+ return 0;
++error_slotwq:
++ destroy_workqueue(slot->wq);
+ error_info:
+ kfree(info);
+ error_hpslot:
+@@ -174,8 +182,7 @@ void cleanup_slots(struct controller *ctrl)
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ cancel_delayed_work(&slot->work);
+- flush_workqueue(shpchp_wq);
+- flush_workqueue(shpchp_ordered_wq);
++ destroy_workqueue(slot->wq);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+ }
+@@ -358,25 +365,12 @@ static struct pci_driver shpc_driver = {
+
+ static int __init shpcd_init(void)
+ {
+- int retval = 0;
+-
+- shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
+- if (!shpchp_wq)
+- return -ENOMEM;
+-
+- shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
+- if (!shpchp_ordered_wq) {
+- destroy_workqueue(shpchp_wq);
+- return -ENOMEM;
+- }
++ int retval;
+
+ retval = pci_register_driver(&shpc_driver);
+ dbg("%s: pci_register_driver = %d\n", __func__, retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+- if (retval) {
+- destroy_workqueue(shpchp_ordered_wq);
+- destroy_workqueue(shpchp_wq);
+- }
++
+ return retval;
+ }
+
+@@ -384,8 +378,6 @@ static void __exit shpcd_cleanup(void)
+ {
+ dbg("unload_shpchpd()\n");
+ pci_unregister_driver(&shpc_driver);
+- destroy_workqueue(shpchp_ordered_wq);
+- destroy_workqueue(shpchp_wq);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index b00b09b..3ffc1b2 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+- queue_work(shpchp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+ }
+@@ -456,7 +456,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(shpchp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+@@ -504,7 +504,7 @@ static void handle_button_press_event(struct slot *p_slot)
+ p_slot->hpc_ops->green_led_blink(p_slot);
+ p_slot->hpc_ops->set_attention_status(p_slot, 0);
+
+- queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
++ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 9674e9f..ee82c55 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -637,6 +637,7 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ do_recovery(pdev, entry.severity);
++ pci_dev_put(pdev);
+ }
+ }
+ #endif
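+
+The aerdrv_core.c hunk above plugs a reference leak: the recovery loop
+looks each pci_dev up by ID, and every successful lookup returns a
+counted reference that must be balanced with pci_dev_put(). The general
+shape of the pattern:
+
+	struct pci_dev *pdev;
+
+	pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
+	if (!pdev)
+		return;			/* device already gone */
+	do_recovery(pdev, severity);	/* use the device ... */
+	pci_dev_put(pdev);		/* ... then drop the lookup's reference */
+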
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 2275162..c73ed00 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -790,6 +790,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
+ {
+ struct pci_dev *child;
+
++ if (aspm_force)
++ return;
++
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
+index 811d436..2704386 100644
+--- a/drivers/platform/x86/ibm_rtl.c
++++ b/drivers/platform/x86/ibm_rtl.c
+@@ -255,7 +255,7 @@ static int __init ibm_rtl_init(void) {
+ if (force)
+ pr_warn("module loaded by force\n");
+ /* first ensure that we are running on IBM HW */
+- else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
++ else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
+ return -ENODEV;
+
+ /* Get the address for the Extended BIOS Data Area */
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 21bc1a7..64e1f2d 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -22,6 +22,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/rfkill.h>
+ #include <linux/acpi.h>
++#include <linux/efi.h>
+
+ /*
+ * This driver is needed because a number of Samsung laptops do not hook
+@@ -603,6 +604,9 @@ static int __init samsung_init(void)
+ int loca;
+ int retval;
+
++ if (efi_enabled(EFI_BOOT))
++ return -ENODEV;
++
+ mutex_init(&sabi_mutex);
+ handle_backlight = true;
+
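+
+The ibm_rtl and samsung-laptop hunks (and the isci hunk further down)
+adopt the efi_enabled(feature) interface backported from upstream: the
+old global efi_enabled flag becomes a per-facility test, so "booted via
+EFI" and "EFI runtime services callable" can be distinguished:
+
+	#include <linux/efi.h>
+
+	if (efi_enabled(EFI_BOOT))		/* firmware is EFI */
+		return -ENODEV;			/* legacy SMI/EBDA path absent */
+
+	if (efi_enabled(EFI_RUNTIME_SERVICES))	/* runtime calls usable */
+		rom = read_option_rom();	/* hypothetical helper */
+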
+diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
+index 8cba82d..32445a7 100644
+--- a/drivers/regulator/max8997.c
++++ b/drivers/regulator/max8997.c
+@@ -71,26 +71,26 @@ struct voltage_map_desc {
+ unsigned int n_bits;
+ };
+
+-/* Voltage maps in mV */
++/* Voltage maps in uV */
+ static const struct voltage_map_desc ldo_voltage_map_desc = {
+- .min = 800, .max = 3950, .step = 50, .n_bits = 6,
++ .min = 800000, .max = 3950000, .step = 50000, .n_bits = 6,
+ }; /* LDO1 ~ 18, 21 all */
+
+ static const struct voltage_map_desc buck1245_voltage_map_desc = {
+- .min = 650, .max = 2225, .step = 25, .n_bits = 6,
++ .min = 650000, .max = 2225000, .step = 25000, .n_bits = 6,
+ }; /* Buck1, 2, 4, 5 */
+
+ static const struct voltage_map_desc buck37_voltage_map_desc = {
+- .min = 750, .max = 3900, .step = 50, .n_bits = 6,
++ .min = 750000, .max = 3900000, .step = 50000, .n_bits = 6,
+ }; /* Buck3, 7 */
+
+-/* current map in mA */
++/* current map in uA */
+ static const struct voltage_map_desc charger_current_map_desc = {
+- .min = 200, .max = 950, .step = 50, .n_bits = 4,
++ .min = 200000, .max = 950000, .step = 50000, .n_bits = 4,
+ };
+
+ static const struct voltage_map_desc topoff_current_map_desc = {
+- .min = 50, .max = 200, .step = 10, .n_bits = 4,
++ .min = 50000, .max = 200000, .step = 10000, .n_bits = 4,
+ };
+
+ static const struct voltage_map_desc *reg_voltage_map[] = {
+@@ -199,7 +199,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
+ if (val > desc->max)
+ return -EINVAL;
+
+- return val * 1000;
++ return val;
+ }
+
+ static int max8997_get_enable_register(struct regulator_dev *rdev,
+@@ -501,7 +501,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ {
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+@@ -527,7 +526,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+
+ desc = reg_voltage_map[rid];
+
+- i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
++ i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+ if (i < 0)
+ return i;
+
+@@ -546,7 +545,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ /* If the voltage is increasing */
+ if (org < i)
+ udelay(DIV_ROUND_UP(desc->step * (i - org),
+- max8997->ramp_delay));
++ max8997->ramp_delay * 1000));
+ }
+
+ return ret;
+@@ -645,7 +644,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ const struct voltage_map_desc *desc;
+ int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
+ bool gpio_dvs_mode = false;
+- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+
+ if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
+ return -EINVAL;
+@@ -670,7 +668,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ selector);
+
+ desc = reg_voltage_map[rid];
+- new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
++ new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+ if (new_val < 0)
+ return new_val;
+
+@@ -1002,8 +1000,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck1_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck1_voltage[i] / 1000,
+- pdata->buck1_voltage[i] / 1000 +
++ pdata->buck1_voltage[i],
++ pdata->buck1_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+@@ -1011,8 +1009,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck2_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck2_voltage[i] / 1000,
+- pdata->buck2_voltage[i] / 1000 +
++ pdata->buck2_voltage[i],
++ pdata->buck2_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+@@ -1020,8 +1018,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck5_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck5_voltage[i] / 1000,
+- pdata->buck5_voltage[i] / 1000 +
++ pdata->buck5_voltage[i],
++ pdata->buck5_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
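+
+The max8997 hunks above move every table from mV/mA to uV/uA, the units
+the regulator core actually passes in, which removes the lossy /1000
+conversions. The ramp delay also switches to DIV_ROUND_UP so fractional
+microseconds round up and the regulator is never declared settled early:
+
+	/* include/linux/kernel.h */
+	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
+
+	/*
+	 * Example: 4 steps of 25000 uV at a ramp of 10 mV/us:
+	 * DIV_ROUND_UP(25000 * 4, 10 * 1000) = 10 us of settling time.
+	 */
+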
+diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
+index 41a1495..27fe1c6 100644
+--- a/drivers/regulator/max8998.c
++++ b/drivers/regulator/max8998.c
+@@ -497,7 +497,7 @@ buck2_exit:
+
+ difference = desc->min + desc->step*i - previous_vol/1000;
+ if (difference > 0)
+- udelay(difference / ((val & 0x0f) + 1));
++ udelay(DIV_ROUND_UP(difference, (val & 0x0f) + 1));
+
+ return ret;
+ }
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 5c8b0dc..3784388 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -459,7 +459,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 4b63c73..f44d633 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2825,10 +2825,6 @@ static int __init init_sd(void)
+ if (err)
+ goto err_out;
+
+- err = scsi_register_driver(&sd_template.gendrv);
+- if (err)
+- goto err_out_class;
+-
+ sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
+ 0, 0, NULL);
+ if (!sd_cdb_cache) {
+@@ -2842,8 +2838,15 @@ static int __init init_sd(void)
+ goto err_out_cache;
+ }
+
++ err = scsi_register_driver(&sd_template.gendrv);
++ if (err)
++ goto err_out_driver;
++
+ return 0;
+
++err_out_driver:
++ mempool_destroy(sd_cdb_pool);
++
+ err_out_cache:
+ kmem_cache_destroy(sd_cdb_cache);
+
+@@ -2866,10 +2869,10 @@ static void __exit exit_sd(void)
+
+ SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+
++ scsi_unregister_driver(&sd_template.gendrv);
+ mempool_destroy(sd_cdb_pool);
+ kmem_cache_destroy(sd_cdb_cache);
+
+- scsi_unregister_driver(&sd_template.gendrv);
+ class_unregister(&sd_disk_class);
+
+ for (i = 0; i < SD_MAJORS; i++)
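+
+The sd.c hunks above reorder init so the driver registers with the SCSI
+core only after its cdb cache and mempool exist, and exit unwinds in the
+reverse order; otherwise a probe racing with module load could touch a
+pool that had not been allocated yet. The usual goto-unwind shape:
+
+	static int __init my_init(void)
+	{
+		int err;
+
+		err = setup_resources();	/* caches, pools, classes */
+		if (err)
+			return err;
+
+		err = register_driver();	/* last step: become visible */
+		if (err)
+			goto out_resources;
+		return 0;
+
+	out_resources:
+		teardown_resources();
+		return err;
+	}
+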
+diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
+index 4c77e50..da26630 100644
+--- a/drivers/staging/comedi/Kconfig
++++ b/drivers/staging/comedi/Kconfig
+@@ -424,6 +424,7 @@ config COMEDI_ADQ12B
+
+ config COMEDI_NI_AT_A2150
+ tristate "NI AT-A2150 ISA card support"
++ select COMEDI_FC
+ depends on COMEDI_NI_COMMON
+ depends on VIRT_TO_BUS
+ default N
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 9465bce..ab9f5ed 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2207,6 +2207,7 @@ int comedi_alloc_board_minor(struct device *hardware_device)
+ kfree(info);
+ return -ENOMEM;
+ }
++ info->hardware_device = hardware_device;
+ comedi_device_init(info->device);
+ spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) {
+@@ -2295,6 +2296,23 @@ void comedi_free_board_minor(unsigned minor)
+ }
+ }
+
++int comedi_find_board_minor(struct device *hardware_device)
++{
++ int minor;
++ struct comedi_device_file_info *info;
++
++ for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) {
++ spin_lock(&comedi_file_info_table_lock);
++ info = comedi_file_info_table[minor];
++ if (info && info->hardware_device == hardware_device) {
++ spin_unlock(&comedi_file_info_table_lock);
++ return minor;
++ }
++ spin_unlock(&comedi_file_info_table_lock);
++ }
++ return -ENODEV;
++}
++
+ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+ {
+diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
+index 7a0d4bc..00d3c65 100644
+--- a/drivers/staging/comedi/comedidev.h
++++ b/drivers/staging/comedi/comedidev.h
+@@ -234,6 +234,7 @@ struct comedi_device_file_info {
+ struct comedi_device *device;
+ struct comedi_subdevice *read_subdevice;
+ struct comedi_subdevice *write_subdevice;
++ struct device *hardware_device;
+ };
+
+ #ifdef CONFIG_COMEDI_DEBUG
+diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
+index db1fd63..538b568 100644
+--- a/drivers/staging/comedi/drivers.c
++++ b/drivers/staging/comedi/drivers.c
+@@ -823,25 +823,14 @@ static int comedi_auto_config(struct device *hardware_device,
+ int minor;
+ struct comedi_device_file_info *dev_file_info;
+ int retval;
+- unsigned *private_data = NULL;
+
+- if (!comedi_autoconfig) {
+- dev_set_drvdata(hardware_device, NULL);
++ if (!comedi_autoconfig)
+ return 0;
+- }
+
+ minor = comedi_alloc_board_minor(hardware_device);
+ if (minor < 0)
+ return minor;
+
+- private_data = kmalloc(sizeof(unsigned), GFP_KERNEL);
+- if (private_data == NULL) {
+- retval = -ENOMEM;
+- goto cleanup;
+- }
+- *private_data = minor;
+- dev_set_drvdata(hardware_device, private_data);
+-
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ memset(&it, 0, sizeof(it));
+@@ -854,25 +843,22 @@ static int comedi_auto_config(struct device *hardware_device,
+ retval = comedi_device_attach(dev_file_info->device, &it);
+ mutex_unlock(&dev_file_info->device->mutex);
+
+-cleanup:
+- if (retval < 0) {
+- kfree(private_data);
++ if (retval < 0)
+ comedi_free_board_minor(minor);
+- }
+ return retval;
+ }
+
+ static void comedi_auto_unconfig(struct device *hardware_device)
+ {
+- unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device);
+- if (minor == NULL)
+- return;
+-
+- BUG_ON(*minor >= COMEDI_NUM_BOARD_MINORS);
++ int minor;
+
+- comedi_free_board_minor(*minor);
+- dev_set_drvdata(hardware_device, NULL);
+- kfree(minor);
++ if (hardware_device == NULL)
++ return;
++ minor = comedi_find_board_minor(hardware_device);
++ if (minor < 0)
++ return;
++ BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
++ comedi_free_board_minor(minor);
+ }
+
+ int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name)
+diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
+index a804742..2567f9a 100644
+--- a/drivers/staging/comedi/drivers/comedi_test.c
++++ b/drivers/staging/comedi/drivers/comedi_test.c
+@@ -461,7 +461,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+ {
+ devpriv->timer_running = 0;
+- del_timer(&devpriv->timer);
++ del_timer_sync(&devpriv->timer);
+ return 0;
+ }
+
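+
+The comedi_test hunk above matters on SMP: del_timer() only deactivates
+a pending timer, while del_timer_sync() additionally waits for a handler
+already running on another CPU, so the cancel cannot race with the
+callback touching state that is about to be freed:
+
+	del_timer(&devpriv->timer);	 /* handler may still be mid-flight */
+	del_timer_sync(&devpriv->timer); /* returns only once it has finished */
+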
+diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
+index 9148abd..9fee2f2 100644
+--- a/drivers/staging/comedi/drivers/ni_pcimio.c
++++ b/drivers/staging/comedi/drivers/ni_pcimio.c
+@@ -1021,7 +1021,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1040,7 +1040,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1076,7 +1076,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1095,7 +1095,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1131,7 +1131,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1150,7 +1150,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1186,7 +1186,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h
+index 434ce34..4208fb4 100644
+--- a/drivers/staging/comedi/internal.h
++++ b/drivers/staging/comedi/internal.h
+@@ -7,6 +7,7 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int comedi_alloc_board_minor(struct device *hardware_device);
+ void comedi_free_board_minor(unsigned minor);
++int comedi_find_board_minor(struct device *hardware_device);
+ void comedi_reset_async_buf(struct comedi_async *async);
+ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long new_size);
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index f4b738f..88d1d35 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -66,6 +66,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
+ /* Belkin */
+ {USB_DEVICE(0x050D, 0x945A)},
++ /* ISY IWL - Belkin clone */
++ {USB_DEVICE(0x050D, 0x11F1)},
+ /* Corega */
+ {USB_DEVICE(0x07AA, 0x0047)},
+ /* D-Link */
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index c241074..7843111 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
+
+ mutex_lock(&spk_mutex);
+ /* First, check if we already have it loaded. */
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ if (strcmp(synths[i]->name, synth_name) == 0)
+ synth = synths[i];
+
+@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
+ int i;
+ int status = 0;
+ mutex_lock(&spk_mutex);
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ /* synth_remove() is responsible for rotating the array down */
+ if (in_synth == synths[i]) {
+ mutex_unlock(&spk_mutex);
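+
+The speakup hunks above fix an operand-order bug: && evaluates left to
+right, so "synths[i] != NULL && i < MAXSYNTHS" reads one element past
+the end of a full table before the bound is ever checked. Putting the
+range test first lets short-circuit evaluation guard the access:
+
+	for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+		; /* synths[i] is read only after i is known to be in range */
+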
+diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
+index 3b7a847..194e974 100644
+--- a/drivers/staging/usbip/usbip_common.c
++++ b/drivers/staging/usbip/usbip_common.c
+@@ -761,26 +761,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
+ * buffer and iso packets need to be stored and be in propeper endian in urb
+ * before calling this function
+ */
+-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
++void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+ {
+ int np = urb->number_of_packets;
+ int i;
+- int ret;
+ int actualoffset = urb->actual_length;
+
+ if (!usb_pipeisoc(urb->pipe))
+- return 0;
++ return;
+
+ /* if no packets or length of data is 0, then nothing to unpack */
+ if (np == 0 || urb->actual_length == 0)
+- return 0;
++ return;
+
+ /*
+ * if actual_length is transfer_buffer_length then no padding is
+ * present.
+ */
+ if (urb->actual_length == urb->transfer_buffer_length)
+- return 0;
++ return;
+
+ /*
+ * loop over all packets from last to first (to prevent overwritting
+@@ -792,8 +791,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+ urb->transfer_buffer + actualoffset,
+ urb->iso_frame_desc[i].actual_length);
+ }
+-
+- return ret;
+ }
+ EXPORT_SYMBOL_GPL(usbip_pad_iso);
+
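+
+The usbip change above also buries a latent bug: the old usbip_pad_iso()
+declared "int ret" but never assigned it before the final "return ret",
+so the path that actually unpacked packets returned an uninitialized
+stack value. Since nothing past the early checks can fail, void is the
+honest signature. A stripped-down sketch of the old antipattern:
+
+	static int pad(char *buf, int n)
+	{
+		int ret;		/* note: never assigned */
+
+		if (n == 0)
+			return 0;
+		memmove(buf, buf + 1, n - 1);
+		return ret;		/* garbage returned to the caller */
+	}
+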
+diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
+index be21617..e547dba 100644
+--- a/drivers/staging/usbip/usbip_common.h
++++ b/drivers/staging/usbip/usbip_common.h
+@@ -316,7 +316,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
+ void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
+ /* some members of urb must be substituted before. */
+ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
+-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
++void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
+ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
+
+ /* usbip_event.c */
+diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
+index 3872b8c..1a7afaa 100644
+--- a/drivers/staging/usbip/vhci_rx.c
++++ b/drivers/staging/usbip/vhci_rx.c
+@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ return;
+
+ /* restore the padding in iso packets */
+- if (usbip_pad_iso(ud, urb) < 0)
+- return;
++ usbip_pad_iso(ud, urb);
+
+ if (usbip_dbg_flag_vhci_rx)
+ usbip_dump_urb(urb);
+diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
+index a8f97eb..991ce3e 100644
+--- a/drivers/staging/vt6656/bssdb.h
++++ b/drivers/staging/vt6656/bssdb.h
+@@ -92,7 +92,6 @@ typedef struct tagSRSNCapObject {
+ } SRSNCapObject, *PSRSNCapObject;
+
+ // BSS info(AP)
+-#pragma pack(1)
+ typedef struct tagKnownBSS {
+ // BSS info
+ BOOL bActive;
+diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
+index 3176c8d..c731b12 100644
+--- a/drivers/staging/vt6656/int.h
++++ b/drivers/staging/vt6656/int.h
+@@ -34,7 +34,6 @@
+ #include "device.h"
+
+ /*--------------------- Export Definitions -------------------------*/
+-#pragma pack(1)
+ typedef struct tagSINTData {
+ BYTE byTSR0;
+ BYTE byPkt0;
+diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
+index 22710ce..ae6e2d2 100644
+--- a/drivers/staging/vt6656/iocmd.h
++++ b/drivers/staging/vt6656/iocmd.h
+@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
+ // Ioctl interface structure
+ // Command structure
+ //
+-#pragma pack(1)
+ typedef struct tagSCmdRequest {
+ u8 name[16];
+ void *data;
+ u16 wResult;
+ u16 wCmdCode;
+-} SCmdRequest, *PSCmdRequest;
++} __packed SCmdRequest, *PSCmdRequest;
+
+ //
+ // Scan
+@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
+
+ u8 ssid[SSID_MAXLEN + 2];
+
+-} SCmdScan, *PSCmdScan;
++} __packed SCmdScan, *PSCmdScan;
+
+ //
+ // BSS Join
+@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
+ BOOL bPSEnable;
+ BOOL bShareKeyAuth;
+
+-} SCmdBSSJoin, *PSCmdBSSJoin;
++} __packed SCmdBSSJoin, *PSCmdBSSJoin;
+
+ //
+ // Zonetype Setting
+@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
+ BOOL bWrite;
+ WZONETYPE ZoneType;
+
+-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
++} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+
+ typedef struct tagSWPAResult {
+ char ifname[100];
+@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
+ u8 key_mgmt;
+ u8 eap_type;
+ BOOL authenticated;
+-} SWPAResult, *PSWPAResult;
++} __packed SWPAResult, *PSWPAResult;
+
+ typedef struct tagSCmdStartAP {
+
+@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
+ BOOL bShareKeyAuth;
+ u8 byBasicRate;
+
+-} SCmdStartAP, *PSCmdStartAP;
++} __packed SCmdStartAP, *PSCmdStartAP;
+
+ typedef struct tagSCmdSetWEP {
+
+@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
+ BOOL bWepKeyAvailable[WEP_NKEYS];
+ u32 auWepKeyLength[WEP_NKEYS];
+
+-} SCmdSetWEP, *PSCmdSetWEP;
++} __packed SCmdSetWEP, *PSCmdSetWEP;
+
+ typedef struct tagSBSSIDItem {
+
+@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
+ BOOL bWEPOn;
+ u32 uRSSI;
+
+-} SBSSIDItem;
++} __packed SBSSIDItem;
+
+
+ typedef struct tagSBSSIDList {
+
+ u32 uItem;
+ SBSSIDItem sBSSIDList[0];
+-} SBSSIDList, *PSBSSIDList;
++} __packed SBSSIDList, *PSBSSIDList;
+
+
+ typedef struct tagSNodeItem {
+@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
+ u32 uTxAttempts;
+ u16 wFailureRatio;
+
+-} SNodeItem;
++} __packed SNodeItem;
+
+
+ typedef struct tagSNodeList {
+@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
+ u32 uItem;
+ SNodeItem sNodeList[0];
+
+-} SNodeList, *PSNodeList;
++} __packed SNodeList, *PSNodeList;
+
+
+ typedef struct tagSCmdLinkStatus {
+@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
+ u32 uChannel;
+ u32 uLinkRate;
+
+-} SCmdLinkStatus, *PSCmdLinkStatus;
++} __packed SCmdLinkStatus, *PSCmdLinkStatus;
+
+ //
+ // 802.11 counter
+@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
+ u32 ReceivedFragmentCount;
+ u32 MulticastReceivedFrameCount;
+ u32 FCSErrorCount;
+-} SDot11MIBCount, *PSDot11MIBCount;
++} __packed SDot11MIBCount, *PSDot11MIBCount;
+
+
+
+@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
+ u32 ullTxBroadcastBytes[2];
+ u32 ullTxMulticastBytes[2];
+ u32 ullTxDirectedBytes[2];
+-} SStatMIBCount, *PSStatMIBCount;
++} __packed SStatMIBCount, *PSStatMIBCount;
+
+ typedef struct tagSCmdValue {
+
+ u32 dwValue;
+
+-} SCmdValue, *PSCmdValue;
++} __packed SCmdValue, *PSCmdValue;
+
+ //
+ // hostapd & viawget ioctl related
+@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
+ u8 ssid[32];
+ } scan_req;
+ } u;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
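+
+The vt6656 hunks replace "#pragma pack(1)" with per-struct __packed
+annotations. The pragma silently applied to every structure declared
+after it in the translation unit, including unrelated headers pulled in
+later; the attribute is explicit and local to one definition:
+
+	struct cmd_value {
+		u8  code;
+		u32 value;	/* no padding inserted before this field */
+	} __packed;		/* sizeof == 5 rather than the natural 8 */
+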
+diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
+index 959c886..2522dde 100644
+--- a/drivers/staging/vt6656/iowpa.h
++++ b/drivers/staging/vt6656/iowpa.h
+@@ -67,12 +67,11 @@ enum {
+
+
+
+-#pragma pack(1)
+ typedef struct viawget_wpa_header {
+ u8 type;
+ u16 req_ie_len;
+ u16 resp_ie_len;
+-} viawget_wpa_header;
++} __packed viawget_wpa_header;
+
+ struct viawget_wpa_param {
+ u32 cmd;
+@@ -113,9 +112,8 @@ struct viawget_wpa_param {
+ u8 *buf;
+ } scan_results;
+ } u;
+-};
++} __packed;
+
+-#pragma pack(1)
+ struct viawget_scan_result {
+ u8 bssid[6];
+ u8 ssid[32];
+@@ -130,7 +128,7 @@ struct viawget_scan_result {
+ int noise;
+ int level;
+ int maxrate;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 34d114a..9176b2e 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -4539,7 +4539,7 @@ int transport_send_check_condition_and_sense(
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT COMMUNICATION FAILURE */
+- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
++ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
+ break;
+ }
+ /*
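+
+The target_core hunk above is a one-nibble sense-data fix: in SPC the
+additional sense code for LOGICAL UNIT COMMUNICATION FAILURE is 0x08,
+while 0x80 lands in the vendor-specific ASC range, so initiators were
+being handed a code they could not decode:
+
+	buffer[offset + SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* 0x05 */
+	buffer[offset + SPC_ASC_KEY_OFFSET] = 0x08; /* LUN comm. failure */
+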
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 3f28fdb..ab0a3fa 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -390,11 +390,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ tport = ft_tport_create(rdata->local_port);
+ if (!tport)
+- return 0; /* not a target for this local port */
++ goto not_target; /* not a target for this local port */
+
+ acl = ft_acl_get(tport->tpg, rdata);
+ if (!acl)
+- return 0;
++ goto not_target; /* no target for this remote */
+
+ if (!rspp)
+ goto fill;
+@@ -431,12 +431,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ /*
+ * OR in our service parameters with other provider (initiator), if any.
+- * TBD XXX - indicate RETRY capability?
+ */
+ fill:
+ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_RETRY;
+ spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+ return FC_SPP_RESP_ACK;
++
++not_target:
++ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_TARG_FCN;
++ spp->spp_params = htonl(fcp_parm);
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 70585b6..90dad17 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -316,6 +316,12 @@ static const struct serial8250_config uart_config[] = {
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR,
+ },
++ [PORT_BRCM_TRUMANAGE] = {
++ .name = "TruManage",
++ .fifo_size = 1,
++ .tx_loadsz = 1024,
++ .flags = UART_CAP_HFIFO,
++ },
+ };
+
+ #if defined(CONFIG_MIPS_ALCHEMY)
+@@ -1511,6 +1517,11 @@ static void transmit_chars(struct uart_8250_port *up)
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
++ if (up->capabilities & UART_CAP_HFIFO) {
++ if ((serial_in(up, UART_LSR) & BOTH_EMPTY) !=
++ BOTH_EMPTY)
++ break;
++ }
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
+index 6edf4a6..902adcd 100644
+--- a/drivers/tty/serial/8250.h
++++ b/drivers/tty/serial/8250.h
+@@ -43,6 +43,7 @@ struct serial8250_config {
+ #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
+ #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
+ #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
++#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
+
+ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
+ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
+index bf1fba6..b6278c1 100644
+--- a/drivers/tty/serial/8250_dw.c
++++ b/drivers/tty/serial/8250_dw.c
+@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
+ } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ /* Clear the USR and write the LCR again. */
+ (void)p->serial_in(p, UART_USR);
+- p->serial_out(p, d->last_lcr, UART_LCR);
++ p->serial_out(p, UART_LCR, d->last_lcr);
+
+ return 1;
+ }
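+
+The 8250_dw hunk above fixes transposed arguments: the uart_port
+accessor takes the register offset first and the value second, so the
+old call wrote the LCR register number into whatever register the stale
+LCR value happened to select:
+
+	/* from struct uart_port */
+	void (*serial_out)(struct uart_port *, int offset, int value);
+
+	p->serial_out(p, UART_LCR, d->last_lcr);	/* offset, then value */
+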
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index e7d82c1..a753956 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1077,6 +1077,18 @@ pci_omegapci_setup(struct serial_private *priv,
+ return setup_port(priv, port, 2, idx * 8, 0);
+ }
+
++static int
++pci_brcm_trumanage_setup(struct serial_private *priv,
++ const struct pciserial_board *board,
++ struct uart_port *port, int idx)
++{
++ int ret = pci_default_setup(priv, board, port, idx);
++
++ port->type = PORT_BRCM_TRUMANAGE;
++ port->flags = (port->flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
++ return ret;
++}
++
+ static int skip_tx_en_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+@@ -1138,6 +1150,7 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
+ #define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
++#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+@@ -1672,6 +1685,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .setup = pci_omegapci_setup,
+ },
+ /*
++ * Broadcom TruManage (NetXtreme)
++ */
++ {
++ .vendor = PCI_VENDOR_ID_BROADCOM,
++ .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_brcm_trumanage_setup,
++ },
++
++ /*
+ * Default "match everything" terminator entry
+ */
+ {
+@@ -1860,6 +1884,7 @@ enum pci_board_num_t {
+ pbn_ce4100_1_115200,
+ pbn_omegapci,
+ pbn_NETMOS9900_2s_115200,
++ pbn_brcm_trumanage,
+ };
+
+ /*
+@@ -2566,6 +2591,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
+ .num_ports = 2,
+ .base_baud = 115200,
+ },
++ [pbn_brcm_trumanage] = {
++ .flags = FL_BASE0,
++ .num_ports = 1,
++ .reg_shift = 2,
++ .base_baud = 115200,
++ },
+ };
+
+ static const struct pci_device_id softmodem_blacklist[] = {
+@@ -4108,6 +4139,13 @@ static struct pci_device_id serial_pci_tbl[] = {
+ pbn_omegapci },
+
+ /*
++ * Broadcom TruManage
++ */
++ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_brcm_trumanage },
++
++ /*
+ * These entries match devices with class COMMUNICATION_SERIAL,
+ * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+ */
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 426434e..a6a6777 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -552,6 +552,7 @@ static void ifx_port_shutdown(struct tty_port *port)
+ container_of(port, struct ifx_spi_device, tty_port);
+
+ mrdy_set_low(ifx_dev);
++ del_timer(&ifx_dev->spi_timer);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ tasklet_kill(&ifx_dev->io_work_tasklet);
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index df7f15d..0cdff38 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1509,6 +1509,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
+ .driver_info = NO_UNION_NORMAL,
+ },
++ { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a9a74d2..0ff8e9a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -619,6 +619,60 @@ static int hub_hub_status(struct usb_hub *hub,
+ return ret;
+ }
+
++static int hub_set_port_link_state(struct usb_hub *hub, int port1,
++ unsigned int link_status)
++{
++ return set_port_feature(hub->hdev,
++ port1 | (link_status << 3),
++ USB_PORT_FEAT_LINK_STATE);
++}
++
++/*
++ * If USB 3.0 ports are placed into the Disabled state, they will no longer
++ * detect any device connects or disconnects. This is generally not what the
++ * USB core wants, since it expects a disabled port to produce a port status
++ * change event when a new device connects.
++ *
++ * Instead, set the link state to Disabled, wait for the link to settle into
++ * that state, clear any change bits, and then put the port into the RxDetect
++ * state.
++ */
++static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
++{
++ int ret;
++ int total_time;
++ u16 portchange, portstatus;
++
++ if (!hub_is_superspeed(hub->hdev))
++ return -EINVAL;
++
++ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
++ if (ret) {
++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
++ port1, ret);
++ return ret;
++ }
++
++ /* Wait for the link to enter the disabled state. */
++ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_DISABLED)
++ break;
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ break;
++ msleep(HUB_DEBOUNCE_STEP);
++ }
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
++ port1, total_time);
++
++ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
++}
++
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ {
+ struct usb_device *hdev = hub->hdev;
+@@ -627,8 +681,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ if (hdev->children[port1-1] && set_state)
+ usb_set_device_state(hdev->children[port1-1],
+ USB_STATE_NOTATTACHED);
+- if (!hub->error && !hub_is_superspeed(hub->hdev))
+- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
++ if (!hub->error) {
++ if (hub_is_superspeed(hub->hdev))
++ ret = hub_usb3_port_disable(hub, port1);
++ else
++ ret = clear_port_feature(hdev, port1,
++ USB_PORT_FEAT_ENABLE);
++ }
+ if (ret)
+ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+ port1, ret);
+@@ -2046,7 +2105,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_SHORT_RESET_TIME 10
+ #define HUB_BH_RESET_TIME 50
+ #define HUB_LONG_RESET_TIME 200
+-#define HUB_RESET_TIMEOUT 500
++#define HUB_RESET_TIMEOUT 800
+
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+@@ -2081,6 +2140,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if (ret < 0)
+ return ret;
+
++ /* The port state is unknown until the reset completes. */
++ if ((portstatus & USB_PORT_STAT_RESET))
++ goto delay;
++
+ /*
+ * Some buggy devices require a warm reset to be issued even
+ * when the port appears not to be connected.
+@@ -2126,11 +2189,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ return -ENOTCONN;
+
+- /* if we`ve finished resetting, then break out of
+- * the loop
+- */
+- if (!(portstatus & USB_PORT_STAT_RESET) &&
+- (portstatus & USB_PORT_STAT_ENABLE)) {
++ if ((portstatus & USB_PORT_STAT_ENABLE)) {
+ if (hub_is_wusb(hub))
+ udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeed(hub->hdev))
+@@ -2144,10 +2203,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ return 0;
+ }
+ } else {
+- if (portchange & USB_PORT_STAT_C_BH_RESET)
+- return 0;
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ hub_port_warm_reset_required(hub,
++ portstatus))
++ return -ENOTCONN;
++
++ return 0;
+ }
+
++delay:
+ /* switch to the long delay after two short delay failures */
+ if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
+ delay = HUB_LONG_RESET_TIME;
+@@ -2171,14 +2235,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ msleep(10 + 40);
+ update_devnum(udev, 0);
+ hcd = bus_to_hcd(udev->bus);
+- if (hcd->driver->reset_device) {
+- *status = hcd->driver->reset_device(hcd, udev);
+- if (*status < 0) {
+- dev_err(&udev->dev, "Cannot reset "
+- "HCD device state\n");
+- break;
+- }
+- }
++ /* The xHC may think the device is already reset,
++ * so ignore the status.
++ */
++ if (hcd->driver->reset_device)
++ hcd->driver->reset_device(hcd, udev);
+ }
+ /* FALL THROUGH */
+ case -ENOTCONN:
+@@ -2186,16 +2247,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+ /* FIXME need disconnect() for NOTATTACHED device */
+- if (warm) {
++ if (hub_is_superspeed(hub->hdev)) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+- } else {
++ }
++ if (!warm)
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+- }
+ break;
+ }
+ }
+@@ -2469,7 +2530,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ static int finish_port_resume(struct usb_device *udev)
+ {
+ int status = 0;
+- u16 devstatus;
++ u16 devstatus = 0;
+
+ /* caller owns the udev device lock */
+ dev_dbg(&udev->dev, "%s\n",
+@@ -2514,7 +2575,13 @@ static int finish_port_resume(struct usb_device *udev)
+ if (status) {
+ dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
+ status);
+- } else if (udev->actconfig) {
++ /*
++ * There are a few quirky devices which violate the standard
++ * by claiming to have remote wakeup enabled after a reset,
++ * which crash if the feature is cleared, hence check for
++ * udev->reset_resume
++ */
++ } else if (udev->actconfig && !udev->reset_resume) {
+ le16_to_cpus(&devstatus);
+ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
+ status = usb_control_msg(udev,
+@@ -3663,9 +3730,14 @@ static void hub_events(void)
+ * SS.Inactive state.
+ */
+ if (hub_port_warm_reset_required(hub, portstatus)) {
++ int status;
++
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- hub_port_reset(hub, i, NULL,
++ status = hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ connect_change = 0;
+ }
+
+ if (connect_change)
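+
+The hub.c hunks above work around a USB 3.0 wrinkle: a SuperSpeed port
+left in the Disabled state stops reporting connects entirely, so rather
+than clearing PORT_ENABLE the core now drives the link state machine
+itself. The sequence the new hub_usb3_port_disable() implements:
+
+	hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+	/* poll portstatus until PORT_LS reads SS.Disabled (debounced) */
+	hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+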
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 8b2a9d8..3f08c09 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -38,6 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Creative SB Audigy 2 NX */
+ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Microsoft LifeCam-VX700 v2.0 */
++ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Logitech Webcam C200 */
+ { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 3700aa6..e9637f9 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1277,6 +1277,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
+
+ if (epnum == 0 || epnum == 1) {
+ dep->endpoint.maxpacket = 512;
++ dep->endpoint.maxburst = 1;
+ dep->endpoint.ops = &dwc3_gadget_ep0_ops;
+ if (!epnum)
+ dwc->gadget.ep0 = &dep->endpoint;
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index d584eaf..c7423a7 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -126,10 +126,7 @@ static const char ep0name [] = "ep0";
+ static const char *const ep_name [] = {
+ ep0name, /* everyone has ep0 */
+
+- /* act like a net2280: high speed, six configurable endpoints */
+- "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
+-
+- /* or like pxa250: fifteen fixed function endpoints */
++ /* act like a pxa250: fifteen fixed function endpoints */
+ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
+ "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
+ "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
+@@ -137,6 +134,10 @@ static const char *const ep_name [] = {
+
+ /* or like sa1100: two fixed function endpoints */
+ "ep1out-bulk", "ep2in-bulk",
++
++ /* and now some generic EPs so we have enough in multi config */
++ "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
++ "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+ };
+ #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
+
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index c8ae199..b6989e4 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -448,6 +448,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ return IRQ_NONE;
+ uhci_writew(uhci, status, USBSTS); /* Clear it */
+
++ spin_lock(&uhci->lock);
++ if (unlikely(!uhci->is_initialized)) /* not yet configured */
++ goto done;
++
+ if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
+ if (status & USBSTS_HSE)
+ dev_err(uhci_dev(uhci), "host system error, "
+@@ -456,7 +460,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ dev_err(uhci_dev(uhci), "host controller process "
+ "error, something bad happened!\n");
+ if (status & USBSTS_HCH) {
+- spin_lock(&uhci->lock);
+ if (uhci->rh_state >= UHCI_RH_RUNNING) {
+ dev_err(uhci_dev(uhci),
+ "host controller halted, "
+@@ -474,15 +477,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ * pending unlinks */
+ mod_timer(&hcd->rh_timer, jiffies);
+ }
+- spin_unlock(&uhci->lock);
+ }
+ }
+
+- if (status & USBSTS_RD)
++ if (status & USBSTS_RD) {
++ spin_unlock(&uhci->lock);
+ usb_hcd_poll_rh_status(hcd);
+- else {
+- spin_lock(&uhci->lock);
++ } else {
+ uhci_scan_schedule(uhci);
++ done:
+ spin_unlock(&uhci->lock);
+ }
+
+@@ -660,9 +663,9 @@ static int uhci_start(struct usb_hcd *hcd)
+ */
+ mb();
+
++ spin_lock_irq(&uhci->lock);
+ configure_hc(uhci);
+ uhci->is_initialized = 1;
+- spin_lock_irq(&uhci->lock);
+ start_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
+ return 0;
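+
+The uhci-hcd hunks above close two races: the IRQ handler now holds
+uhci->lock for its whole body and bails out if uhci_start() has not
+finished (on a shared IRQ line the handler can run before
+configure_hc()), and it drops the lock before usb_hcd_poll_rh_status(),
+which takes locks of its own. The early-out shape:
+
+	spin_lock(&uhci->lock);
+	if (unlikely(!uhci->is_initialized))	/* shared IRQ fired early */
+		goto done;
+	/* normal interrupt handling under the lock */
+done:
+	spin_unlock(&uhci->lock);
+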
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 978860b..24107a7 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -725,12 +725,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ break;
+ case USB_PORT_FEAT_LINK_STATE:
+ temp = xhci_readl(xhci, port_array[wIndex]);
++
++ /* Disable port */
++ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
++ xhci_dbg(xhci, "Disable port %d\n", wIndex);
++ temp = xhci_port_state_to_neutral(temp);
++ /*
++ * Clear all change bits, so that we get a new
++ * connection event.
++ */
++ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
++ PORT_OCC | PORT_RC | PORT_PLC |
++ PORT_CEC;
++ xhci_writel(xhci, temp | PORT_PE,
++ port_array[wIndex]);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
++ /* Put link in RxDetect (enable port) */
++ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
++ xhci_dbg(xhci, "Enable port %d\n", wIndex);
++ xhci_set_link_state(xhci, port_array, wIndex,
++ link_state);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
+ /* Software should not attempt to set
+- * port link state above '5' (Rx.Detect) and the port
++ * port link state above '3' (U3) and the port
+ * must be enabled.
+ */
+ if ((temp & PORT_PE) == 0 ||
+- (link_state > USB_SS_PORT_LS_RX_DETECT)) {
++ (link_state > USB_SS_PORT_LS_U3)) {
+ xhci_warn(xhci, "Cannot set link state.\n");
+ goto error;
+ }
+@@ -877,6 +904,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ int max_ports;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
++ bool reset_change = false;
+
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+@@ -903,6 +931,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ status = 1;
+ }
++ if ((temp & PORT_RC))
++ reset_change = true;
++ }
++ if (!status && !reset_change) {
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return status ? retval : 0;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5719c4d..ee5ec11 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1152,6 +1152,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+ static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+ {
++ if (ep->desc.bInterval == 0)
++ return 0;
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval, 0, 15);
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1ba98f5..2ed591d 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1661,6 +1661,15 @@ cleanup:
+ if (bogus_port_status)
+ return;
+
++ /*
++ * xHCI port-status-change events occur when the "or" of all the
++ * status-change bits in the portsc register changes from 0 to 1.
++ * New status changes won't cause an event if any other change
++ * bits are still set. When an event occurs, switch over to
++ * polling to avoid losing status changes.
++ */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ spin_unlock(&xhci->lock);
+ /* Pass this up to the core */
+ usb_hcd_poll_rh_status(hcd);
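+
+The xhci-hub and xhci-ring polling changes cooperate: the controller
+raises a port-status-change event only when the OR of all change bits
+goes from 0 to 1, so a second change arriving before the first is
+acknowledged produces no event. The fix toggles the root hub between
+event-driven and polled operation:
+
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);	  /* on any PSC event */
+	usb_hcd_poll_rh_status(hcd);		  /* core keeps polling */
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); /* once all bits clear */
+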
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 9dc5870..53c8be1 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -871,6 +871,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
++ /* Don't poll the roothubs on bus suspend. */
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ del_timer_sync(&hcd->rh_timer);
++
+ spin_lock_irq(&xhci->lock);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+@@ -1055,6 +1060,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ compliance_mode_recovery_timer_init(xhci);
+
++ /* Re-enable port polling. */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ usb_hcd_poll_rh_status(hcd);
++
+ return retval;
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 920f04e..641caf8 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2372,10 +2372,7 @@ static int __init musb_init(void)
+ if (usb_disabled())
+ return 0;
+
+- pr_info("%s: version " MUSB_VERSION ", "
+- "?dma?"
+- ", "
+- "otg (peripheral+host)",
++ pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
+ musb_driver_name);
+ return platform_driver_probe(&musb_driver, musb_probe);
+ }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 3f989d6..2cc7c18 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -881,6 +881,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
++ /* Crucible Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index aedf65f..dd6edf8 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1259,3 +1259,9 @@
+ * ATI command output: Cinterion MC55i
+ */
+ #define FTDI_CINTERION_MC55I_PID 0xA951
++
++/*
++ * Product: Comet Caller ID decoder
++ * Manufacturer: Crucible Technologies
++ */
++#define FTDI_CT_COMET_PID 0x8e08
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 8a90d58..3de751d 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -558,6 +558,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
+ wait_queue_t wait;
+ unsigned long flags;
+
++ if (!tty)
++ return;
++
+ if (!timeout)
+ timeout = (HZ * EDGE_CLOSING_WAIT)/100;
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 872807b..9db3e23 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -288,6 +288,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ALCATEL_VENDOR_ID 0x1bbb
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017
++#define ALCATEL_PRODUCT_L100V 0x011e
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -429,9 +430,12 @@ static void option_instat_callback(struct urb *urb);
+ #define MEDIATEK_VENDOR_ID 0x0e8d
+ #define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+ #define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
+ #define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+ #define MEDIATEK_PRODUCT_7208_1COM 0x7101
+ #define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_7103_2COM 0x7103
++#define MEDIATEK_PRODUCT_7106_2COM 0x7106
+ #define MEDIATEK_PRODUCT_FP_1COM 0x0003
+ #define MEDIATEK_PRODUCT_FP_2COM 0x0023
+ #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+@@ -441,6 +445,14 @@ static void option_instat_callback(struct urb *urb);
+ #define CELLIENT_VENDOR_ID 0x2692
+ #define CELLIENT_PRODUCT_MEN200 0x9005
+
++/* Hyundai Petatel Inc. products */
++#define PETATEL_VENDOR_ID 0x1ff4
++#define PETATEL_PRODUCT_NP10T 0x600e
++
++/* TP-LINK Incorporated products */
++#define TPLINK_VENDOR_ID 0x2357
++#define TPLINK_PRODUCT_MA180 0x0201
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -922,8 +934,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1190,6 +1204,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+@@ -1294,7 +1310,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 6873bb6..2263144 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -226,6 +226,8 @@ compose_mount_options_out:
+ compose_mount_options_err:
+ kfree(mountdata);
+ mountdata = ERR_PTR(rc);
++ kfree(*devname);
++ *devname = NULL;
+ goto compose_mount_options_out;
+ }
+
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index a86873e..31df53e 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -289,11 +289,31 @@ out_nofree:
+ return mnt;
+ }
+
++static int
++nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_getattr(mnt, dentry, stat);
++ generic_fillattr(dentry->d_inode, stat);
++ return 0;
++}
++
++static int
++nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_setattr(dentry, attr);
++ return -EACCES;
++}
++
+ const struct inode_operations nfs_mountpoint_inode_operations = {
+ .getattr = nfs_getattr,
++ .setattr = nfs_setattr,
+ };
+
+ const struct inode_operations nfs_referral_inode_operations = {
++ .getattr = nfs_namespace_getattr,
++ .setattr = nfs_namespace_setattr,
+ };
+
+ static void nfs_expire_automounts(struct work_struct *work)
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 07354b7..b2e1136 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1583,8 +1583,18 @@ static int nfs4_reset_session(struct nfs_client *clp)
+
+ nfs4_begin_drain_session(clp);
+ status = nfs4_proc_destroy_session(clp->cl_session);
+- if (status && status != -NFS4ERR_BADSESSION &&
+- status != -NFS4ERR_DEADSESSION) {
++ switch (status) {
++ case 0:
++ case -NFS4ERR_BADSESSION:
++ case -NFS4ERR_DEADSESSION:
++ break;
++ case -NFS4ERR_BACK_CHAN_BUSY:
++ case -NFS4ERR_DELAY:
++ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
++ status = 0;
++ ssleep(1);
++ goto out;
++ default:
+ status = nfs4_recovery_handle_error(clp, status);
+ goto out;
+ }
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 574d4ee..b367581 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -88,11 +88,11 @@ xfs_destroy_ioend(
+ }
+
+ if (ioend->io_iocb) {
++ inode_dio_done(ioend->io_inode);
+ if (ioend->io_isasync) {
+ aio_complete(ioend->io_iocb, ioend->io_error ?
+ ioend->io_error : ioend->io_result, 0);
+ }
+- inode_dio_done(ioend->io_inode);
+ }
+
+ mempool_free(ioend, xfs_ioend_pool);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 1328d8c..1721c41 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -364,17 +364,30 @@ extern int __init efi_setup_pcdp_console(char *);
+ #endif
+
+ /*
+- * We play games with efi_enabled so that the compiler will, if possible, remove
+- * EFI-related code altogether.
++ * We play games with efi_enabled so that the compiler will, if
++ * possible, remove EFI-related code altogether.
+ */
++#define EFI_BOOT 0 /* Were we booted from EFI? */
++#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
++#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
++#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
++#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
++#define EFI_64BIT 5 /* Is the firmware 64-bit? */
++
+ #ifdef CONFIG_EFI
+ # ifdef CONFIG_X86
+- extern int efi_enabled;
++extern int efi_enabled(int facility);
+ # else
+-# define efi_enabled 1
++static inline int efi_enabled(int facility)
++{
++ return 1;
++}
+ # endif
+ #else
+-# define efi_enabled 0
++static inline int efi_enabled(int facility)
++{
++ return 0;
++}
+ #endif
+
+ /*
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index eadf33d..8bec265 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -47,7 +47,8 @@
+ #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+-#define PORT_MAX_8250 21 /* max port ID */
++#define PORT_BRCM_TRUMANAGE 22
++#define PORT_MAX_8250 22 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/include/linux/syslog.h b/include/linux/syslog.h
+index 3891139..ce4c665 100644
+--- a/include/linux/syslog.h
++++ b/include/linux/syslog.h
+@@ -47,6 +47,12 @@
+ #define SYSLOG_FROM_CALL 0
+ #define SYSLOG_FROM_FILE 1
+
++/*
++ * Maximum length in characters of a syslog priority (PRI) prefix,
++ * i.e. '<[0-9]{1,3}>'. See RFC 5424 for details.
++ */
++#define SYSLOG_PRI_MAX_LENGTH 5
++
+ int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+ #endif /* _LINUX_SYSLOG_H */
+diff --git a/init/main.c b/init/main.c
+index cb08fea2..5d0eb1d 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -606,7 +606,7 @@ asmlinkage void __init start_kernel(void)
+ pidmap_init();
+ anon_vma_init();
+ #ifdef CONFIG_X86
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_enter_virtual_mode();
+ #endif
+ thread_info_cache_init();
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 7982a0a..c0d12ea 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -633,8 +633,19 @@ static void call_console_drivers(unsigned start, unsigned end)
+ start_print = start;
+ while (cur_index != end) {
+ if (msg_level < 0 && ((end - cur_index) > 2)) {
++ /*
++ * Prepare buf_prefix as a contiguous copy for log_prefix(),
++ * since the circular log buffer may wrap mid-prefix.
++ */
++ char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
++ unsigned i;
++ for (i = 0; i < (end - cur_index) && i < SYSLOG_PRI_MAX_LENGTH; i++)
++ buf_prefix[i] = LOG_BUF(cur_index + i);
++ buf_prefix[i] = '\0'; /* NUL-terminate the copied prefix */
++
+ /* strip log prefix */
+- cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
++ cur_index += log_prefix(buf_prefix, &msg_level, NULL);
+ start_print = cur_index;
+ }
+ while (cur_index != end) {
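
LOG_BUF() indexes a circular buffer, so a '<N>' priority prefix can wrap around the buffer's end while log_prefix() expects a flat string; the hunk therefore copies at most SYSLOG_PRI_MAX_LENGTH bytes into a contiguous stack array first. A self-contained model, with the ring simulated by a power-of-two modulo:

    #include <stdio.h>

    #define SYSLOG_PRI_MAX_LENGTH 5
    #define LOG_BUF_LEN 8 /* power of two, as in the kernel */
    #define LOG_BUF(i) (log_buf[(i) & (LOG_BUF_LEN - 1)])

    /* A "<3>ms" sequence written near the end wraps back to index 0. */
    static char log_buf[LOG_BUF_LEN] = { '>', 'm', 's', 'g', '.', 0, '<', '3' };

    int main(void)
    {
        char buf_prefix[SYSLOG_PRI_MAX_LENGTH + 1];
        unsigned int start = 6, end = 11, i;

        for (i = 0; i < (end - start) && i < SYSLOG_PRI_MAX_LENGTH; i++)
            buf_prefix[i] = LOG_BUF(start + i);
        buf_prefix[i] = '\0'; /* the copy is always NUL-terminated */

        printf("contiguous prefix: \"%s\"\n", buf_prefix); /* "<3>ms" */
        return 0;
    }
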
+diff --git a/kernel/smp.c b/kernel/smp.c
+index db197d6..9e800b2 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -31,6 +31,7 @@ struct call_function_data {
+ struct call_single_data csd;
+ atomic_t refs;
+ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_ipi;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+@@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return notifier_from_errno(-ENOMEM);
++ if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
++ cpu_to_node(cpu)))
++ return notifier_from_errno(-ENOMEM);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
++ free_cpumask_var(cfd->cpumask_ipi);
+ break;
+ #endif
+ };
+@@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask,
+ return;
+ }
+
++ /*
++ * After we put an entry into the list, data->cpumask may be
++ * cleared again when another CPU sends another IPI for an SMP
++ * function call; send from a private copy so the mask is stable.
++ */
++ cpumask_copy(data->cpumask_ipi, data->cpumask);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+@@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask,
+ smp_mb();
+
+ /* Send a message to all CPUs in the map */
+- arch_send_call_function_ipi_mask(data->cpumask);
++ arch_send_call_function_ipi_mask(data->cpumask_ipi);
+
+ /* Optionally wait for the CPUs to complete */
+ if (wait)
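
Target CPUs clear their own bit in data->cpumask as they service the call, so the mask published on the list is mutable the moment other CPUs can see it; the fix sends the IPI from a private snapshot taken before publication. The pattern, reduced to plain bitmasks:

    #include <stdio.h>

    static unsigned long published_mask; /* other CPUs clear their bit here */

    /* Simulate a target CPU servicing the call before our IPI goes out. */
    static void remote_cpu_acks(int cpu)
    {
        published_mask &= ~(1UL << cpu);
    }

    int main(void)
    {
        unsigned long ipi_mask;

        published_mask = 0x6;      /* target CPUs 1 and 2 */
        ipi_mask = published_mask; /* snapshot first, like cpumask_copy() */

        remote_cpu_acks(1);        /* the race the comment describes */

        printf("send IPI with %#lx; published mask is now %#lx\n",
               ipi_mask, published_mask);
        return 0;
    }
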
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 54dba59..4b1a96b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3482,7 +3482,7 @@ static int ftrace_module_notify(struct notifier_block *self,
+
+ struct notifier_block ftrace_module_nb = {
+ .notifier_call = ftrace_module_notify,
+- .priority = 0,
++ .priority = INT_MAX, /* Run before anything that can use kprobes */
+ };
+
+ extern unsigned long __start_mcount_loc[];
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 46973fb..5f8ec82 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -714,14 +714,12 @@ static int compact_node(int nid)
+ }
+
+ /* Compact all nodes in the system */
+-static int compact_nodes(void)
++static void compact_nodes(void)
+ {
+ int nid;
+
+ for_each_online_node(nid)
+ compact_node(nid);
+-
+- return COMPACT_COMPLETE;
+ }
+
+ /* The written value is actually unused, all memory is compacted */
+@@ -732,7 +730,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ if (write)
+- return compact_nodes();
++ compact_nodes();
+
+ return 0;
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a88dded..4d3a697 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5532,7 +5532,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+ pfn &= (PAGES_PER_SECTION-1);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #else
+- pfn = pfn - zone->zone_start_pfn;
++ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #endif /* CONFIG_SPARSEMEM */
+ }
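
A worked example of why the round_down() matters: if zone_start_pfn is not pageblock-aligned, offsets measured from it land pfns from different pageblocks in the same bitmap slot. With a 512-page pageblock and a zone starting at pfn 768 (values chosen for illustration):

    #include <stdio.h>

    #define pageblock_order 9
    #define pageblock_nr_pages (1UL << pageblock_order) /* 512 pages */
    #define NR_PAGEBLOCK_BITS 4
    #define round_down(x, y) ((x) & ~((y) - 1))

    static unsigned long bitidx(unsigned long pfn, unsigned long start,
                                int use_round_down)
    {
        unsigned long base = use_round_down ?
                             round_down(start, pageblock_nr_pages) : start;
        return ((pfn - base) >> pageblock_order) * NR_PAGEBLOCK_BITS;
    }

    int main(void)
    {
        unsigned long zone_start_pfn = 768; /* not pageblock-aligned */

        /* pfns 1000 and 1100 sit in different pageblocks ([512,1024)
         * and [1024,1536)), yet the old formula maps both to index 0. */
        printf("old: %lu and %lu\n", bitidx(1000, zone_start_pfn, 0),
               bitidx(1100, zone_start_pfn, 0)); /* 0 and 0: collision */
        printf("new: %lu and %lu\n", bitidx(1000, zone_start_pfn, 1),
               bitidx(1100, zone_start_pfn, 1)); /* 0 and 4: distinct */
        return 0;
    }
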
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 075a3e9..0274157 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -945,7 +945,7 @@ static int hidp_setup_hid(struct hidp_session *session,
+ hid->version = req->version;
+ hid->country = req->country;
+
+- strncpy(hid->name, req->name, 128);
++ strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
+ strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+
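
The strncpy() bound change works because strncpy() does not NUL-terminate when the source fills the entire bound; copying one byte fewer than sizeof(req->name) leaves the destination's final byte untouched, which is zero assuming the HID core zero-allocates the structure. In miniature:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char src[8];
        char dst[8] = { 0 }; /* zeroed, as a kzalloc()ed buffer would be */

        memset(src, 'A', sizeof(src)); /* source has no NUL terminator */

        /* strncpy(dst, src, sizeof(dst)) would fill all 8 bytes: no NUL. */
        strncpy(dst, src, sizeof(src) - 1); /* keep the final zero byte */

        printf("dst = \"%s\", length %zu\n", dst, strlen(dst)); /* 7, safe */
        return 0;
    }
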
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 1c775f0..488600c 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1021,7 +1021,7 @@ void sta_info_init(struct ieee80211_local *local)
+
+ void sta_info_stop(struct ieee80211_local *local)
+ {
+- del_timer(&local->sta_cleanup);
++ del_timer_sync(&local->sta_cleanup);
+ sta_info_flush(local, NULL);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 56c3f85..18c5a50 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -918,8 +918,7 @@ static void rpc_async_release(struct work_struct *work)
+
+ static void rpc_release_resources_task(struct rpc_task *task)
+ {
+- if (task->tk_rqstp)
+- xprt_release(task);
++ xprt_release(task);
+ if (task->tk_msg.rpc_cred) {
+ put_rpccred(task->tk_msg.rpc_cred);
+ task->tk_msg.rpc_cred = NULL;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index ffba207..6c91208 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1132,10 +1132,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+ void xprt_release(struct rpc_task *task)
+ {
+ struct rpc_xprt *xprt;
+- struct rpc_rqst *req;
++ struct rpc_rqst *req = task->tk_rqstp;
+
+- if (!(req = task->tk_rqstp))
++ if (req == NULL) {
++ if (task->tk_client) {
++ rcu_read_lock();
++ xprt = rcu_dereference(task->tk_client->cl_xprt);
++ if (xprt->snd_task == task)
++ xprt_release_write(xprt, task);
++ rcu_read_unlock();
++ }
+ return;
++ }
+
+ xprt = req->rq_xprt;
+ rpc_count_iostats(task);
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 8738def..e76a470 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -175,9 +175,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+- }
+- else if (rc == -ENODATA)
++ } else if (rc == -ENODATA && inode->i_op->removexattr) {
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
++ }
+ return rc;
+ }
+
+diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
+index d1aa421..52a4318 100644
+--- a/sound/arm/pxa2xx-ac97-lib.c
++++ b/sound/arm/pxa2xx-ac97-lib.c
+@@ -17,6 +17,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/module.h>
++#include <linux/gpio.h>
+
+ #include <sound/ac97_codec.h>
+ #include <sound/pxa2xx-lib.h>
+@@ -147,6 +148,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
+
+ static inline void pxa_ac97_cold_pxa27x(void)
+ {
++ unsigned int timeout;
++
+ GCR &= GCR_COLD_RST; /* clear everything but nCRST */
+ GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+
+@@ -156,8 +159,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
+ clk_enable(ac97conf_clk);
+ udelay(5);
+ clk_disable(ac97conf_clk);
+- GCR = GCR_COLD_RST;
+- udelay(50);
++ GCR = GCR_COLD_RST | GCR_WARM_RST;
++ timeout = 100; /* wait for the codec-ready bit to be set */
++ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
++ mdelay(1);
+ }
+ #endif
+
+@@ -339,8 +344,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
+ }
+
+ if (cpu_is_pxa27x()) {
+- /* Use GPIO 113 as AC97 Reset on Bulverde */
++ /*
++ * This gpio is needed for a workaround for a bug in the ac97
++ * controller during warm reset. The direction and level are set
++ * here so that it is an output driven high when switching from
++ * the AC97_nRESET alt function to a generic gpio.
++ */
++ ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
++ "pxa27x ac97 reset");
++ if (ret < 0) {
++ pr_err("%s: gpio_request_one() failed: %d\n",
++ __func__, ret);
++ goto err_conf;
++ }
+ pxa27x_assert_ac97reset(reset_gpio, 0);
++
+ ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
+ if (IS_ERR(ac97conf_clk)) {
+ ret = PTR_ERR(ac97conf_clk);
+@@ -383,6 +401,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
+
+ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
+ {
++ if (cpu_is_pxa27x())
++ gpio_free(reset_gpio);
+ GCR |= GCR_ACLINK_OFF;
+ free_irq(IRQ_AC97, NULL);
+ if (ac97conf_clk) {
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a1e312f..a166a85 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -569,29 +569,43 @@ static char *driver_short_names[] __devinitdata = {
+ #define get_azx_dev(substream) (substream->runtime->private_data)
+
+ #ifdef CONFIG_X86
+-static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on)
++static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
+ {
++ int pages;
++
+ if (azx_snoop(chip))
+ return;
+- if (addr && size) {
+- int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (!dmab || !dmab->area || !dmab->bytes)
++ return;
++
++#ifdef CONFIG_SND_DMA_SGBUF
++ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
++ struct snd_sg_buf *sgbuf = dmab->private_data;
+ if (on)
+- set_memory_wc((unsigned long)addr, pages);
++ set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
+ else
+- set_memory_wb((unsigned long)addr, pages);
++ set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
++ return;
+ }
++#endif
++
++ pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (on)
++ set_memory_wc((unsigned long)dmab->area, pages);
++ else
++ set_memory_wb((unsigned long)dmab->area, pages);
+ }
+
+ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
+ bool on)
+ {
+- __mark_pages_wc(chip, buf->area, buf->bytes, on);
++ __mark_pages_wc(chip, buf, on);
+ }
+ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
+- struct snd_pcm_runtime *runtime, bool on)
++ struct snd_pcm_substream *substream, bool on)
+ {
+ if (azx_dev->wc_marked != on) {
+- __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on);
++ __mark_pages_wc(chip, substream->runtime->dma_buffer_p, on);
+ azx_dev->wc_marked = on;
+ }
+ }
+@@ -602,7 +616,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
+ {
+ }
+ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
+- struct snd_pcm_runtime *runtime, bool on)
++ struct snd_pcm_substream *substream, bool on)
+ {
+ }
+ #endif
+@@ -1776,11 +1790,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx *chip = apcm->chip;
+- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ int ret;
+
+- mark_runtime_wc(chip, azx_dev, runtime, false);
++ mark_runtime_wc(chip, azx_dev, substream, false);
+ azx_dev->bufsize = 0;
+ azx_dev->period_bytes = 0;
+ azx_dev->format_val = 0;
+@@ -1788,7 +1801,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
+- mark_runtime_wc(chip, azx_dev, runtime, true);
++ mark_runtime_wc(chip, azx_dev, substream, true);
+ return ret;
+ }
+
+@@ -1797,7 +1810,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ struct azx *chip = apcm->chip;
+- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+
+ /* reset BDL address */
+@@ -1810,7 +1822,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
+
+ snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
+
+- mark_runtime_wc(chip, azx_dev, runtime, false);
++ mark_runtime_wc(chip, azx_dev, substream, false);
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 498b62e..c9269ce 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -592,24 +592,12 @@ static int conexant_build_controls(struct hda_codec *codec)
+ return 0;
+ }
+
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+-static int conexant_suspend(struct hda_codec *codec, pm_message_t state)
+-{
+- snd_hda_shutup_pins(codec);
+- return 0;
+-}
+-#endif
+-
+ static const struct hda_codec_ops conexant_patch_ops = {
+ .build_controls = conexant_build_controls,
+ .build_pcms = conexant_build_pcms,
+ .init = conexant_init,
+ .free = conexant_free,
+ .set_power_state = conexant_set_power,
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+- .suspend = conexant_suspend,
+-#endif
+- .reboot_notify = snd_hda_shutup_pins,
+ };
+
+ #ifdef CONFIG_SND_HDA_INPUT_BEEP
+@@ -4429,10 +4417,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
+ .init = cx_auto_init,
+ .free = conexant_free,
+ .unsol_event = cx_auto_unsol_event,
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+- .suspend = conexant_suspend,
+-#endif
+- .reboot_notify = snd_hda_shutup_pins,
+ };
+
+ /*
+@@ -4614,6 +4598,18 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ .patch = patch_conexant_auto },
+ { .id = 0x14f150b9, .name = "CX20665",
+ .patch = patch_conexant_auto },
++ { .id = 0x14f1510f, .name = "CX20751/2",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15110, .name = "CX20751/2",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15111, .name = "CX20753/4",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15113, .name = "CX20755",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15114, .name = "CX20756",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15115, .name = "CX20757",
++ .patch = patch_conexant_auto },
+ {} /* terminator */
+ };
+
+@@ -4634,6 +4630,12 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
+ MODULE_ALIAS("snd-hda-codec-id:14f150ac");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b8");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
++MODULE_ALIAS("snd-hda-codec-id:14f1510f");
++MODULE_ALIAS("snd-hda-codec-id:14f15110");
++MODULE_ALIAS("snd-hda-codec-id:14f15111");
++MODULE_ALIAS("snd-hda-codec-id:14f15113");
++MODULE_ALIAS("snd-hda-codec-id:14f15114");
++MODULE_ALIAS("snd-hda-codec-id:14f15115");
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Conexant HD-audio codec");
+diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
+index a3b9cbb..ba03dc2 100644
+--- a/sound/soc/codecs/wm2000.c
++++ b/sound/soc/codecs/wm2000.c
+@@ -224,9 +224,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
+
+ ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
+ if (wm2000->speech_clarity)
+- ret &= ~WM2000_SPEECH_CLARITY;
+- else
+ ret |= WM2000_SPEECH_CLARITY;
++ else
++ ret &= ~WM2000_SPEECH_CLARITY;
+ wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
+
+ wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index 42d9039..a0cda1b 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -1446,15 +1446,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ case SND_SOC_DAIFMT_DSP_A:
+ mask = 0;
+ break;
+- case SND_SOC_DAIFMT_DSP_B:
+- mask = 1;
+- break;
+ case SND_SOC_DAIFMT_I2S:
+ mask = 2;
+ break;
+- case SND_SOC_DAIFMT_LEFT_J:
+- mask = 3;
+- break;
+ default:
+ dev_err(codec->dev, "Unsupported DAI format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 24c5114..9ab2b3e 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -148,10 +148,8 @@ void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
+ int i;
+
+ /* stop urbs (to be sure) */
+- if (!subs->stream->chip->shutdown) {
+- deactivate_urbs(subs, force, 1);
+- wait_clear_urbs(subs);
+- }
++ deactivate_urbs(subs, force, 1);
++ wait_clear_urbs(subs);
+
+ for (i = 0; i < MAX_URBS; i++)
+ release_urb_ctx(&subs->dataurb[i]);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 6730a33..9121dee 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1239,16 +1239,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
+ }
+ channels = (hdr->bLength - 7) / csize - 1;
+ bmaControls = hdr->bmaControls;
++ if (hdr->bLength < 7 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ } else {
+ struct uac2_feature_unit_descriptor *ftr = _ftr;
+ csize = 4;
+ channels = (hdr->bLength - 6) / 4 - 1;
+ bmaControls = ftr->bmaControls;
+- }
+-
+- if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
+- snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
+- return -EINVAL;
++ if (hdr->bLength < 6 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ }
+
+ /* parse the source unit */
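
Moving the bLength check inside each branch lets UAC1 and UAC2 feature units validate against their own minimum header size before channels is derived, so a truncated descriptor can no longer drive (bLength - 7) / csize - 1 negative. A standalone model of the UAC1 branch (struct abbreviated to the two fields used here):

    #include <stdio.h>

    struct feature_unit_hdr {
        unsigned char bLength;
        unsigned char bControlSize; /* csize: per-channel bitmap width */
    };

    /* Returns channel count, or -1 for a malformed UAC1 feature unit. */
    static int parse_uac1_channels(const struct feature_unit_hdr *hdr)
    {
        int csize = hdr->bControlSize;

        if (csize == 0 || hdr->bLength < 7 + csize) /* reject first */
            return -1;
        return (hdr->bLength - 7) / csize - 1;
    }

    int main(void)
    {
        struct feature_unit_hdr ok = { .bLength = 13, .bControlSize = 2 };
        struct feature_unit_hdr bad = { .bLength = 7, .bControlSize = 2 };

        printf("ok: %d channels\n", parse_uac1_channels(&ok)); /* 2 */
        printf("bad: %d\n", parse_uac1_channels(&bad)); /* rejected */
        return 0;
    }
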
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a3ddac0..1b275f0 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -383,11 +383,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
+ * rules
+ */
+ err = usb_driver_set_configuration(dev, 2);
+- if (err < 0) {
++ if (err < 0)
+ snd_printdd("error usb_driver_set_configuration: %d\n",
+ err);
+- return -ENODEV;
+- }
++ /*
++ * Always return an error, so that we stop creating a device
++ * that will just be destroyed and recreated with a new
++ * configuration.
++ */
++ return -ENODEV;
+ } else
+ snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
+
diff --git a/3.2.54/1038_linux-3.2.39.patch b/3.2.54/1038_linux-3.2.39.patch
new file mode 100644
index 0000000..5639e92
--- /dev/null
+++ b/3.2.54/1038_linux-3.2.39.patch
@@ -0,0 +1,2660 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 82d7fa6..83f156e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2584,7 +2584,7 @@ S: Maintained
+ F: drivers/net/ethernet/i825xx/eexpress.*
+
+ ETHERNET BRIDGE
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: bridge@lists.linux-foundation.org
+ L: netdev@vger.kernel.org
+ W: http://www.linuxfoundation.org/en/Net:Bridge
+@@ -4475,7 +4475,7 @@ S: Supported
+ F: drivers/infiniband/hw/nes/
+
+ NETEM NETWORK EMULATOR
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netem@lists.linux-foundation.org
+ S: Maintained
+ F: net/sched/sch_netem.c
+@@ -5993,7 +5993,7 @@ S: Maintained
+ F: drivers/usb/misc/sisusbvga/
+
+ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/marvell/sk*
+diff --git a/Makefile b/Makefile
+index c8c9d02..0fceb8b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index a6253ec..95b4eb3 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -208,7 +208,7 @@ sysexit_from_sys_call:
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+- sti
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%esi /* second arg, syscall return value */
+ cmpl $0,%eax /* is it < 0? */
+ setl %al /* 1 if so, 0 if not */
+@@ -218,7 +218,7 @@ sysexit_from_sys_call:
+ GET_THREAD_INFO(%r10)
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+- cli
++ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+ testl %edi,TI_flags(%r10)
+ jz \exit
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index c346d11..d4f278e 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -157,6 +157,34 @@ static int enable_single_step(struct task_struct *child)
+ return 1;
+ }
+
++static void set_task_blockstep(struct task_struct *task, bool on)
++{
++ unsigned long debugctl;
++
++ /*
++ * Ensure irq/preemption can't change debugctl in between.
++ * Note also that both TIF_BLOCKSTEP and debugctl should
++ * be changed atomically wrt preemption.
++ *
++ * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
++ * task is current or it can't be running, otherwise we can race
++ * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
++ * PTRACE_KILL is not safe.
++ */
++ local_irq_disable();
++ debugctl = get_debugctlmsr();
++ if (on) {
++ debugctl |= DEBUGCTLMSR_BTF;
++ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
++ } else {
++ debugctl &= ~DEBUGCTLMSR_BTF;
++ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
++ }
++ if (task == current)
++ update_debugctlmsr(debugctl);
++ local_irq_enable();
++}
++
+ /*
+ * Enable single or block step.
+ */
+@@ -169,19 +197,10 @@ static void enable_step(struct task_struct *child, bool block)
+ * So no one should try to use debugger block stepping in a program
+ * that uses user-mode single stepping itself.
+ */
+- if (enable_single_step(child) && block) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl |= DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl &= ~DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- }
++ if (enable_single_step(child) && block)
++ set_task_blockstep(child, true);
++ else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++ set_task_blockstep(child, false);
+ }
+
+ void user_enable_single_step(struct task_struct *child)
+@@ -199,13 +218,8 @@ void user_disable_single_step(struct task_struct *child)
+ /*
+ * Make sure block stepping (BTF) is disabled.
+ */
+- if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl &= ~DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- }
++ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++ set_task_blockstep(child, false);
+
+ /* Always clear TIF_SINGLESTEP... */
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index b040b0e..7328f71 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -88,11 +88,11 @@ ENTRY(xen_iret)
+ */
+ #ifdef CONFIG_SMP
+ GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov xen_vcpu(%eax), %eax
++ movl %ss:TI_cpu(%eax), %eax
++ movl %ss:__per_cpu_offset(,%eax,4), %eax
++ mov %ss:xen_vcpu(%eax), %eax
+ #else
+- movl xen_vcpu, %eax
++ movl %ss:xen_vcpu, %eax
+ #endif
+
+ /* check IF state we're restoring */
+@@ -105,11 +105,11 @@ ENTRY(xen_iret)
+ * resuming the code, so we don't have to be worried about
+ * being preempted to another CPU.
+ */
+- setz XEN_vcpu_info_mask(%eax)
++ setz %ss:XEN_vcpu_info_mask(%eax)
+ xen_iret_start_crit:
+
+ /* check for unmasked and pending */
+- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+
+ /*
+ * If there's something pending, mask events again so we can
+@@ -117,7 +117,7 @@ xen_iret_start_crit:
+ * touch XEN_vcpu_info_mask.
+ */
+ jne 1f
+- movb $1, XEN_vcpu_info_mask(%eax)
++ movb $1, %ss:XEN_vcpu_info_mask(%eax)
+
+ 1: popl %eax
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index b07edc4..62c1325 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -52,7 +52,9 @@
+ #define DRV_VERSION "3.0"
+
+ enum {
+- AHCI_PCI_BAR = 5,
++ AHCI_PCI_BAR_STA2X11 = 0,
++ AHCI_PCI_BAR_ENMOTUS = 2,
++ AHCI_PCI_BAR_STANDARD = 5,
+ };
+
+ enum board_ids {
+@@ -375,6 +377,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
+ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+
++ /* ST Microelectronics */
++ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
++
+ /* Marvell */
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+@@ -400,6 +405,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
++ /* Enmotus */
++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
++
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+@@ -629,6 +637,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+ {
+ int rc;
+
++ /*
++ * If the device fixup already set the dma_mask to some non-standard
++ * value, don't extend it here. This happens on STA2X11, for example.
++ */
++ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
++ return 0;
++
+ if (using_dac &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+@@ -1033,6 +1048,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ int n_ports, i, rc;
++ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
+
+ VPRINTK("ENTER\n");
+
+@@ -1064,6 +1080,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ dev_info(&pdev->dev,
+ "PDC42819 can only drive SATA devices with this driver\n");
+
++ /* Both Connext and Enmotus devices use non-standard BARs */
++ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
++ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
++ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
++ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
++
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+@@ -1072,7 +1094,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
++ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+@@ -1115,7 +1137,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
+ pci_intx(pdev, 1);
+
+- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
++ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+@@ -1179,8 +1201,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
++ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
++ ata_port_pbar_desc(ap, ahci_pci_bar,
+ 0x100 + ap->port_no * 0x80, "port");
+
+ /* set enclosure management message type */
+diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
+index 6a0955e..53ecac5 100644
+--- a/drivers/atm/iphase.h
++++ b/drivers/atm/iphase.h
+@@ -636,82 +636,82 @@ struct rx_buf_desc {
+ #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
+ #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
+
+-typedef volatile u_int freg_t;
++typedef volatile u_int ffreg_t;
+ typedef u_int rreg_t;
+
+ typedef struct _ffredn_t {
+- freg_t idlehead_high; /* Idle cell header (high) */
+- freg_t idlehead_low; /* Idle cell header (low) */
+- freg_t maxrate; /* Maximum rate */
+- freg_t stparms; /* Traffic Management Parameters */
+- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+- freg_t rm_type; /* */
+- u_int filler5[0x17 - 0x06];
+- freg_t cmd_reg; /* Command register */
+- u_int filler18[0x20 - 0x18];
+- freg_t cbr_base; /* CBR Pointer Base */
+- freg_t vbr_base; /* VBR Pointer Base */
+- freg_t abr_base; /* ABR Pointer Base */
+- freg_t ubr_base; /* UBR Pointer Base */
+- u_int filler24;
+- freg_t vbrwq_base; /* VBR Wait Queue Base */
+- freg_t abrwq_base; /* ABR Wait Queue Base */
+- freg_t ubrwq_base; /* UBR Wait Queue Base */
+- freg_t vct_base; /* Main VC Table Base */
+- freg_t vcte_base; /* Extended Main VC Table Base */
+- u_int filler2a[0x2C - 0x2A];
+- freg_t cbr_tab_beg; /* CBR Table Begin */
+- freg_t cbr_tab_end; /* CBR Table End */
+- freg_t cbr_pointer; /* CBR Pointer */
+- u_int filler2f[0x30 - 0x2F];
+- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
+- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
+- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+- u_int filler38[0x40 - 0x38];
+- freg_t queue_base; /* Base address for PRQ and TCQ */
+- freg_t desc_base; /* Base address of descriptor table */
+- u_int filler42[0x45 - 0x42];
+- freg_t mode_reg_0; /* Mode register 0 */
+- freg_t mode_reg_1; /* Mode register 1 */
+- freg_t intr_status_reg;/* Interrupt Status register */
+- freg_t mask_reg; /* Mask Register */
+- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+- freg_t state_reg; /* Status register */
+- u_int filler4c[0x58 - 0x4c];
+- freg_t curr_desc_num; /* Contains the current descriptor num */
+- freg_t next_desc; /* Next descriptor */
+- freg_t next_vc; /* Next VC */
+- u_int filler5b[0x5d - 0x5b];
+- freg_t present_slot_cnt;/* Present slot count */
+- u_int filler5e[0x6a - 0x5e];
+- freg_t new_desc_num; /* New descriptor number */
+- freg_t new_vc; /* New VC */
+- freg_t sched_tbl_ptr; /* Schedule table pointer */
+- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
+- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
+- freg_t abrwq_wptr; /* ABR wait queue write pointer */
+- freg_t abrwq_rptr; /* ABR wait queue read pointer */
+- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
+- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
+- freg_t cbr_vc; /* CBR VC */
+- freg_t vbr_sb_vc; /* VBR SB VC */
+- freg_t abr_sb_vc; /* ABR SB VC */
+- freg_t ubr_sb_vc; /* UBR SB VC */
+- freg_t vbr_next_link; /* VBR next link */
+- freg_t abr_next_link; /* ABR next link */
+- freg_t ubr_next_link; /* UBR next link */
+- u_int filler7a[0x7c-0x7a];
+- freg_t out_rate_head; /* Out of rate head */
+- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
++ ffreg_t idlehead_high; /* Idle cell header (high) */
++ ffreg_t idlehead_low; /* Idle cell header (low) */
++ ffreg_t maxrate; /* Maximum rate */
++ ffreg_t stparms; /* Traffic Management Parameters */
++ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
++ ffreg_t rm_type; /* */
++ u_int filler5[0x17 - 0x06];
++ ffreg_t cmd_reg; /* Command register */
++ u_int filler18[0x20 - 0x18];
++ ffreg_t cbr_base; /* CBR Pointer Base */
++ ffreg_t vbr_base; /* VBR Pointer Base */
++ ffreg_t abr_base; /* ABR Pointer Base */
++ ffreg_t ubr_base; /* UBR Pointer Base */
++ u_int filler24;
++ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
++ ffreg_t abrwq_base; /* ABR Wait Queue Base */
++ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
++ ffreg_t vct_base; /* Main VC Table Base */
++ ffreg_t vcte_base; /* Extended Main VC Table Base */
++ u_int filler2a[0x2C - 0x2A];
++ ffreg_t cbr_tab_beg; /* CBR Table Begin */
++ ffreg_t cbr_tab_end; /* CBR Table End */
++ ffreg_t cbr_pointer; /* CBR Pointer */
++ u_int filler2f[0x30 - 0x2F];
++ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
++ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
++ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
++ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
++ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
++ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
++ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
++ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
++ u_int filler38[0x40 - 0x38];
++ ffreg_t queue_base; /* Base address for PRQ and TCQ */
++ ffreg_t desc_base; /* Base address of descriptor table */
++ u_int filler42[0x45 - 0x42];
++ ffreg_t mode_reg_0; /* Mode register 0 */
++ ffreg_t mode_reg_1; /* Mode register 1 */
++ ffreg_t intr_status_reg;/* Interrupt Status register */
++ ffreg_t mask_reg; /* Mask Register */
++ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
++ ffreg_t state_reg; /* Status register */
++ u_int filler4c[0x58 - 0x4c];
++ ffreg_t curr_desc_num; /* Contains the current descriptor num */
++ ffreg_t next_desc; /* Next descriptor */
++ ffreg_t next_vc; /* Next VC */
++ u_int filler5b[0x5d - 0x5b];
++ ffreg_t present_slot_cnt;/* Present slot count */
++ u_int filler5e[0x6a - 0x5e];
++ ffreg_t new_desc_num; /* New descriptor number */
++ ffreg_t new_vc; /* New VC */
++ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
++ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
++ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
++ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
++ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
++ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
++ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
++ ffreg_t cbr_vc; /* CBR VC */
++ ffreg_t vbr_sb_vc; /* VBR SB VC */
++ ffreg_t abr_sb_vc; /* ABR SB VC */
++ ffreg_t ubr_sb_vc; /* UBR SB VC */
++ ffreg_t vbr_next_link; /* VBR next link */
++ ffreg_t abr_next_link; /* ABR next link */
++ ffreg_t ubr_next_link; /* UBR next link */
++ u_int filler7a[0x7c-0x7a];
++ ffreg_t out_rate_head; /* Out of rate head */
++ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
++ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
++ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ } ffredn_t;
+
+ typedef struct _rfredn_t {
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 8e3c46d..7795d1e 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1789,7 +1789,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+- cancel_work_sync(&portdev->control_work);
++ if (use_multiport(portdev))
++ cancel_work_sync(&portdev->control_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c05e825..7817429 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7156,8 +7156,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ OUT_RING(pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+-
+- intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7193,6 +7191,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 1b98338..ec36dd9 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -2455,6 +2455,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
++ /* RV100 board with external TDMS bit mis-set.
++ /* According to the C_CAN documentation, the reserved bit
++ * in the IFx_MASK2 register is fixed to 1.
++ */
++ dev->pdev->subsystem_vendor == 0x1014 &&
++ dev->pdev->subsystem_device == 0x029A) {
++ tmp &= ~(1 << 4);
++ }
+ if ((tmp >> 4) & 0x1) {
+ devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index aec8e0c..63e7143 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1110,8 +1110,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ }
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+- if (radeon_fb == NULL)
++ if (radeon_fb == NULL) {
++ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 49d5820..65be5e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -306,6 +306,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+ {
+ int r;
+
++ /* make sure we aren't trying to allocate more
++ * space than there is on the ring
++ */
++ if (ndw > (rdev->cp.ring_size / 4))
++ return -ENOMEM;
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 2d41336..c15c38e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -278,6 +278,9 @@
+ #define USB_VENDOR_ID_EZKEY 0x0518
+ #define USB_DEVICE_ID_BTC_8193 0x0002
+
++#define USB_VENDOR_ID_FORMOSA 0x147a
++#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
++
+ #define USB_VENDOR_ID_FREESCALE 0x15A2
+ #define USB_DEVICE_ID_FREESCALE_MX28 0x004F
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index aec3fa3..e26eddf 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
+index fd17bb3..08c2329 100644
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+ CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+ CAPIMSG_CONTROL(data));
+ l -= 12;
++ if (l <= 0)
++ return;
+ dbgline = kmalloc(3*l, GFP_ATOMIC);
+ if (!dbgline)
+ return;
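
Without the added check, a CAPI message shorter than its 12-byte header leaves l negative, and kmalloc(3*l, GFP_ATOMIC) receives that value converted to an enormous size_t. The conversion hazard in miniature:

    #include <stdio.h>

    int main(void)
    {
        int l = 10; /* a message shorter than its 12-byte header */

        l -= 12; /* l == -2 */
        printf("3*l as an allocation size: %zu\n", (size_t)(3 * l));
        /* ~1.8e19 on 64-bit; the new "if (l <= 0) return;" avoids this */
        return 0;
    }
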
+diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
+index 4fe51fd..acaef66 100644
+--- a/drivers/media/video/gspca/kinect.c
++++ b/drivers/media/video/gspca/kinect.c
+@@ -390,6 +390,7 @@ static const struct sd_desc sd_desc = {
+ /* -- module initialisation -- */
+ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x045e, 0x02ae)},
++ {USB_DEVICE(0x045e, 0x02bf)},
+ {}
+ };
+
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 21a3d77..64647d4 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -446,8 +446,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ IFX_WRITE_LOW_16BIT(mask));
++
++ /* According to C_CAN documentation, the reserved bit
++ * in IFx_MASK2 register is fixed 1
++ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+- IFX_WRITE_HIGH_16BIT(mask));
++ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
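
Per the comment above, bit 13 of IFx_MASK2 is reserved and must stay 1, so the driver ORs it back in after splitting the mask across the two 16-bit registers. In isolation, with the IFX_WRITE_*_16BIT helpers assumed to be simple shift/mask macros:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    /* Assumed helpers: split a 32-bit value into 16-bit register halves. */
    #define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
    #define IFX_WRITE_HIGH_16BIT(x) (((x) >> 16) & 0xFFFF)

    int main(void)
    {
        uint32_t mask = 0x1FFFFFFF; /* 29-bit extended-ID mask */
        uint16_t mask1 = IFX_WRITE_LOW_16BIT(mask);
        uint16_t mask2 = IFX_WRITE_HIGH_16BIT(mask) | BIT(13); /* reserved */

        printf("mask1=%#x mask2=%#x\n", mask1, mask2);
        return 0;
    }
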
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 01bc102..c86fa50 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1135,14 +1135,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+ }
+
+-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+- MII_TG3_AUXCTL_ACTL_TX_6DB)
++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
++{
++ u32 val;
++ int err;
+
+-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_TX_6DB);
++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++
++ if (err)
++ return err;
++ if (enable)
++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++ else
++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++
++ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
++
++ return err;
++}
+
+ static int tg3_bmcr_reset(struct tg3 *tp)
+ {
+@@ -2087,7 +2099,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+
+ otp = tp->phy_otp;
+
+- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
++ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
+ return;
+
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+@@ -2112,7 +2124,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+@@ -2148,9 +2160,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+
+ if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2166,11 +2178,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2314,7 +2326,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (err)
+ return err;
+
+@@ -2335,7 +2347,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+@@ -2424,10 +2436,10 @@ static int tg3_phy_reset(struct tg3 *tp)
+
+ out:
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ tg3_phydsp_write(tp, 0x000a, 0x0323);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+@@ -2436,14 +2448,14 @@ out:
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+@@ -2452,7 +2464,7 @@ out:
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ }
+
+@@ -3639,7 +3651,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ tw32(TG3_CPMU_EEE_MODE,
+ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (!err) {
+ u32 err2;
+
+@@ -3671,7 +3683,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
+
+- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
+ if (!err)
+ err = err2;
+ }
+@@ -6353,6 +6365,9 @@ static void tg3_poll_controller(struct net_device *dev)
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
++ if (tg3_irq_sync(tp))
++ return;
++
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+ }
+@@ -15388,6 +15403,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
+ tp->pm_cap = pm_cap;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
++ tp->irq_sync = 1;
+
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
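
The hunk above replaces the fire-and-forget TG3_PHY_AUXCTL_SMDSP_ENABLE/DISABLE macros with a helper that reads the register first and refuses to write back a value it never obtained. A minimal userspace sketch of that read-modify-write shape, with stand-in register constants and fake phy_read/phy_write accessors (assumptions for illustration, not the driver API):

#include <stdbool.h>
#include <stdio.h>

#define SMDSP_ENA 0x0400 /* stand-in for MII_TG3_AUXCTL_ACTL_SMDSP_ENA */
#define TX_6DB    0x0004 /* stand-in for MII_TG3_AUXCTL_ACTL_TX_6DB */

static unsigned int phy_reg = TX_6DB; /* fake register backing store */

static int phy_read(unsigned int *val) { *val = phy_reg; return 0; }
static int phy_write(unsigned int val) { phy_reg = val; return 0; }

/* Same shape as tg3_phy_toggle_auxctl_smdsp(): bail out on a failed
 * read so a stale value is never written back, then flip one bit. */
static int toggle_smdsp(bool enable)
{
    unsigned int val;
    int err = phy_read(&val);

    if (err)
        return err;

    if (enable)
        val |= SMDSP_ENA;
    else
        val &= ~SMDSP_ENA;

    return phy_write(val | TX_6DB);
}

int main(void)
{
    toggle_smdsp(true);
    printf("after enable:  0x%04x\n", phy_reg);
    toggle_smdsp(false);
    printf("after disable: 0x%04x\n", phy_reg);
    return 0;
}
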
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index a8259cc..5674145 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+- for (j = 0; j < cmd_buf->frag_count; j++) {
++ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index da5204d..4a238a4 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1924,10 +1924,12 @@ unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+
+ out_err:
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b8db4cd..a6153f1 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5829,13 +5829,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
+-
+- /* Work around for AMD plateform. */
+- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+- desc->opts2 = 0;
+- cur_rx++;
+- }
+ }
+
+ count = cur_rx - tp->cur_rx;
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 4ce9e5f..d0893e4 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
+
+ skb_orphan(skb);
+
++ /* Before queueing this packet to netif_rx(),
++ * make sure dst is refcounted.
++ */
++ skb_dst_force(skb);
++
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* it's OK to use per_cpu_ptr() because BHs are off */
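
skb_dst_force() above pins the dst entry before the packet is queued to netif_rx(), so the entry cannot be freed while the packet waits. A toy refcount sketch of the same rule, with hypothetical dst_hold/dst_put helpers standing in for the kernel's:

#include <assert.h>
#include <stdio.h>

struct dst { int refcnt; };

static void dst_hold(struct dst *d) { d->refcnt++; }

static void dst_put(struct dst *d)
{
    d->refcnt--;
    assert(d->refcnt >= 0); /* a 0 -> -1 drop means use-after-free */
}

/* Runs later (the netif_rx() analogue) and drops its own reference. */
static void deferred_consume(struct dst *d)
{
    printf("consuming dst, refcnt=%d\n", d->refcnt);
    dst_put(d);
}

int main(void)
{
    struct dst d = { .refcnt = 1 };

    dst_hold(&d);         /* skb_dst_force() analogue: pin before queueing */
    dst_put(&d);          /* original owner lets go ... */
    deferred_consume(&d); /* ... yet the queued user still holds a live ref */
    return 0;
}
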
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index 8d3ab37..6618dd6 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -1594,7 +1594,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
+ scan_rsp->number_of_sets);
+ ret = -1;
+- goto done;
++ goto check_next_scan;
+ }
+
+ bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
+@@ -1663,7 +1663,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ if (!beacon_size || beacon_size > bytes_left) {
+ bss_info += bytes_left;
+ bytes_left = 0;
+- return -1;
++ ret = -1;
++ goto check_next_scan;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+@@ -1716,7 +1717,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ dev_err(priv->adapter->dev, "%s: in processing"
+ " IE, bytes left < IE length\n",
+ __func__);
+- goto done;
++ goto check_next_scan;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(u8 *) (current_ptr +
+@@ -1782,6 +1783,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ }
+ }
+
++check_next_scan:
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+@@ -1812,7 +1814,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ }
+
+-done:
+ return ret;
+ }
+
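
The scan.c change reroutes every failure path to the check_next_scan label so the pending scan queue is always serviced, where the old "goto done" returned early and stalled it. A hedged sketch of that cleanup-label shape (the function and its prints are illustrative only):

#include <stdio.h>

static int process_scan_response(int fail)
{
    int ret = 0;

    if (fail) {
        ret = -1;
        goto check_next_scan; /* was "goto done": queue never serviced */
    }
    printf("scan response parsed\n");

check_next_scan:
    printf("pending scan queue serviced (success and failure alike)\n");
    return ret;
}

int main(void)
{
    process_scan_response(0);
    process_scan_response(1);
    return 0;
}
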
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
+index 22ed6df..2be9880 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
+@@ -1921,7 +1921,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1706) },
+ { USB_DEVICE(0x0b05, 0x1707) },
+ /* Belkin */
+- { USB_DEVICE(0x050d, 0x7050) },
++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */
+ { USB_DEVICE(0x050d, 0x7051) },
+ /* Cisco Systems */
+ { USB_DEVICE(0x13b1, 0x000d) },
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index b66a61b..3d4ea1f 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -959,6 +959,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x07d1, 0x3c15) },
+ { USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x2001, 0x3c1b) },
++ { USB_DEVICE(0x2001, 0x3c1e) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+@@ -1090,6 +1091,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x177f, 0x0153) },
+ { USB_DEVICE(0x177f, 0x0302) },
+ { USB_DEVICE(0x177f, 0x0313) },
++ { USB_DEVICE(0x177f, 0x0323) },
+ /* U-Media */
+ { USB_DEVICE(0x157e, 0x300e) },
+ { USB_DEVICE(0x157e, 0x3013) },
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
+index 2ad468d..9e724eb 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2421,6 +2421,7 @@ static struct usb_device_id rt73usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1723) },
+ { USB_DEVICE(0x0b05, 0x1724) },
+ /* Belkin */
++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */
+ { USB_DEVICE(0x050d, 0x705a) },
+ { USB_DEVICE(0x050d, 0x905b) },
+ { USB_DEVICE(0x050d, 0x905c) },
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index a49e848..30dd0a9 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -503,8 +503,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+ WARN_ON(skb_queue_empty(&rx_queue));
+ while (!skb_queue_empty(&rx_queue)) {
+ _skb = skb_dequeue(&rx_queue);
+- _rtl_usb_rx_process_agg(hw, skb);
+- ieee80211_rx_irqsafe(hw, skb);
++ _rtl_usb_rx_process_agg(hw, _skb);
++ ieee80211_rx_irqsafe(hw, _skb);
+ }
+ }
+
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 94b79c3..9d7f172 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 1825629..5925e0b 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -342,17 +342,22 @@ err:
+ return err;
+ }
+
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ struct net_device *dev = vif->dev;
+- if (netif_carrier_ok(dev)) {
+- rtnl_lock();
+- netif_carrier_off(dev); /* discard queued packets */
+- if (netif_running(dev))
+- xenvif_down(vif);
+- rtnl_unlock();
+- xenvif_put(vif);
+- }
++
++ rtnl_lock();
++ netif_carrier_off(dev); /* discard queued packets */
++ if (netif_running(dev))
++ xenvif_down(vif);
++ rtnl_unlock();
++ xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++ if (netif_carrier_ok(vif->dev))
++ xenvif_carrier_off(vif);
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 15e332d..b802bb3 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
+ atomic_dec(&netbk->netfront_count);
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+@@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif,
+
+ do {
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- if (cons >= end)
++ if (cons == end)
+ break;
+ txp = RING_GET_REQUEST(&vif->tx, cons++);
+ } while (1);
+@@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif,
+ xenvif_put(vif);
+ }
+
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++ netdev_err(vif->dev, "fatal error; disabling device\n");
++ xenvif_carrier_off(vif);
++ xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+@@ -873,19 +881,22 @@ static int netbk_count_requests(struct xenvif *vif,
+
+ do {
+ if (frags >= work_to_do) {
+- netdev_dbg(vif->dev, "Need more frags\n");
++ netdev_err(vif->dev, "Need more frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_dbg(vif->dev, "Too many frags\n");
++ netdev_err(vif->dev, "Too many frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_dbg(vif->dev, "Frags galore\n");
++ netdev_err(vif->dev, "Frag is bigger than frame.\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+@@ -893,8 +904,9 @@ static int netbk_count_requests(struct xenvif *vif,
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+@@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ pending_idx = netbk->pending_ring[index];
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page)
+- return NULL;
++ goto err;
+
+ netbk->mmap_pages[pending_idx] = page;
+
+@@ -962,6 +974,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ }
+
+ return gop;
++err:
++ /* Unwind, freeing all pages and sending error responses. */
++ while (i-- > start) {
++ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
++ XEN_NETIF_RSP_ERROR);
++ }
++ /* The head too, if necessary. */
++ if (start)
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++ return NULL;
+ }
+
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+@@ -970,30 +993,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ {
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+- struct xenvif *vif = pending_tx_info[pending_idx].vif;
+- struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+
+ /* Check status of header. */
+ err = gop->status;
+- if (unlikely(err)) {
+- pending_ring_idx_t index;
+- index = pending_index(netbk->pending_prod++);
+- txp = &pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
+- }
++ if (unlikely(err))
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
+- pending_ring_idx_t index;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+
+@@ -1002,16 +1015,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+- txp = &netbk->pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Not the first error? Preceding frags already invalidated. */
+ if (err)
+@@ -1019,10 +1028,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ for (j = start; j < i; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+@@ -1056,7 +1065,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+
+ /* Take an extra reference to offset xen_netbk_idx_release */
+ get_page(netbk->mmap_pages[pending_idx]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+ }
+
+@@ -1069,7 +1078,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- netdev_dbg(vif->dev, "Missing extra info\n");
++ netdev_err(vif->dev, "Missing extra info\n");
++ netbk_fatal_tx_err(vif);
+ return -EBADR;
+ }
+
+@@ -1078,8 +1088,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ vif->tx.req_cons = ++cons;
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "Invalid extra type: %d\n", extra.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1095,13 +1106,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
+ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++ netdev_err(vif->dev, "GSO size must not be zero.\n");
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1238,9 +1251,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* Get a netif from the list with work to do. */
+ vif = poll_net_schedule_list(netbk);
++ /* This can sometimes happen because the test of
++ * list_empty(net_schedule_list) at the top of the
++ * loop is unlocked. Just go back and have another
++ * look.
++ */
+ if (!vif)
+ continue;
+
++ if (vif->tx.sring->req_prod - vif->tx.req_cons >
++ XEN_NETIF_TX_RING_SIZE) {
++ netdev_err(vif->dev,
++ "Impossible number of requests. "
++ "req_prod %d, req_cons %d, size %ld\n",
++ vif->tx.sring->req_prod, vif->tx.req_cons,
++ XEN_NETIF_TX_RING_SIZE);
++ netbk_fatal_tx_err(vif);
++ continue;
++ }
++
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ if (!work_to_do) {
+ xenvif_put(vif);
+@@ -1268,17 +1297,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do);
+ idx = vif->tx.req_cons;
+- if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(vif, &txreq, idx);
++ if (unlikely(work_to_do < 0))
+ continue;
+- }
+ }
+
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+- if (unlikely(ret < 0)) {
+- netbk_tx_err(vif, &txreq, idx - ret);
++ if (unlikely(ret < 0))
+ continue;
+- }
++
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1290,11 +1316,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset&~PAGE_MASK) + txreq.size);
+- netbk_tx_err(vif, &txreq, idx);
++ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
+@@ -1322,8 +1348,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(vif, skb, gso)) {
++ /* Failure in netbk_set_skb_gso is fatal. */
+ kfree_skb(skb);
+- netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
+@@ -1424,7 +1450,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1479,7 +1505,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
+
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status)
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+@@ -1493,7 +1520,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+
+ vif = pending_tx_info->vif;
+
+- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
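
The netback hunks above separate two failure classes: a bad individual request earns an error response, while a protocol violation by the frontend (too many frags, oversized offsets, impossible request counts) now takes the carrier down for good. A rough stand-alone model of that dispatch, with simplified stand-ins for tx_err/fatal_tx_err:

#include <stdbool.h>
#include <stdio.h>

static bool carrier_up = true;

static void tx_err(void)
{
    printf("XEN_NETIF_RSP_ERROR returned, interface keeps running\n");
}

static void fatal_tx_err(void)
{
    printf("fatal error; disabling device\n");
    carrier_up = false;
}

static void handle_request(int nfrags, int max_frags)
{
    if (!carrier_up)
        return;             /* device already fenced off */
    if (nfrags > max_frags)
        fatal_tx_err();     /* protocol violation: stop trusting it */
    else if (nfrags < 0)
        tx_err();           /* transient per-request failure */
    else
        printf("request with %d frags accepted\n", nfrags);
}

int main(void)
{
    handle_request(2, 17);  /* fine */
    handle_request(-1, 17); /* recoverable */
    handle_request(64, 17); /* fatal: carrier goes down */
    handle_request(2, 17);  /* silently ignored afterwards */
    return 0;
}
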
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index da8beb8..627b66a 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
+ {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct i2c_client *client = data;
++ struct rtc_device *rtc = i2c_get_clientdata(client);
+ int handled = 0, sr, err;
+
+ /*
+@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
+ if (sr & ISL1208_REG_SR_ALM) {
+ dev_dbg(&client->dev, "alarm!\n");
+
++ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
++
+ /* Clear the alarm */
+ sr &= ~ISL1208_REG_SR_ALM;
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index 1e80a48..73816d8 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -44,6 +44,7 @@
+ #define RTC_YMR 0x34 /* Year match register */
+ #define RTC_YLR 0x38 /* Year data load register */
+
++#define RTC_CR_EN (1 << 0) /* counter enable bit */
+ #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
+
+ #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
+@@ -312,7 +313,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ int ret;
+ struct pl031_local *ldata;
+ struct rtc_class_ops *ops = id->data;
+- unsigned long time;
++ unsigned long time, data;
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret)
+@@ -339,10 +340,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
+ dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
+
++ data = readl(ldata->base + RTC_CR);
+ /* Enable the clockwatch on ST Variants */
+ if (ldata->hw_designer == AMBA_VENDOR_ST)
+- writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
+- ldata->base + RTC_CR);
++ data |= RTC_CR_CWEN;
++ writel(data | RTC_CR_EN, ldata->base + RTC_CR);
+
+ /*
+ * On ST PL031 variants, the RTC reset value does not provide correct
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 34655d0..08e470f 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 5cc401b..c7cfbce 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ "defaulting to EHCI.\n");
+ dev_warn(&xhci_pdev->dev,
+ "USB 3.0 devices will work at USB 2.0 speeds.\n");
++ usb_disable_xhci_ports(xhci_pdev);
+ return;
+ }
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2ed591d..5c1f9e7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2504,6 +2504,8 @@ cleanup:
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE))
+ xhci_urb_free_priv(xhci, urb_priv);
++ else
++ kfree(urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+@@ -3032,7 +3034,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ * running_total.
+ */
+ packets_transferred = (running_total + trb_buff_len) /
+- usb_endpoint_maxp(&urb->ep->desc);
++ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+
+ if ((total_packet_count - packets_transferred) > 31)
+ return 31 << 17;
+@@ -3594,7 +3596,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+ total_packet_count = DIV_ROUND_UP(td_len,
+- usb_endpoint_maxp(&urb->ep->desc));
++ GET_MAX_PACKET(
++ usb_endpoint_maxp(&urb->ep->desc)));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+ total_packet_count++;
+@@ -3617,9 +3620,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ td = urb_priv->td[i];
+ for (j = 0; j < trbs_per_td; j++) {
+ u32 remainder = 0;
+- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
++ field = 0;
+
+ if (first_trb) {
++ field = TRB_TBC(burst_count) |
++ TRB_TLBPC(residue);
+ /* Queue the isoc TRB */
+ field |= TRB_TYPE(TRB_ISOC);
+ /* Assume URB_ISO_ASAP is set */
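
usb_endpoint_maxp() returns the raw wMaxPacketSize, whose bits 12:11 encode extra high-bandwidth transaction opportunities; dividing by the unmasked value undercounts packets. A small arithmetic demo of why the GET_MAX_PACKET() masking above matters (the field layout follows the USB spec; the numbers are examples):

#include <stdio.h>

#define GET_MAX_PACKET(p)  ((p) & 0x7ff)           /* bits 10:0 = packet size */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* high-bandwidth endpoint: 1024-byte packets, 2 extra transactions */
    unsigned int wMaxPacketSize = (2u << 11) | 1024;
    unsigned int td_len = 3000;

    printf("unmasked divisor: %u packet(s)\n",
           DIV_ROUND_UP(td_len, wMaxPacketSize));                 /* 1: wrong */
    printf("masked divisor:   %u packet(s)\n",
           DIV_ROUND_UP(td_len, GET_MAX_PACKET(wMaxPacketSize))); /* 3 */
    return 0;
}
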
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 2cc7c18..d644a66 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
+ /*
+ * ELV devices:
+ */
++ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index dd6edf8..97e0a6b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -147,6 +147,11 @@
+ #define XSENS_CONVERTER_6_PID 0xD38E
+ #define XSENS_CONVERTER_7_PID 0xD38F
+
++/**
++ * Zolix (www.zolix.com.cb) product ids
++ */
++#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
++
+ /*
+ * NDI (www.ndigital.com) product ids
+ */
+@@ -204,7 +209,7 @@
+
+ /*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+- * All of these devices use FTDI's vendor ID (0x0403).
++ * Almost all of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+@@ -212,6 +217,8 @@
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
++#define FTDI_ELV_VID 0x1B1F /* ELV AG */
++#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
+ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9db3e23..52cd814 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
++#define TELIT_PRODUCT_LE920 0x1200
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
+ #define TPLINK_VENDOR_ID 0x2357
+ #define TPLINK_PRODUCT_MA180 0x0201
+
++/* Changhong products */
++#define CHANGHONG_VENDOR_ID 0x2077
++#define CHANGHONG_PRODUCT_CH690 0x7001
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le920_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
++ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 6634477..14c4a82 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
+
+ /* Gobi 2000 devices */
+ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 105d900..16b0bf0 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ return 0;
+ }
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us)
++/* This places the HUAWEI usb dongles in multi-port mode */
++static int usb_stor_huawei_feature_init(struct us_data *us)
+ {
+ int result;
+
+@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return 0;
+ }
++
++/*
++ * It sends a SCSI switch command called 'rewind' to the Huawei dongle.
++ * When the dongle receives this command for the first time,
++ * it reboots immediately. After rebooting, it ignores this command,
++ * so it is unnecessary to read its response.
++ */
++static int usb_stor_huawei_scsi_init(struct us_data *us)
++{
++ int result = 0;
++ int act_len = 0;
++ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
++ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
++
++ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
++ bcbw->Tag = 0;
++ bcbw->DataTransferLength = 0;
++ bcbw->Flags = bcbw->Lun = 0;
++ bcbw->Length = sizeof(rewind_cmd);
++ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
++ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
++
++ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
++ US_BULK_CB_WRAP_LEN, &act_len);
++ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
++ return result;
++}
++
++/*
++ * It checks whether the device is a supported Huawei USB dongle.
++ * Huawei assigns the following product IDs to all of its mobile
++ * broadband dongles, including future ones, so if the product ID
++ * is not in this list, the device is not a Huawei mobile
++ * broadband dongle.
++ */
++static int usb_stor_huawei_dongles_pid(struct us_data *us)
++{
++ struct usb_interface_descriptor *idesc;
++ int idProduct;
++
++ idesc = &us->pusb_intf->cur_altsetting->desc;
++ idProduct = us->pusb_dev->descriptor.idProduct;
++ /* If the first port is a CDROM, the dongle is
++ * in single-port mode and a switch command
++ * must be sent. */
++ if (idesc && idesc->bInterfaceNumber == 0) {
++ if ((idProduct == 0x1001)
++ || (idProduct == 0x1003)
++ || (idProduct == 0x1004)
++ || (idProduct >= 0x1401 && idProduct <= 0x1500)
++ || (idProduct >= 0x1505 && idProduct <= 0x1600)
++ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int usb_stor_huawei_init(struct us_data *us)
++{
++ int result = 0;
++
++ if (usb_stor_huawei_dongles_pid(us)) {
++ if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++ result = usb_stor_huawei_scsi_init(us);
++ else
++ result = usb_stor_huawei_feature_init(us);
++ }
++ return result;
++}
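
usb_stor_huawei_init() above dispatches on the product ID: IDs at or above 0x1446 get the SCSI 'rewind' switch, older ones the vendor control request. A compilable sketch of the same ID-range test (ranges copied from the hunk; the printfs are illustrative):

#include <stdio.h>

static int is_huawei_dongle(unsigned int pid)
{
    return pid == 0x1001 || pid == 0x1003 || pid == 0x1004 ||
           (pid >= 0x1401 && pid <= 0x1500) ||
           (pid >= 0x1505 && pid <= 0x1600) ||
           (pid >= 0x1c02 && pid <= 0x2202);
}

int main(void)
{
    const unsigned int pids[] = { 0x1001, 0x1436, 0x1506, 0x9999 };

    for (unsigned int i = 0; i < sizeof(pids) / sizeof(pids[0]); i++) {
        unsigned int pid = pids[i];

        if (!is_huawei_dongle(pid))
            printf("0x%04x: not a Huawei dongle, no init\n", pid);
        else if (pid >= 0x1446)
            printf("0x%04x: SCSI 'rewind' init\n", pid);
        else
            printf("0x%04x: vendor control-request init\n", pid);
    }
    return 0;
}
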
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 529327f..5376d4f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+ * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us);
++/* This places the HUAWEI usb dongles in multi-port mode */
++int usb_stor_huawei_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index fa8a1b2..12640ef 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1515,335 +1515,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+ * This brings the HUAWEI data card devices into multi-port mode
+ */
+-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
++UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
+ 0),
+
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index db51ba1..d582af4 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
+ .useTransport = use_transport, \
+ }
+
++#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
++ vendor_name, product_name, use_protocol, use_transport, \
++ init_function, Flags) \
++{ \
++ .vendorName = vendor_name, \
++ .productName = product_name, \
++ .useProtocol = use_protocol, \
++ .useTransport = use_transport, \
++ .initFunction = init_function, \
++}
++
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -128,6 +139,7 @@ static struct us_unusual_dev us_unusual_dev_list[] = {
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+
+ #ifdef CONFIG_PM /* Minimal support for suspend and resume */
+diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
+index b969279..a9b5f2e 100644
+--- a/drivers/usb/storage/usual-tables.c
++++ b/drivers/usb/storage/usual-tables.c
+@@ -46,6 +46,20 @@
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+ .driver_info = ((useType)<<24) }
+
++/* Define a device matched by vendor ID and interface descriptors */
++#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
++ vendorName, productName, useProtocol, useTransport, \
++ initFunction, flags) \
++{ \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (id_vendor), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr), \
++ .driver_info = (flags) \
++}
++
+ struct usb_device_id usb_storage_usb_ids[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+
+ /*
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index c598cfb..2b5e695 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
+ if (ret < 0)
+ printk(KERN_ERR "NILFS: GC failed during preparation: "
+ "cannot read source blocks: err=%d\n", ret);
+- else
++ else {
++ if (nilfs_sb_need_update(nilfs))
++ set_nilfs_discontinued(nilfs);
+ ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
++ }
+
+ nilfs_remove_all_gcinodes(nilfs);
+ clear_nilfs_gc_running(nilfs);
+diff --git a/fs/splice.c b/fs/splice.c
+index 014fcb4..58ab918 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -697,8 +697,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
+ return -EINVAL;
+
+ more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+- if (sd->len < sd->total_len)
++
++ if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ more |= MSG_SENDPAGE_NOTLAST;
++
+ return file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ }
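
The splice fix only sets MSG_SENDPAGE_NOTLAST when the pipe really holds another buffer, so the transport is never told to wait for data that may not arrive. A hedged model of the flag computation (the flag values match their Linux definitions, but this is a sketch, not the kernel path):

#include <stdio.h>

#define MSG_MORE             0x8000
#define MSG_SENDPAGE_NOTLAST 0x20000

static int sendpage_flags(int len, int total_len, int nrbufs, int splice_more)
{
    int more = splice_more ? MSG_MORE : 0;

    /* more data is expected AND it is already sitting in the pipe */
    if (len < total_len && nrbufs > 1)
        more |= MSG_SENDPAGE_NOTLAST;
    return more;
}

int main(void)
{
    printf("partial send, pipe holds more: 0x%x\n",
           sendpage_flags(100, 300, 3, 0));
    printf("partial send, pipe drained:    0x%x\n",
           sendpage_flags(100, 300, 1, 0)); /* no NOTLAST: flush it now */
    return 0;
}
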
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1e86bb4..8204898 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2597,7 +2597,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 78ab24a..67fedad 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
+ * TASK_KILLABLE sleeps.
+ */
+ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
+- signal_wake_up(child, task_is_traced(child));
++ ptrace_signal_wake_up(child, true);
+
+ spin_unlock(&child->sighand->siglock);
+ }
+
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++ bool ret = false;
++
++ /* Lockless, nobody but us can set this flag */
++ if (task->jobctl & JOBCTL_LISTENING)
++ return ret;
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++ task->state = __TASK_TRACED;
++ ret = true;
++ }
++ spin_unlock_irq(&task->sighand->siglock);
++
++ return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++ if (task->state != __TASK_TRACED)
++ return;
++
++ WARN_ON(!task->ptrace || task->parent != current);
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
+ /**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ * be changed by us so it's not changing right after this.
+ */
+ read_lock(&tasklist_lock);
+- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
++ if (child->ptrace && child->parent == current) {
++ WARN_ON(child->state == __TASK_TRACED);
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+ */
+- spin_lock_irq(&child->sighand->siglock);
+- WARN_ON_ONCE(task_is_stopped(child));
+- if (ignore_state || (task_is_traced(child) &&
+- !(child->jobctl & JOBCTL_LISTENING)))
++ if (ignore_state || ptrace_freeze_traced(child))
+ ret = 0;
+- spin_unlock_irq(&child->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
+
+- if (!ret && !ignore_state)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++ if (!ret && !ignore_state) {
++ if (!wait_task_inactive(child, __TASK_TRACED)) {
++ /*
++ * This can only happen if may_ptrace_stop() fails and
++ * ptrace_stop() changes ->state back to TASK_RUNNING,
++ * so we should not worry about leaking __TASK_TRACED.
++ */
++ WARN_ON(child->state == __TASK_TRACED);
++ ret = -ESRCH;
++ }
++ }
+
+- /* All systems go.. */
+ return ret;
+ }
+
+@@ -307,7 +346,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ */
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+- signal_wake_up(task, 1);
++ signal_wake_up_state(task, __TASK_STOPPED);
+
+ spin_unlock(&task->sighand->siglock);
+
+@@ -736,7 +775,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * tracee into STOP.
+ */
+ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+- signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+@@ -762,7 +801,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * start of this trap and now. Trigger re-trap.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+- signal_wake_up(child, true);
++ ptrace_signal_wake_up(child, true);
+ ret = 0;
+ }
+ unlock_task_sighand(child, &flags);
+@@ -899,6 +938,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
+
+ out_put_task_struct:
+ put_task_struct(child);
+@@ -1038,8 +1079,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
+- if (!ret)
++ if (!ret) {
+ ret = compat_arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
++ }
+
+ out_put_task_struct:
+ put_task_struct(child);
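
The new ptrace_freeze_traced()/ptrace_unfreeze_traced() pair parks the tracee in a state nothing can wake, runs the request, then restores TASK_TRACED unless a fatal signal arrived in between. A rough userspace model of that state machine (TASK_TRACED_FROZEN stands in for the kernel's __TASK_TRACED; no real scheduling happens here):

#include <stdbool.h>
#include <stdio.h>

enum state { TASK_RUNNING, TASK_TRACED, TASK_TRACED_FROZEN };

struct task {
    enum state state;
    bool fatal_pending;
};

static bool freeze_traced(struct task *t)
{
    if (t->state == TASK_TRACED && !t->fatal_pending) {
        t->state = TASK_TRACED_FROZEN; /* nothing, not even SIGKILL, wakes it */
        return true;
    }
    return false;
}

static void unfreeze_traced(struct task *t)
{
    if (t->state != TASK_TRACED_FROZEN)
        return;
    if (t->fatal_pending) {
        t->state = TASK_RUNNING; /* wake_up_state() analogue: let it die */
        printf("tracee woken to handle the fatal signal\n");
    } else {
        t->state = TASK_TRACED;
    }
}

int main(void)
{
    struct task t = { .state = TASK_TRACED, .fatal_pending = false };

    if (freeze_traced(&t)) {
        /* ... ptrace request runs; SIGKILL lands in the middle ... */
        t.fatal_pending = true;
        unfreeze_traced(&t);
    }
    return 0;
}
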
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 7640b3a..08aa28e 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -757,6 +757,7 @@ static void __init __reserve_region_with_split(struct resource *root,
+ struct resource *parent = root;
+ struct resource *conflict;
+ struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
++ struct resource *next_res = NULL;
+
+ if (!res)
+ return;
+@@ -766,21 +767,46 @@ static void __init __reserve_region_with_split(struct resource *root,
+ res->end = end;
+ res->flags = IORESOURCE_BUSY;
+
+- conflict = __request_resource(parent, res);
+- if (!conflict)
+- return;
++ while (1) {
+
+- /* failed, split and try again */
+- kfree(res);
++ conflict = __request_resource(parent, res);
++ if (!conflict) {
++ if (!next_res)
++ break;
++ res = next_res;
++ next_res = NULL;
++ continue;
++ }
+
+- /* conflict covered whole area */
+- if (conflict->start <= start && conflict->end >= end)
+- return;
++ /* conflict covered whole area */
++ if (conflict->start <= res->start &&
++ conflict->end >= res->end) {
++ kfree(res);
++ WARN_ON(next_res);
++ break;
++ }
++
++ /* failed, split and try again */
++ if (conflict->start > res->start) {
++ end = res->end;
++ res->end = conflict->start - 1;
++ if (conflict->end < end) {
++ next_res = kzalloc(sizeof(*next_res),
++ GFP_ATOMIC);
++ if (!next_res) {
++ kfree(res);
++ break;
++ }
++ next_res->name = name;
++ next_res->start = conflict->end + 1;
++ next_res->end = end;
++ next_res->flags = IORESOURCE_BUSY;
++ }
++ } else {
++ res->start = conflict->end + 1;
++ }
++ }
+
+- if (conflict->start > start)
+- __reserve_region_with_split(root, start, conflict->start-1, name);
+- if (conflict->end < end)
+- __reserve_region_with_split(root, conflict->end+1, end, name);
+ }
+
+ void __init reserve_region_with_split(struct resource *root,
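
The rewrite above converts __reserve_region_with_split() from recursion to a loop carrying at most one deferred interval, bounding stack use during early boot. A toy iterative version of the same splitting over a sorted list of conflicts (interfaces invented for illustration):

#include <stdio.h>

struct range { long start, end; };

/* conflicts must be sorted and non-overlapping, as a resource tree is */
static void reserve_with_split(const struct range *conflicts, int n,
                               long start, long end)
{
    struct range cur = { start, end };

    for (int i = 0; i < n && cur.start <= cur.end; i++) {
        const struct range *c = &conflicts[i];

        if (c->end < cur.start || c->start > cur.end)
            continue;             /* no overlap with this conflict */
        if (c->start > cur.start) /* free gap before the conflict */
            printf("reserve [%ld, %ld]\n", cur.start, c->start - 1);
        cur.start = c->end + 1;   /* resume after the conflict */
    }
    if (cur.start <= cur.end)
        printf("reserve [%ld, %ld]\n", cur.start, cur.end);
}

int main(void)
{
    const struct range busy[] = { { 10, 19 }, { 30, 34 } };

    /* prints [0,9], [20,29], [35,50] without any recursion */
    reserve_with_split(busy, 2, 0, 50);
    return 0;
}
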
+diff --git a/kernel/sched.c b/kernel/sched.c
+index fcc893f..eeeec4e 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2924,7 +2924,8 @@ out:
+ */
+ int wake_up_process(struct task_struct *p)
+ {
+- return try_to_wake_up(p, TASK_ALL, 0);
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 78fcacf..6ad4fb3 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -384,7 +384,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+ static int do_balance_runtime(struct rt_rq *rt_rq)
+ {
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
++ struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
+ int i, weight, more = 0;
+ u64 rt_period;
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 08e0b97..d2f55ea 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -676,23 +676,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+- unsigned int mask;
+-
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ /*
+- * For SIGKILL, we want to wake it up in the stopped/traced/killable
++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+ * executing another processor and just now entering stopped state.
+ * By using wake_up_state, we ensure the process will wake up and
+ * handle its death signal.
+ */
+- mask = TASK_INTERRUPTIBLE;
+- if (resume)
+- mask |= TASK_WAKEKILL;
+- if (!wake_up_state(t, mask))
++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ kick_process(t);
+ }
+
+@@ -841,7 +835,7 @@ static void ptrace_trap_notify(struct task_struct *t)
+ assert_spin_locked(&t->sighand->siglock);
+
+ task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+- signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+ }
+
+ /*
+@@ -1765,6 +1759,10 @@ static inline int may_ptrace_stop(void)
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_state != NULL. Otherwise it
+ * is safe to enter schedule().
++ *
++ * This is almost outdated: a task with a pending SIGKILL can't
++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++ * after SIGKILL has already been dequeued.
+ */
+ if (unlikely(current->mm->core_state) &&
+ unlikely(current->mm == current->parent->mm))
+@@ -1890,6 +1888,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ if (gstop_done)
+ do_notify_parent_cldstop(current, false, why);
+
++ /* tasklist protects us from ptrace_freeze_traced() */
+ __set_current_state(TASK_RUNNING);
+ if (clear_code)
+ current->exit_code = 0;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 6033f02..7a157b3 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1972,7 +1972,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
+- if (ev->ncmd) {
++ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (!skb_queue_empty(&hdev->cmd_q))
+ tasklet_schedule(&hdev->cmd_task);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 1849ee0..9ab60e6 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -642,6 +642,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ skb_pull(skb, sizeof(code));
+
++ /*
++ * The SMP context must be initialized for all other PDUs except
++ * pairing and security requests. If we get any other PDU when
++ * not initialized simply disconnect (done if this function
++ * returns an error).
++ */
++ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
++ !conn->smp_chan) {
++ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
++ kfree_skb(skb);
++ return -ENOTSUPP;
++ }
++
+ switch (code) {
+ case SMP_CMD_PAIRING_REQ:
+ reason = smp_cmd_pairing_req(conn, skb);
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 577ea5d..7c1745d 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -245,6 +245,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
+ struct net_device *dev = skb->dev;
+ u32 len;
+
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
+ iph = ip_hdr(skb);
+ opt = &(IPCB(skb)->opt);
+
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 7bc9991..2ef7da0 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
+ return -EFAULT;
+ i += len;
+ mutex_lock(&pktgen_thread_lock);
+- pktgen_add_device(t, f);
++ ret = pktgen_add_device(t, f);
+ mutex_unlock(&pktgen_thread_lock);
+- ret = count;
+- sprintf(pg_result, "OK: add_device=%s", f);
++ if (!ret) {
++ ret = count;
++ sprintf(pg_result, "OK: add_device=%s", f);
++ } else
++ sprintf(pg_result, "ERROR: can not add device %s", f);
+ goto out;
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 0106d25..3b36002 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -600,7 +600,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ case IP_TTL:
+ if (optlen < 1)
+ goto e_inval;
+- if (val != -1 && (val < 0 || val > 255))
++ if (val != -1 && (val < 1 || val > 255))
+ goto e_inval;
+ inet->uc_ttl = val;
+ break;
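[Editor's sketch, not part of the patch.] The one-character change above tightens IP_TTL validation so that 0 is rejected: a unicast TTL of 0 would expire before the first hop. A standalone sketch of the resulting check (the -1 sentinel meaning "use the system default" is the kernel's existing convention):

#include <errno.h>

/* Valid settings are -1 (revert to the default) or 1..255. */
static int validate_uc_ttl(int val)
{
    if (val == -1)
        return 0;
    if (val < 1 || val > 255)
        return -EINVAL;   /* 0 is now rejected along with out-of-range */
    return 0;
}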
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index aab8f08..e865ed1 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3655,6 +3655,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
+ }
+ } else {
+ if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
++ if (!tcp_packets_in_flight(tp)) {
++ tcp_enter_frto_loss(sk, 2, flag);
++ return true;
++ }
++
+ /* Prevent sending of new data. */
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ tcp_packets_in_flight(tp));
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index aef80d7..b27baed 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1739,7 +1739,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ continue;
+ if ((rt->rt6i_flags & flags) != flags)
+ continue;
+- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
++ if ((rt->rt6i_flags & noflags) != 0)
+ continue;
+ dst_hold(&rt->dst);
+ break;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ae98e09..3ccd9b2 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1284,10 +1284,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ cork->length = 0;
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
++ exthdrlen = (opt ? opt->opt_flen : 0);
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
+- dst_exthdrlen = rt->dst.header_len;
++ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
+ } else {
+ rt = (struct rt6_info *)cork->dst;
+ fl6 = &inet->cork.fl.u.ip6;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 19724bd..791c1fa 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -819,7 +819,8 @@ restart:
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+
+- if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
++ if (!dst_get_neighbour_raw(&rt->dst)
++ && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 85afc13..835fcea 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2422,13 +2422,15 @@ static int packet_release(struct socket *sock)
+
+ packet_flush_mclist(sk);
+
+- memset(&req_u, 0, sizeof(req_u));
+-
+- if (po->rx_ring.pg_vec)
++ if (po->rx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 0);
++ }
+
+- if (po->tx_ring.pg_vec)
++ if (po->tx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 1);
++ }
+
+ fanout_release(sk);
+
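[Editor's sketch, not part of the patch.] The af_packet hunk above moves the memset inside each branch because packet_set_ring() may modify the request union, so the TX teardown must not see state left over from the RX teardown. The pattern, reduced to a compile-clean sketch with illustrative types:

#include <string.h>

union ring_req {
    struct { int block_nr; } v1;
    struct { int block_nr, timeout; } v3;
};

/* Stand-in for packet_set_ring(..., closing); may scribble in *req. */
static void teardown_ring(union ring_req *req, int tx)
{
    req->v3.timeout = tx;
}

static void release_rings(int have_rx, int have_tx)
{
    union ring_req req;

    if (have_rx) {
        memset(&req, 0, sizeof(req));   /* fresh state for the RX ring */
        teardown_ring(&req, 0);
    }
    if (have_tx) {
        memset(&req, 0, sizeof(req));   /* and again for the TX ring */
        teardown_ring(&req, 1);
    }
}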
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index c8cc24e..dbe5870a 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ /* Final destructor for endpoint. */
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
++ int i;
++
+ SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+
+ /* Free up the HMAC transform. */
+@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ sctp_inq_free(&ep->base.inqueue);
+ sctp_bind_addr_free(&ep->base.bind_addr);
+
++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
++
+ /* Remove and free the port */
+ if (sctp_sk(ep->base.sk)->bind_hash)
+ sctp_put_port(ep->base.sk);
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index cfeb1d4..96eb168 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+
+ /* Free the outqueue structure and any related pending chunks.
+ */
+-void sctp_outq_teardown(struct sctp_outq *q)
++static void __sctp_outq_teardown(struct sctp_outq *q)
+ {
+ struct sctp_transport *transport;
+ struct list_head *lchunk, *temp;
+@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ sctp_chunk_free(chunk);
+ }
+
+- q->error = 0;
+-
+ /* Throw away any leftover control chunks. */
+ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ list_del_init(&chunk->list);
+@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ }
+ }
+
++void sctp_outq_teardown(struct sctp_outq *q)
++{
++ __sctp_outq_teardown(q);
++ sctp_outq_init(q->asoc, q);
++}
++
+ /* Free the outqueue structure and any related pending chunks. */
+ void sctp_outq_free(struct sctp_outq *q)
+ {
+ /* Throw away leftover chunks. */
+- sctp_outq_teardown(q);
++ __sctp_outq_teardown(q);
+
+ /* If we were kmalloc()'d, free the memory. */
+ if (q->malloced)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index fa8333b..5e0d86e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+
+ ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ out:
+- kfree(authkey);
++ kzfree(authkey);
+ return ret;
+ }
+
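[Editor's sketch, not part of the patch.] kzfree() zeroes the buffer before returning it to the allocator, so the SCTP authentication key does not linger in freed heap memory. A user-space analogue, with the caveat that production code should use explicit_bzero() or equivalent so the compiler cannot eliminate the store as dead:

#include <stdlib.h>
#include <string.h>

static void zfree(void *p, size_t len)
{
    if (!p)
        return;
    memset(p, 0, len);  /* wipe the secret; see the caveat above */
    free(p);
}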
diff --git a/3.2.54/1039_linux-3.2.40.patch b/3.2.54/1039_linux-3.2.40.patch
new file mode 100644
index 0000000..f26b39c
--- /dev/null
+++ b/3.2.54/1039_linux-3.2.40.patch
@@ -0,0 +1,6295 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 81c287f..ddbf18e 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -552,6 +552,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ UART at the specified I/O port or MMIO address,
+ switching to the matching ttyS device later. The
+ options are the same as for ttyS, above.
++ hvc<n> Use the hypervisor console device <n>. This is for
++ both Xen and PowerPC hypervisors.
+
+ If the device connected to the port is not a TTY but a braille
+ device, prepend "brl," before the device type, for instance
+@@ -703,6 +705,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ earlyprintk= [X86,SH,BLACKFIN]
+ earlyprintk=vga
++ earlyprintk=xen
+ earlyprintk=serial[,ttySn[,baudrate]]
+ earlyprintk=ttySn[,baudrate]
+ earlyprintk=dbgp[debugController#]
+@@ -720,6 +723,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ The VGA output is eventually overwritten by the real
+ console.
+
++ The xen output can only be used by Xen PV guests.
++
+ ekgdboc= [X86,KGDB] Allow early kernel console debugging
+ ekgdboc=kbd
+
+diff --git a/Makefile b/Makefile
+index 0fceb8b..47af1e9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 39
++SUBLEVEL = 40
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
+index b7de471..b802f28 100644
+--- a/arch/arm/mach-pxa/include/mach/smemc.h
++++ b/arch/arm/mach-pxa/include/mach/smemc.h
+@@ -37,6 +37,7 @@
+ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */
+ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */
+ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */
++#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */
+
+ /*
+ * More handy macros for PCMCIA
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
+index 7992305..f38aa89 100644
+--- a/arch/arm/mach-pxa/smemc.c
++++ b/arch/arm/mach-pxa/smemc.c
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
+ __raw_writel(csadrcfg[1], CSADRCFG1);
+ __raw_writel(csadrcfg[2], CSADRCFG2);
+ __raw_writel(csadrcfg[3], CSADRCFG3);
++ /* CSMSADRCFG wakes up in its default state (0), so we need to set it */
++ __raw_writel(0x2, CSMSADRCFG);
+ }
+
+ static struct syscore_ops smemc_syscore_ops = {
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
+
+ static int __init smemc_init(void)
+ {
+- if (cpu_is_pxa3xx())
++ if (cpu_is_pxa3xx()) {
++ /*
++ * The only documentation we have on the
++ * Chip Select Configuration Register (CSMSADRCFG) is that
++ * it must be programmed to 0x2.
++ * Moreover, in the bit definitions, the second bit
++ * (CSMSADRCFG[1]) is called "SETALWAYS".
++ * Other bits are reserved in this register.
++ */
++ __raw_writel(0x2, CSMSADRCFG);
++
+ register_syscore_ops(&smemc_syscore_ops);
++ }
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-s3c2410/include/mach/debug-macro.S b/arch/arm/mach-s3c2410/include/mach/debug-macro.S
+index 4135de8..13ed33c 100644
+--- a/arch/arm/mach-s3c2410/include/mach/debug-macro.S
++++ b/arch/arm/mach-s3c2410/include/mach/debug-macro.S
+@@ -40,17 +40,17 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+ 1004:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ moveq \rd, \rd, lsr #SHIFT_2440TXF
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+ .macro fifo_full_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+@@ -68,18 +68,18 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+
+ 10000:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ andne \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_level_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ .endm
+
+diff --git a/arch/arm/mach-s3c2410/include/mach/entry-macro.S b/arch/arm/mach-s3c2410/include/mach/entry-macro.S
+index 473b3cd..ef2287b 100644
+--- a/arch/arm/mach-s3c2410/include/mach/entry-macro.S
++++ b/arch/arm/mach-s3c2410/include/mach/entry-macro.S
+@@ -34,10 +34,10 @@
+
+ @@ try the interrupt offset register, since it is there
+
+- ldr \irqstat, [ \base, #INTPND ]
++ ldr \irqstat, [\base, #INTPND ]
+ teq \irqstat, #0
+ beq 1002f
+- ldr \irqnr, [ \base, #INTOFFSET ]
++ ldr \irqnr, [\base, #INTOFFSET ]
+ mov \tmp, #1
+ tst \irqstat, \tmp, lsl \irqnr
+ bne 1001f
+diff --git a/arch/arm/mach-s3c2410/pm-h1940.S b/arch/arm/mach-s3c2410/pm-h1940.S
+index c93bf2d..6183a68 100644
+--- a/arch/arm/mach-s3c2410/pm-h1940.S
++++ b/arch/arm/mach-s3c2410/pm-h1940.S
+@@ -30,4 +30,4 @@
+
+ h1940_pm_return:
+ mov r0, #S3C2410_PA_GPIO
+- ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ]
++ ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO]
+diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
+index dd5b638..65200ae 100644
+--- a/arch/arm/mach-s3c2410/sleep.S
++++ b/arch/arm/mach-s3c2410/sleep.S
+@@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend)
+ ldr r4, =S3C2410_REFRESH
+ ldr r5, =S3C24XX_MISCCR
+ ldr r6, =S3C2410_CLKCON
+- ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB)
+- ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB)
+- ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB)
++ ldr r7, [r4] @ get REFRESH (and ensure in TLB)
++ ldr r8, [r5] @ get MISCCR (and ensure in TLB)
++ ldr r9, [r6] @ get CLKCON (and ensure in TLB)
+
+ orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command
+ orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
+@@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend)
+ @@ align next bit of code to cache line
+ .align 5
+ s3c2410_do_sleep:
+- streq r7, [ r4 ] @ SDRAM sleep command
+- streq r8, [ r5 ] @ SDRAM power-down config
+- streq r9, [ r6 ] @ CPU sleep
++ streq r7, [r4] @ SDRAM sleep command
++ streq r8, [r5] @ SDRAM power-down config
++ streq r9, [r6] @ CPU sleep
+ 1: beq 1b
+ mov pc, r14
+diff --git a/arch/arm/mach-s3c2412/sleep.S b/arch/arm/mach-s3c2412/sleep.S
+index c82418e..5adaceb 100644
+--- a/arch/arm/mach-s3c2412/sleep.S
++++ b/arch/arm/mach-s3c2412/sleep.S
+@@ -57,12 +57,12 @@ s3c2412_sleep_enter1:
+ * retry, as simply returning causes the system to lock.
+ */
+
+- ldrne r9, [ r1 ]
+- strne r9, [ r1 ]
+- ldrne r9, [ r2 ]
+- strne r9, [ r2 ]
+- ldrne r9, [ r3 ]
+- strne r9, [ r3 ]
++ ldrne r9, [r1]
++ strne r9, [r1]
++ ldrne r9, [r2]
++ strne r9, [r2]
++ ldrne r9, [r3]
++ strne r9, [r3]
+ bne s3c2412_sleep_enter1
+
+ mov pc, r14
+diff --git a/arch/arm/mach-w90x900/include/mach/entry-macro.S b/arch/arm/mach-w90x900/include/mach/entry-macro.S
+index d39aca5..08436cf 100644
+--- a/arch/arm/mach-w90x900/include/mach/entry-macro.S
++++ b/arch/arm/mach-w90x900/include/mach/entry-macro.S
+@@ -22,8 +22,8 @@
+
+ mov \base, #AIC_BA
+
+- ldr \irqnr, [ \base, #AIC_IPER]
+- ldr \irqnr, [ \base, #AIC_ISNR]
++ ldr \irqnr, [\base, #AIC_IPER]
++ ldr \irqnr, [\base, #AIC_ISNR]
+ cmp \irqnr, #0
+
+ .endm
+diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
+index 207e275..f3a9cff 100644
+--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
++++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
+@@ -14,12 +14,12 @@
+ /* The S5PV210/S5PC110 implementations are as belows. */
+
+ .macro fifo_level_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S5PV210_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_full_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S5PV210_UFSTAT_TXFULL
+ .endm
+
+@@ -27,7 +27,7 @@
+ * most widely re-used */
+
+ .macro fifo_level_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+@@ -36,7 +36,7 @@
+ #endif
+
+ .macro fifo_full_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2440_UFSTAT_TXFULL
+ .endm
+
+@@ -45,11 +45,11 @@
+ #endif
+
+ .macro senduart,rd,rx
+- strb \rd, [\rx, # S3C2410_UTXH ]
++ strb \rd, [\rx, # S3C2410_UTXH]
+ .endm
+
+ .macro busyuart, rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -60,7 +60,7 @@
+
+ 1001:
+ @ busy waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+@@ -68,7 +68,7 @@
+ .endm
+
+ .macro waituart,rd,rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -79,7 +79,7 @@
+ b 1002f
+ 1001:
+ @ idle waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 22dadeb..9d35a3e 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -12,11 +12,10 @@
+
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/mm_types.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+
+-struct vm_area_struct;
+-
+ /*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+@@ -40,7 +39,14 @@ struct vm_area_struct;
+ do{ \
+ *(pteptr) = (pteval); \
+ } while(0)
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
++
++extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++
++#define set_pte_at(mm, addr, ptep, pteval) \
++ do { \
++ set_pte(ptep, pteval); \
++ purge_tlb_entries(mm, addr); \
++ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+
+@@ -464,6 +470,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
++ purge_tlb_entries(mm, addr);
+ #else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 83335f3..5241698 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -421,6 +421,24 @@ void kunmap_parisc(void *addr)
+ EXPORT_SYMBOL(kunmap_parisc);
+ #endif
+
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++ unsigned long flags;
++
++ /* Note: purge_tlb_entries can be called at startup with
++ no context. */
++
++ /* Disable preemption while we play with %sr1. */
++ preempt_disable();
++ mtsp(mm->context, 1);
++ purge_tlb_start(flags);
++ pdtlb(addr);
++ pitlb(addr);
++ purge_tlb_end(flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL(purge_tlb_entries);
++
+ void __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
+ {
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index 66ea9b8..21165a4 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -61,6 +61,7 @@ void __init pci_addr_cache_build(void);
+ */
+ void eeh_add_device_tree_early(struct device_node *);
+ void eeh_add_device_tree_late(struct pci_bus *);
++void eeh_add_sysfs_files(struct pci_bus *);
+
+ /**
+ * eeh_remove_device_recursive - undo EEH for device & children.
+@@ -105,6 +106,8 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+ static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
++static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
++
+ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+ #define EEH_POSSIBLE_ERROR(val, type) (0)
+ #define EEH_IO_ERROR_VALUE(size) (-1UL)
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index 26ccbf7..4c0908d 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0;
+ static void kexec_smp_down(void *arg)
+ {
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure our irqs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ while(kexec_all_irq_disabled == 0)
+@@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void)
+ wake_offline_cpus();
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure IRQs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+@@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void)
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(0, 0);
+ local_irq_disable();
++ hard_irq_disable();
+ }
+
+ #endif /* SMP */
+diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
+index e1612df..b10beef 100644
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -91,6 +91,9 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
++ /* sysfs files should only be added after devices are added */
++ eeh_add_sysfs_files(phb->bus);
++
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index 458ed3b..a3cd949 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -1536,11 +1536,14 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
++ /* Fixup EEH */
++ eeh_add_device_tree_late(bus);
++
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+- /* Fixup EEH */
+- eeh_add_device_tree_late(bus);
++ /* sysfs files should only be added after devices are added */
++ eeh_add_sysfs_files(bus);
+ }
+ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
+diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
+index 5658690..389e06b 100644
+--- a/arch/powerpc/platforms/pseries/eeh.c
++++ b/arch/powerpc/platforms/pseries/eeh.c
+@@ -1238,7 +1238,6 @@ static void eeh_add_device_late(struct pci_dev *dev)
+ pdn->pcidev = dev;
+
+ pci_addr_cache_insert_device(dev);
+- eeh_sysfs_add_device(dev);
+ }
+
+ void eeh_add_device_tree_late(struct pci_bus *bus)
+@@ -1257,6 +1256,29 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
+ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
+
+ /**
++ * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
++ * @bus: PCI bus
++ *
++ * This routine must be used to add EEH sysfs files for PCI
++ * devices which are attached to the indicated PCI bus. The PCI bus
++ * is added after system boot through hotplug or dlpar.
++ */
++void eeh_add_sysfs_files(struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ list_for_each_entry(dev, &bus->devices, bus_list) {
++ eeh_sysfs_add_device(dev);
++ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
++ struct pci_bus *subbus = dev->subordinate;
++ if (subbus)
++ eeh_add_sysfs_files(subbus);
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
++
++/**
+ * eeh_remove_device - undo EEH setup for the indicated pci device
+ * @dev: pci device to be removed
+ *
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 8644366..b2f44de 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -121,6 +121,9 @@ static int s390_next_ktime(ktime_t expires,
+ nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+ do_div(nsecs, 125);
+ S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
++ /* Program the maximum value if we have an overflow (== year 2042) */
++ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
++ S390_lowcore.clock_comparator = -1ULL;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index dffcaa4..4db9b1e 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -597,6 +597,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ } else
+ prefix = 0;
+
++ /*
++ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
++ * copying in vcpu load/put. Lets update our copies before we save
++ * it into the save area
++ */
++ save_fp_regs(&vcpu->arch.guest_fpregs);
++ save_access_regs(vcpu->arch.guest_acrs);
++
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
+ vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ return -EFAULT;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index efb4294..9a42703 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1150,7 +1150,7 @@ config DIRECT_GBPAGES
+ config NUMA
+ bool "Numa Memory Allocation and Scheduler Support"
+ depends on SMP
+- depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
++ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
+ default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
+ ---help---
+ Enable NUMA (Non Uniform Memory Access) support.
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 884507e..6be9909 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index f5373df..db4f704 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -20,12 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
+ }
+ early_param("x2apic_phys", set_x2apic_phys_mode);
+
++static bool x2apic_fadt_phys(void)
++{
++ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
++ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
++ printk(KERN_DEBUG "System requires x2apic physical mode\n");
++ return true;
++ }
++ return false;
++}
++
+ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+ {
+- if (x2apic_phys)
+- return x2apic_enabled();
+- else
+- return 0;
++ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
+ }
+
+ static void
+@@ -108,7 +115,7 @@ static void init_x2apic_ldr(void)
+
+ static int x2apic_phys_probe(void)
+ {
+- if (x2apic_mode && x2apic_phys)
++ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+ return 1;
+
+ return apic == &apic_x2apic_phys;
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 0a630dd..646d192 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void)
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+
+- clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
++ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
+index af0699b..f6c4674 100644
+--- a/arch/x86/kernel/head.c
++++ b/arch/x86/kernel/head.c
+@@ -5,8 +5,6 @@
+ #include <asm/setup.h>
+ #include <asm/bios_ebda.h>
+
+-#define BIOS_LOWMEM_KILOBYTES 0x413
+-
+ /*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+@@ -16,17 +14,30 @@
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
++ *
++ * This function is deliberately very conservative. Losing
++ * memory in the bottom megabyte is rarely a problem, as long
++ * as we have enough memory to install the trampoline. Using
++ * memory that is in use by the BIOS or by some DMA device
++ * the BIOS didn't shut down *is* a big problem.
+ */
++
++#define BIOS_LOWMEM_KILOBYTES 0x413
++#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
++#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
++
+ void __init reserve_ebda_region(void)
+ {
+ unsigned int lowmem, ebda_addr;
+
+- /* To determine the position of the EBDA and the */
+- /* end of conventional memory, we need to look at */
+- /* the BIOS data area. In a paravirtual environment */
+- /* that area is absent. We'll just have to assume */
+- /* that the paravirt case can handle memory setup */
+- /* correctly, without our help. */
++ /*
++ * To determine the position of the EBDA and the
++ * end of conventional memory, we need to look at
++ * the BIOS data area. In a paravirtual environment
++ * that area is absent. We'll just have to assume
++ * that the paravirt case can handle memory setup
++ * correctly, without our help.
++ */
+ if (paravirt_enabled())
+ return;
+
+@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
+ /* start of EBDA area */
+ ebda_addr = get_bios_ebda();
+
+- /* Fixup: bios puts an EBDA in the top 64K segment */
+- /* of conventional memory, but does not adjust lowmem. */
+- if ((lowmem - ebda_addr) <= 0x10000)
+- lowmem = ebda_addr;
++ /*
++ * Note: some old Dells seem to need 4k EBDA without
++ * reporting so, so just consider the memory above 0x9f000
++ * to be off limits (bugzilla 2990).
++ */
++
++ /* If the EBDA address is below 128K, assume it is bogus */
++ if (ebda_addr < INSANE_CUTOFF)
++ ebda_addr = LOWMEM_CAP;
+
+- /* Fixup: bios does not report an EBDA at all. */
+- /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
+- if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+- lowmem = 0x9f000;
++ /* If lowmem is less than 128K, assume it is bogus */
++ if (lowmem < INSANE_CUTOFF)
++ lowmem = LOWMEM_CAP;
+
+- /* Paranoia: should never happen, but... */
+- if ((lowmem == 0) || (lowmem >= 0x100000))
+- lowmem = 0x9f000;
++ /* Use the lower of the lowmem and EBDA markers as the cutoff */
++ lowmem = min(lowmem, ebda_addr);
++ lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+
+ /* reserve all memory between lowmem and the 1MB mark */
+ memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
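[Editor's sketch, not part of the patch.] The rewritten reserve_ebda_region() distrusts any BIOS-reported value below 128 KiB and then takes the lowest surviving candidate as the cutoff. The clamping arithmetic, lifted into a runnable user-space sketch using the patch's constants:

#include <stdio.h>

#define LOWMEM_CAP     0x9f000u  /* absolute maximum */
#define INSANE_CUTOFF  0x20000u  /* below this = bogus */

static unsigned int lowmem_cutoff(unsigned int lowmem, unsigned int ebda_addr)
{
    if (ebda_addr < INSANE_CUTOFF)
        ebda_addr = LOWMEM_CAP;          /* bogus EBDA pointer */
    if (lowmem < INSANE_CUTOFF)
        lowmem = LOWMEM_CAP;             /* bogus lowmem size */
    lowmem = lowmem < ebda_addr ? lowmem : ebda_addr;
    return lowmem < LOWMEM_CAP ? lowmem : LOWMEM_CAP;
}

int main(void)
{
    /* e.g. BIOS reports 639 KiB lowmem, EBDA at 0x9fc00: the cap wins */
    printf("cutoff = %#x\n", lowmem_cutoff(639 * 1024, 0x9fc00));
    return 0;
}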
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 5db0490..7b73c88 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -738,13 +738,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ return;
+ }
+ #endif
++ /* Kernel addresses are always protection faults: */
++ if (address >= TASK_SIZE)
++ error_code |= PF_PROT;
+
+- if (unlikely(show_unhandled_signals))
++ if (likely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+- /* Kernel addresses are always protection faults: */
+ tsk->thread.cr2 = address;
+- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index bbaaa00..44b93da 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return 0;
+
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index bef9991..1de542b 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -83,9 +83,10 @@ int efi_enabled(int facility)
+ }
+ EXPORT_SYMBOL(efi_enabled);
+
++static bool disable_runtime = false;
+ static int __init setup_noefi(char *arg)
+ {
+- clear_bit(EFI_BOOT, &x86_efi_facility);
++ disable_runtime = true;
+ return 0;
+ }
+ early_param("noefi", setup_noefi);
+@@ -549,35 +550,37 @@ void __init efi_init(void)
+
+ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
+
+- /*
+- * Check out the runtime services table. We need to map
+- * the runtime services table so that we can grab the physical
+- * address of several of the EFI runtime functions, needed to
+- * set the firmware into virtual mode.
+- */
+- runtime = early_ioremap((unsigned long)efi.systab->runtime,
+- sizeof(efi_runtime_services_t));
+- if (runtime != NULL) {
+- /*
+- * We will only need *early* access to the following
+- * two EFI runtime services before set_virtual_address_map
+- * is invoked.
+- */
+- efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
+- efi_phys.set_virtual_address_map =
+- (efi_set_virtual_address_map_t *)
+- runtime->set_virtual_address_map;
++ if (!disable_runtime) {
+ /*
+- * Make efi_get_time can be called before entering
+- * virtual mode.
++ * Check out the runtime services table. We need to map
++ * the runtime services table so that we can grab the physical
++ * address of several of the EFI runtime functions, needed to
++ * set the firmware into virtual mode.
+ */
+- efi.get_time = phys_efi_get_time;
+-
+- set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+- } else
+- printk(KERN_ERR "Could not map the EFI runtime service "
+- "table!\n");
+- early_iounmap(runtime, sizeof(efi_runtime_services_t));
++ runtime = early_ioremap((unsigned long)efi.systab->runtime,
++ sizeof(efi_runtime_services_t));
++ if (runtime != NULL) {
++ /*
++ * We will only need *early* access to the following
++ * two EFI runtime services before set_virtual_address_map
++ * is invoked.
++ */
++ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
++ efi_phys.set_virtual_address_map =
++ (efi_set_virtual_address_map_t *)
++ runtime->set_virtual_address_map;
++ /*
++ * Make sure efi_get_time can be called before entering
++ * virtual mode.
++ */
++ efi.get_time = phys_efi_get_time;
++
++ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
++ } else
++ printk(KERN_ERR "Could not map the EFI runtime service "
++ "table!\n");
++ early_iounmap(runtime, sizeof(efi_runtime_services_t));
++ }
+
+ /* Map the EFI memory map */
+ memmap.map = early_ioremap((unsigned long)memmap.phys_map,
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index d69cc6c..67bc7ba 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+ if (per_cpu(lock_spinners, cpu) == xl) {
+ ADD_STATS(released_slow_kicked, 1);
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+- break;
+ }
+ }
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 4927476..6edf228 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
+ struct kobject *block_depr;
+
+ /* for extended dynamic devt allocation, currently only one major is used */
+-#define MAX_EXT_DEVT (1 << MINORBITS)
++#define NR_EXT_DEVT (1 << MINORBITS)
+
+ /* For extended devt allocation. ext_devt_mutex prevents look up
+ * results from going away underneath its user.
+@@ -421,17 +421,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ do {
+ if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+ return -ENOMEM;
++ mutex_lock(&ext_devt_mutex);
+ rc = idr_get_new(&ext_devt_idr, part, &idx);
++ if (!rc && idx >= NR_EXT_DEVT) {
++ idr_remove(&ext_devt_idr, idx);
++ rc = -EBUSY;
++ }
++ mutex_unlock(&ext_devt_mutex);
+ } while (rc == -EAGAIN);
+
+ if (rc)
+ return rc;
+
+- if (idx > MAX_EXT_DEVT) {
+- idr_remove(&ext_devt_idr, idx);
+- return -EBUSY;
+- }
+-
+ *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+ return 0;
+ }
+@@ -645,7 +646,6 @@ void del_gendisk(struct gendisk *disk)
+ disk_part_iter_exit(&piter);
+
+ invalidate_partition(disk, 0);
+- blk_free_devt(disk_to_dev(disk)->devt);
+ set_capacity(disk, 0);
+ disk->flags &= ~GENHD_FL_UP;
+
+@@ -663,6 +663,7 @@ void del_gendisk(struct gendisk *disk)
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ device_del(disk_to_dev(disk));
++ blk_free_devt(disk_to_dev(disk)->devt);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index d790791..cc9d020 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -156,6 +156,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ },
+ {
+ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW41E_H",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW21E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 69ac373..df47397 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -321,6 +321,41 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (DH89xxCC) */
++ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++
+ { } /* terminate list */
+ };
+
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 000e7b2..8b8e8c0 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ struct device *dev;
+ int error = 0;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -323,7 +323,7 @@ struct device *bus_find_device(struct bus_type *bus,
+ struct klist_iter i;
+ struct device *dev;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return NULL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 86848c6..40a0fcb 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -584,12 +584,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ struct request sreq;
+
+ dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
++ if (!lo->sock)
++ return -EINVAL;
+
++ mutex_unlock(&lo->tx_lock);
++ fsync_bdev(bdev);
++ mutex_lock(&lo->tx_lock);
+ blk_rq_init(NULL, &sreq);
+ sreq.cmd_type = REQ_TYPE_SPECIAL;
+ nbd_cmd(&sreq) = NBD_CMD_DISC;
++
++ /* Check again after getting mutex back. */
+ if (!lo->sock)
+ return -EINVAL;
++
+ nbd_send_req(lo, &sreq);
+ return 0;
+ }
+@@ -603,6 +611,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ nbd_clear_que(lo);
+ BUG_ON(!list_empty(&lo->queue_head));
+ BUG_ON(!list_empty(&lo->waiting_queue));
++ kill_bdev(bdev);
+ if (file)
+ fput(file);
+ return 0;
+@@ -683,6 +692,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->file = NULL;
+ nbd_clear_que(lo);
+ dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
++ kill_bdev(bdev);
+ if (file)
+ fput(file);
+ lo->bytesize = 0;
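[Editor's sketch, not part of the patch.] The NBD_DISCONNECT fix above has a classic locking shape: fsync_bdev() may block, so the mutex is released around it, and the guarded state (lo->sock) must be re-checked once the lock is retaken, since another thread may have torn the socket down in the meantime. A pthread sketch of that shape, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_sock;

static void flush_blocking(void) { /* may block for a long time */ }

/* Caller holds tx_lock on entry, as in the ioctl path. */
static int disconnect(void)
{
    if (!have_sock)
        return -1;

    pthread_mutex_unlock(&tx_lock);
    flush_blocking();                /* must not block under the lock */
    pthread_mutex_lock(&tx_lock);

    if (!have_sock)                  /* state may have changed */
        return -1;
    /* ...send the disconnect request... */
    return 0;
}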
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 48e8fee..94f6ae2 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+ int op_len, err;
+ void *req_buf;
+
+- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++ if (!(((u64)1 << (u64)op) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {
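[Editor's sketch, not part of the patch.] The sunvdc change corrects an off-by-one in a capability test: judging from the fix, the hypervisor advertises operation N as bit N of port->operations, so testing bit N-1 silently checked the wrong capability. A toy version of the corrected test:

#include <stdint.h>
#include <stdbool.h>

static bool op_supported(uint64_t operations, unsigned int op)
{
    return (UINT64_C(1) << op) & operations;  /* bit op, not op - 1 */
}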
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index f759ad4..674e3c2 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -364,6 +364,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
+ be->blkif = NULL;
+ }
+
++ kfree(be->mode);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+@@ -513,6 +514,7 @@ static void backend_changed(struct xenbus_watch *watch,
+ = container_of(watch, struct backend_info, backend_watch);
+ struct xenbus_device *dev = be->dev;
+ int cdrom = 0;
++ unsigned long handle;
+ char *device_type;
+
+ DPRINTK("");
+@@ -532,10 +534,10 @@ static void backend_changed(struct xenbus_watch *watch,
+ return;
+ }
+
+- if ((be->major || be->minor) &&
+- ((be->major != major) || (be->minor != minor))) {
+- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+- be->major, be->minor, major, minor);
++ if (be->major | be->minor) {
++ if (be->major != major || be->minor != minor)
++ pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
++ be->major, be->minor, major, minor);
+ return;
+ }
+
+@@ -553,36 +555,33 @@ static void backend_changed(struct xenbus_watch *watch,
+ kfree(device_type);
+ }
+
+- if (be->major == 0 && be->minor == 0) {
+- /* Front end dir is a number, which is used as the handle. */
+-
+- char *p = strrchr(dev->otherend, '/') + 1;
+- long handle;
+- err = strict_strtoul(p, 0, &handle);
+- if (err)
+- return;
++ /* Front end dir is a number, which is used as the handle. */
++ err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
++ if (err)
++ return;
+
+- be->major = major;
+- be->minor = minor;
++ be->major = major;
++ be->minor = minor;
+
+- err = xen_vbd_create(be->blkif, handle, major, minor,
+- (NULL == strchr(be->mode, 'w')), cdrom);
+- if (err) {
+- be->major = 0;
+- be->minor = 0;
+- xenbus_dev_fatal(dev, err, "creating vbd structure");
+- return;
+- }
++ err = xen_vbd_create(be->blkif, handle, major, minor,
++ !strchr(be->mode, 'w'), cdrom);
+
++ if (err)
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ else {
+ err = xenvbd_sysfs_addif(dev);
+ if (err) {
+ xen_vbd_free(&be->blkif->vbd);
+- be->major = 0;
+- be->minor = 0;
+ xenbus_dev_fatal(dev, err, "creating sysfs entries");
+- return;
+ }
++ }
+
++ if (err) {
++ kfree(be->mode);
++ be->mode = NULL;
++ be->major = 0;
++ be->minor = 0;
++ } else {
+ /* We're potentially connected now */
+ xen_update_blkif_status(be->blkif);
+ }
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
+index bc6f5fa..819dfda 100644
+--- a/drivers/dca/dca-core.c
++++ b/drivers/dca/dca-core.c
+@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
++ if (list_empty(&dca_domains)) {
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
++ return;
++ }
++
+ list_del(&dca->node);
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index f3b890d..1f3dd51 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -995,6 +995,10 @@ static void fw_device_init(struct work_struct *work)
+ ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+ idr_get_new(&fw_device_idr, device, &minor) :
+ -ENOMEM;
++ if (minor >= 1 << MINORBITS) {
++ idr_remove(&fw_device_idr, minor);
++ minor = -ENOSPC;
++ }
+ up_write(&fw_device_rwsem);
+
+ if (ret < 0)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index bb95d59..9080eb7 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -87,9 +87,6 @@ static struct edid_quirk {
+ int product_id;
+ u32 quirks;
+ } edid_quirk_list[] = {
+- /* ASUS VW222S */
+- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+-
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+@@ -1743,7 +1740,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+- num_modes += add_inferred_modes(connector, edid);
++ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
++ num_modes += add_inferred_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 445003f..471f453 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -19,7 +19,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+- dev->dev = &usbdev->dev;
++ dev->dev = &interface->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 10fe480..5620192 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -756,7 +756,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+
+ seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+- seq_printf(m, "Kernel: " UTS_RELEASE);
++ seq_printf(m, "Kernel: " UTS_RELEASE "\n");
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7817429..2303c2b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -138,8 +138,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+- .m1 = { .min = 10, .max = 22 },
+- .m2 = { .min = 5, .max = 9 },
++ .m1 = { .min = 8, .max = 18 },
++ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+@@ -3242,6 +3242,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
++ u32 pctl;
+
+ if (!intel_crtc->active)
+ return;
+@@ -3257,6 +3258,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
++
++ /* Disable panel fitter if it is on this pipe. */
++ pctl = I915_READ(PFIT_CONTROL);
++ if ((pctl & PFIT_ENABLE) &&
++ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
++ I915_WRITE(PFIT_CONTROL, 0);
++
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 0977849..60d13fe 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1137,6 +1137,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
++ /* wait for the MC to settle */
++ udelay(100);
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 279b863d..a23b63a 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1900,6 +1900,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c15c38e..25f3290 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -506,6 +506,9 @@
+ #define USB_VENDOR_ID_MADCATZ 0x0738
+ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
++#define USB_VENDOR_ID_MASTERKIT 0x16c0
++#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
++
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index dffdca8..f44a067 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4140,13 +4140,19 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ /*
+ * Mobile 4 Series Chipset neglects to set RWBF capability,
+- * but needs it:
++ * but needs it. Same seems to hold for the desktop versions.
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
+
+ #define GGC 0x52
+ #define GGC_MEMORY_SIZE_MASK (0xf << 8)
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 29f9000..a47ba33 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -774,9 +774,12 @@ static ssize_t show_protocols(struct device *device,
+ if (dev->driver_type == RC_DRIVER_SCANCODE) {
+ enabled = dev->rc_map.rc_type;
+ allowed = dev->allowed_protos;
+- } else {
++ } else if (dev->raw) {
+ enabled = dev->raw->enabled_protocols;
+ allowed = ir_raw_get_allowed_protocols();
++ } else {
++ mutex_unlock(&dev->lock);
++ return -ENODEV;
+ }
+
+ IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
+diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
+index ee0d0b3..d345215 100644
+--- a/drivers/media/video/omap/omap_vout.c
++++ b/drivers/media/video/omap/omap_vout.c
+@@ -206,19 +206,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+- vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+- if (virtp >= PAGE_OFFSET) {
+- physp = virt_to_phys((void *) virtp);
+- } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
++ if (virtp >= PAGE_OFFSET)
++ return virt_to_phys((void *) virtp);
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(mm, virtp);
++ if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ /* this will catch, kernel-allocated, mmaped-to-usermode
+ addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
++ up_read(&current->mm->mmap_sem);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+- down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
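[Editor's sketch, not part of the patch.] The omap_vout hunk moves find_vma() under mmap_sem: without the lock, the vma can be freed or resized between lookup and use. The same discipline in a user-space rwlock sketch, where the types and lookup table are illustrative:

#include <pthread.h>
#include <stddef.h>

struct region { unsigned long start, end, pgoff; };

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct region table[4];   /* stand-in for the per-mm vma tree */

static struct region *find_region(unsigned long addr)
{
    for (int i = 0; i < 4; i++)
        if (addr >= table[i].start && addr < table[i].end)
            return &table[i];
    return NULL;
}

/* The returned pointer is only valid while the reader lock is held,
 * which is why the fix keeps mmap_sem across find_vma() and every
 * use of the vma it returns. */
static unsigned long region_offset(unsigned long addr)
{
    unsigned long off = 0;
    struct region *r;

    pthread_rwlock_rdlock(&map_lock);
    r = find_region(addr);
    if (r)
        off = (r->pgoff << 12) + (addr - r->start);
    pthread_rwlock_unlock(&map_lock);
    return off;
}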
+diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
+index 0edd618..8b0777f 100644
+--- a/drivers/media/video/v4l2-device.c
++++ b/drivers/media/video/v4l2-device.c
+@@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ sd->v4l2_dev = v4l2_dev;
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+- if (err) {
+- module_put(sd->owner);
+- return err;
+- }
++ if (err)
++ goto error_module;
+ }
+
+ /* This just returns 0 if either of the two args is NULL */
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
+- if (err) {
+- if (sd->internal_ops && sd->internal_ops->unregistered)
+- sd->internal_ops->unregistered(sd);
+- module_put(sd->owner);
+- return err;
+- }
++ if (err)
++ goto error_unregister;
+
+ #if defined(CONFIG_MEDIA_CONTROLLER)
+ /* Register the entity. */
+ if (v4l2_dev->mdev) {
+ err = media_device_register_entity(v4l2_dev->mdev, entity);
+- if (err < 0) {
+- if (sd->internal_ops && sd->internal_ops->unregistered)
+- sd->internal_ops->unregistered(sd);
+- module_put(sd->owner);
+- return err;
+- }
++ if (err < 0)
++ goto error_unregister;
+ }
+ #endif
+
+@@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ spin_unlock(&v4l2_dev->lock);
+
+ return 0;
++
++error_unregister:
++ if (sd->internal_ops && sd->internal_ops->unregistered)
++ sd->internal_ops->unregistered(sd);
++error_module:
++ module_put(sd->owner);
++ sd->v4l2_dev = NULL;
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+
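[Editor's sketch, not part of the patch.] The v4l2_device_register_subdev() rework above replaces three copies of the failure cleanup with goto-based unwinding, the usual kernel idiom for multi-step initialization. A minimal standalone illustration of the idiom:

#include <errno.h>
#include <stdlib.h>

struct widget { void *a, *b; };

static int widget_init(struct widget *w)
{
    w->a = malloc(16);
    if (!w->a)
        goto err;
    w->b = malloc(16);
    if (!w->b)
        goto err_free_a;    /* unwind in reverse order of setup */
    return 0;

err_free_a:
    free(w->a);
    w->a = NULL;
err:
    return -ENOMEM;
}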
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 1b47937..85a074f 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -232,15 +232,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+
+ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct pltfm_imx_data *imx_data = pltfm_host->priv;
++
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+- u16 val = readw(host->ioaddr + (reg ^ 2));
+- /*
+- * uSDHC supports SDHCI v3.0, but it's encoded as value
+- * 0x3 in host controller version register, which violates
+- * SDHCI_SPEC_300 definition. Work it around here.
+- */
+- if ((val & SDHCI_SPEC_VER_MASK) == 3)
+- return --val;
++ reg ^= 2;
++ if (is_imx6q_usdhc(imx_data)) {
++ /*
++ * The usdhc register returns a wrong host version.
++ * Correct it here.
++ */
++ return SDHCI_SPEC_300;
++ }
+ }
+
+ return readw(host->ioaddr + reg);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index cf177b8..df5a09a 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4559,11 +4559,13 @@ void igb_update_stats(struct igb_adapter *adapter,
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+- u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
++ u32 rqdpc = rd32(E1000_RQDPC(i));
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+- ring->rx_stats.drops += rqdpc_tmp;
+- net_stats->rx_fifo_errors += rqdpc_tmp;
++ if (rqdpc) {
++ ring->rx_stats.drops += rqdpc;
++ net_stats->rx_fifo_errors += rqdpc;
++ }
+
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 315b96e..9fdd198 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
+
+ /* DMA engine tuning knobs */
+ #define B43_TXRING_SLOTS 256
+-#define B43_RXRING_SLOTS 64
++#define B43_RXRING_SLOTS 256
+ #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+ #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
+
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 7ca84c3..564218c 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
+- {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
++ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index a99be2d0..0984dcf 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -295,6 +295,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+@@ -372,9 +373,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+
+ MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
++static int rtl8192cu_probe(struct usb_interface *intf,
++ const struct usb_device_id *id)
++{
++ return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg);
++}
++
+ static struct usb_driver rtl8192cu_driver = {
+ .name = "rtl8192cu",
+- .probe = rtl_usb_probe,
++ .probe = rtl8192cu_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192c_usb_ids,
+
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 30dd0a9..c04ee92 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -44,8 +44,12 @@
+
+ static void usbctrl_async_callback(struct urb *urb)
+ {
+- if (urb)
+- kfree(urb->context);
++ if (urb) {
++ /* free dr */
++ kfree(urb->setup_packet);
++ /* free databuf */
++ kfree(urb->transfer_buffer);
++ }
+ }
+
+ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+@@ -57,38 +61,46 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ u8 reqtype;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+- struct rtl819x_async_write_data {
+- u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+- struct usb_ctrlrequest dr;
+- } *buf;
++ const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
++ u8 *databuf;
++
++ if (WARN_ON_ONCE(len > databuf_maxlen))
++ len = databuf_maxlen;
+
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+
+- buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+- if (!buf)
++ dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
++ if (!dr)
+ return -ENOMEM;
+
++ databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
++ if (!databuf) {
++ kfree(dr);
++ return -ENOMEM;
++ }
++
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+- kfree(buf);
++ kfree(databuf);
++ kfree(dr);
+ return -ENOMEM;
+ }
+
+- dr = &buf->dr;
+-
+ dr->bRequestType = reqtype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(len);
+- memcpy(buf, pdata, len);
++ memcpy(databuf, pdata, len);
+ usb_fill_control_urb(urb, udev, pipe,
+- (unsigned char *)dr, buf, len,
+- usbctrl_async_callback, buf);
++ (unsigned char *)dr, databuf, len,
++ usbctrl_async_callback, NULL);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+- if (rc < 0)
+- kfree(buf);
++ if (rc < 0) {
++ kfree(databuf);
++ kfree(dr);
++ }
+ usb_free_urb(urb);
+ return rc;
+ }
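The rewritten path follows the usual ownership rule for fire-and-forget control URBs: once usb_submit_urb() succeeds, the setup packet and data buffer belong to the URB and are freed by the completion callback through urb->setup_packet and urb->transfer_buffer; only on submission failure does the submitter free them itself. A condensed sketch of the pattern, with placeholder request values:

#include <linux/slab.h>
#include <linux/usb.h>

static void ctrl_complete(struct urb *urb)
{
	kfree(urb->setup_packet);	/* the struct usb_ctrlrequest */
	kfree(urb->transfer_buffer);	/* the data-stage buffer */
}

static int send_ctrl_async(struct usb_device *udev, const void *data, u16 len)
{
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	void *buf;
	int rc;

	dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
	buf = kmemdup(data, len, GFP_ATOMIC);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!dr || !buf || !urb) {
		rc = -ENOMEM;
		goto err_free;
	}

	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	dr->bRequest = 0x05;			/* placeholder request */
	dr->wValue = cpu_to_le16(0);
	dr->wIndex = cpu_to_le16(0);
	dr->wLength = cpu_to_le16(len);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, len,
			     ctrl_complete, NULL);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (rc < 0)
		goto err_free;	/* never queued: we still own dr and buf */

	usb_free_urb(urb);	/* drop our ref; the core holds one until completion */
	return 0;

err_free:
	kfree(dr);
	kfree(buf);
	usb_free_urb(urb);	/* NULL-safe */
	return rc;
}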
+@@ -894,7 +906,8 @@ static struct rtl_intf_ops rtl_usb_ops = {
+ };
+
+ int __devinit rtl_usb_probe(struct usb_interface *intf,
+- const struct usb_device_id *id)
++ const struct usb_device_id *id,
++ struct rtl_hal_cfg *rtl_hal_cfg)
+ {
+ int err;
+ struct ieee80211_hw *hw = NULL;
+@@ -928,7 +941,7 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ usb_set_intfdata(intf, hw);
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_USB;
+- rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
++ rtlpriv->cfg = rtl_hal_cfg;
+ rtlpriv->intf_ops = &rtl_usb_ops;
+ rtl_dbgp_flag_init(hw);
+ /* Init IO handler */
+diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
+index d2a63fb..4dc4b1c 100644
+--- a/drivers/net/wireless/rtlwifi/usb.h
++++ b/drivers/net/wireless/rtlwifi/usb.h
+@@ -158,7 +158,8 @@ struct rtl_usb_priv {
+
+
+ int __devinit rtl_usb_probe(struct usb_interface *intf,
+- const struct usb_device_id *id);
++ const struct usb_device_id *id,
++ struct rtl_hal_cfg *rtl92cu_hal_cfg);
+ void rtl_usb_disconnect(struct usb_interface *intf);
+ int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+ int rtl_usb_resume(struct usb_interface *pusb_intf);
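Passing rtl_hal_cfg as an explicit argument, with each chip module supplying a one-line probe wrapper, replaces the old habit of smuggling the pointer through id->driver_info casts. A sketch of the shape, with hypothetical names:

#include <linux/usb.h>

/* The generic core takes the per-chip configuration as a typed argument
 * instead of recovering it from id->driver_info. */
struct chip_cfg { const char *name; };

int generic_usb_probe(struct usb_interface *intf,
		      const struct usb_device_id *id,
		      struct chip_cfg *cfg);	/* provided by the core module */

static struct chip_cfg my_chip_cfg = { .name = "mychip" };

static int my_chip_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	return generic_usb_probe(intf, id, &my_chip_cfg);
}

static struct usb_driver my_chip_driver = {
	.name = "mychip",
	.probe = my_chip_probe,	/* matches usb_driver's probe signature */
	/* .disconnect, .id_table, ... */
};

This keeps driver_info free for real per-device flags, as the blacklist entries elsewhere in this patch use it, and lets the compiler type-check the configuration pointer.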
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 5925e0b..8eaf0e2 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
+ static void xenvif_down(struct xenvif *vif)
+ {
+ disable_irq(vif->irq);
++ del_timer_sync(&vif->credit_timeout);
+ xen_netbk_deschedule_xenvif(vif);
+ xen_netbk_remove_xenvif(vif);
+ }
+@@ -362,8 +363,6 @@ void xenvif_disconnect(struct xenvif *vif)
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+
+- del_timer_sync(&vif->credit_timeout);
+-
+ if (vif->irq)
+ unbind_from_irqhandler(vif->irq, vif);
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index b802bb3..185a0eb 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -883,13 +883,13 @@ static int netbk_count_requests(struct xenvif *vif,
+ if (frags >= work_to_do) {
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -ENODATA;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -E2BIG;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+@@ -897,7 +897,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ if (txp->size > first->size) {
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -EIO;
+ }
+
+ first->size -= txp->size;
+@@ -907,7 +907,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -EINVAL;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+ return frags;
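Before this hunk, netbk_count_requests() signalled failure by returning the negated frag counter, which conflates the error with the count and, on the first iteration, returns 0, i.e. success. Distinct errno values fix both problems. A tiny standalone illustration of the same trap:

#include <errno.h>

/* Returning a negated counter is ambiguous: when count is still 0, the
 * "error" return is indistinguishable from success. Distinct errno
 * codes keep the two channels separate. */
int count_items(const int *items, int n, int limit)
{
	int count = 0;

	while (count < n) {
		if (count >= limit)
			return -E2BIG;	/* too many entries */
		if (items[count] < 0)
			return -EINVAL;	/* malformed entry */
		count++;
	}
	return count;			/* >= 0: number of valid items */
}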
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 7f87bee..f53da9e 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
+
+ static void pci_stop_dev(struct pci_dev *dev)
+ {
++ pci_pme_active(dev, false);
++
+ if (dev->is_added) {
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
+index 86e4a1a..6bb02ab 100644
+--- a/drivers/pcmcia/vrc4171_card.c
++++ b/drivers/pcmcia/vrc4171_card.c
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
+ socket = &vrc4171_sockets[slot];
+ socket->csc_irq = search_nonuse_irq();
+ socket->io_irq = search_nonuse_irq();
++ spin_lock_init(&socket->lock);
+
+ return 0;
+ }
+diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
+index f75a4c8..3a09460 100644
+--- a/drivers/platform/x86/asus-laptop.c
++++ b/drivers/platform/x86/asus-laptop.c
+@@ -820,8 +820,10 @@ static ssize_t show_infos(struct device *dev,
+ /*
+ * The HWRS method returns information about the hardware.
+ * 0x80 bit is for WLAN, 0x100 for Bluetooth.
++ * 0x40 for WWAN, 0x10 for WIMAX.
+ * The significance of others is yet to be found.
+- * If we don't find the method, we assume the device are present.
++ * We don't currently use this for device detection, and it
++ * takes several seconds to run on some systems.
+ */
+ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
+ if (!ACPI_FAILURE(rv))
+@@ -1591,7 +1593,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *model = NULL;
+- unsigned long long bsts_result, hwrs_result;
++ unsigned long long bsts_result;
+ char *string = NULL;
+ acpi_status status;
+
+@@ -1653,17 +1655,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ if (*string)
+ pr_notice(" %s model detected\n", string);
+
+- /*
+- * The HWRS method return informations about the hardware.
+- * 0x80 bit is for WLAN, 0x100 for Bluetooth,
+- * 0x40 for WWAN, 0x10 for WIMAX.
+- * The significance of others is yet to be found.
+- */
+- status =
+- acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
+- if (!ACPI_FAILURE(status))
+- pr_notice(" HWRS returned %x", (int)hwrs_result);
+-
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+
+diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
+index 79451f2..60cee9e 100644
+--- a/drivers/pps/clients/pps-ldisc.c
++++ b/drivers/pps/clients/pps-ldisc.c
+@@ -31,7 +31,7 @@
+ static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
+ struct pps_event_time *ts)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ BUG_ON(pps == NULL);
+
+@@ -67,9 +67,9 @@ static int pps_tty_open(struct tty_struct *tty)
+ pr_err("cannot register PPS source \"%s\"\n", info.path);
+ return -ENOMEM;
+ }
+- tty->disc_data = pps;
++ pps->lookup_cookie = tty;
+
+- /* Should open N_TTY ldisc too */
++ /* Now open the base class N_TTY ldisc */
+ ret = alias_n_tty_open(tty);
+ if (ret < 0) {
+ pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+@@ -81,7 +81,6 @@ static int pps_tty_open(struct tty_struct *tty)
+ return 0;
+
+ err_unregister:
+- tty->disc_data = NULL;
+ pps_unregister_source(pps);
+ return ret;
+ }
+@@ -90,11 +89,10 @@ static void (*alias_n_tty_close)(struct tty_struct *tty);
+
+ static void pps_tty_close(struct tty_struct *tty)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ alias_n_tty_close(tty);
+
+- tty->disc_data = NULL;
+ dev_info(pps->dev, "removed\n");
+ pps_unregister_source(pps);
+ }
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 2baadd2..e83669f 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -247,12 +247,15 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ file->private_data = pps;
+-
++ kobject_get(&pps->dev->kobj);
+ return 0;
+ }
+
+ static int pps_cdev_release(struct inode *inode, struct file *file)
+ {
++ struct pps_device *pps = container_of(inode->i_cdev,
++ struct pps_device, cdev);
++ kobject_put(&pps->dev->kobj);
+ return 0;
+ }
+
+@@ -274,8 +277,10 @@ static void pps_device_destruct(struct device *dev)
+ {
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+- /* release id here to protect others from using it while it's
+- * still in use */
++ cdev_del(&pps->cdev);
++
++ /* Now we can release the ID for re-use */
++ pr_debug("deallocating pps%d\n", pps->id);
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ mutex_unlock(&pps_idr_lock);
+@@ -330,6 +335,7 @@ int pps_register_cdev(struct pps_device *pps)
+ if (IS_ERR(pps->dev))
+ goto del_cdev;
+
++ /* Override the release function with our own */
+ pps->dev->release = pps_device_destruct;
+
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+@@ -350,11 +356,44 @@ free_idr:
+
+ void pps_unregister_cdev(struct pps_device *pps)
+ {
++ pr_debug("unregistering pps%d\n", pps->id);
++ pps->lookup_cookie = NULL;
+ device_destroy(pps_class, pps->dev->devt);
+- cdev_del(&pps->cdev);
+ }
+
+ /*
++ * Look up a pps device by magic cookie.
++ * The cookie is usually a pointer to some enclosing device, but this
++ * code doesn't care; you should never be dereferencing it.
++ *
++ * This is a bit of a kludge that is currently used only by the PPS
++ * serial line discipline. It may need to be tweaked when a second user
++ * is found.
++ *
++ * There is no function interface for setting the lookup_cookie field.
++ * It's initialized to NULL when the pps device is created, and if a
++ * client wants to use it, just fill it in afterward.
++ *
++ * The cookie is automatically set to NULL in pps_unregister_source()
++ * so that it will not be used again, even if the pps device cannot
++ * be removed from the idr due to pending references holding the minor
++ * number in use.
++ */
++struct pps_device *pps_lookup_dev(void const *cookie)
++{
++ struct pps_device *pps;
++ unsigned id;
++
++ rcu_read_lock();
++ idr_for_each_entry(&pps_idr, pps, id)
++ if (cookie == pps->lookup_cookie)
++ break;
++ rcu_read_unlock();
++ return pps;
++}
++EXPORT_SYMBOL(pps_lookup_dev);
++
++/*
+ * Module stuff
+ */
+
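Taken together, the pps hunks close a use-after-free: an open file descriptor now pins the device object (kobject_get() in open, kobject_put() in release), cdev_del() moves into the release function, and the lookup cookie is cleared at unregister time so late lookups fail cleanly. A minimal sketch of the open/release pinning half, with a hypothetical mydev type:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

struct mydev {
	struct cdev cdev;
	struct device *dev;
};

/* Each open file takes a reference on the underlying device object... */
static int mydev_open(struct inode *inode, struct file *file)
{
	struct mydev *md = container_of(inode->i_cdev, struct mydev, cdev);

	file->private_data = md;
	kobject_get(&md->dev->kobj);	/* pin across the fd's lifetime */
	return 0;
}

/* ...and drops it on the last close, which may finally run ->release(). */
static int mydev_release(struct inode *inode, struct file *file)
{
	struct mydev *md = file->private_data;

	kobject_put(&md->dev->kobj);
	return 0;
}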
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index 73816d8..1f94073 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -344,7 +344,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ /* Enable the clockwatch on ST Variants */
+ if (ldata->hw_designer == AMBA_VENDOR_ST)
+ data |= RTC_CR_CWEN;
+- writel(data | RTC_CR_EN, ldata->base + RTC_CR);
++ else
++ data |= RTC_CR_EN;
++ writel(data, ldata->base + RTC_CR);
+
+ /*
+ * On ST PL031 variants, the RTC reset value does not provide correct
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
+index 94f49ff..b1e8f6c 100644
+--- a/drivers/s390/kvm/kvm_virtio.c
++++ b/drivers/s390/kvm/kvm_virtio.c
+@@ -414,6 +414,26 @@ static void kvm_extint_handler(unsigned int ext_int_code,
+ }
+
+ /*
++ * For s390-virtio, we expect a page above main storage containing
++ * the virtio configuration. Try to actually load from this area
++ * in order to figure out if the host provides this page.
++ */
++static int __init test_devices_support(unsigned long addr)
++{
++ int ret = -EIO;
++
++ asm volatile(
++ "0: lura 0,%1\n"
++ "1: xgr %0,%0\n"
++ "2:\n"
++ EX_TABLE(0b,2b)
++ EX_TABLE(1b,2b)
++ : "+d" (ret)
++ : "a" (addr)
++ : "0", "cc");
++ return ret;
++}
++/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+@@ -424,21 +444,23 @@ static int __init kvm_devices_init(void)
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+
++ if (test_devices_support(real_memory_size) < 0)
++ return -ENODEV;
++
++ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
++ if (rc)
++ return rc;
++
++ kvm_devices = (void *) real_memory_size;
++
+ kvm_root = root_device_register("kvm_s390");
+ if (IS_ERR(kvm_root)) {
+ rc = PTR_ERR(kvm_root);
+ printk(KERN_ERR "Could not register kvm_s390 root device");
++ vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ return rc;
+ }
+
+- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+- if (rc) {
+- root_device_unregister(kvm_root);
+- return rc;
+- }
+-
+- kvm_devices = (void *) real_memory_size;
+-
+ INIT_WORK(&hotplug_work, hotplug_devices);
+
+ service_subclass_irq_register();
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index ab9f5ed..a023f52 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ /* Device config is special, because it must work on
+ * an unconfigured device. */
+ if (cmd == COMEDI_DEVCONFIG) {
++ if (minor >= COMEDI_NUM_BOARD_MINORS) {
++ /* Device config not appropriate on non-board minors. */
++ rc = -ENOTTY;
++ goto done;
++ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
+ goto done;
+@@ -1569,7 +1574,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
+
+ mask = 0;
+ read_subdev = comedi_get_read_subdevice(dev_file_info);
+- if (read_subdev) {
++ if (read_subdev && read_subdev->async) {
+ poll_wait(file, &read_subdev->async->wait_head, wait);
+ if (!read_subdev->busy
+ || comedi_buf_read_n_available(read_subdev->async) > 0
+@@ -1579,7 +1584,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
+ }
+ }
+ write_subdev = comedi_get_write_subdevice(dev_file_info);
+- if (write_subdev) {
++ if (write_subdev && write_subdev->async) {
+ poll_wait(file, &write_subdev->async->wait_head, wait);
+ comedi_buf_write_alloc(write_subdev->async,
+ write_subdev->async->prealloc_bufsz);
+@@ -1621,7 +1626,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ }
+
+ s = comedi_get_write_subdevice(dev_file_info);
+- if (s == NULL) {
++ if (s == NULL || s->async == NULL) {
+ retval = -EIO;
+ goto done;
+ }
+@@ -1732,7 +1737,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ }
+
+ s = comedi_get_read_subdevice(dev_file_info);
+- if (s == NULL) {
++ if (s == NULL || s->async == NULL) {
+ retval = -EIO;
+ goto done;
+ }
+diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
+index 721b2be..0517a23 100644
+--- a/drivers/staging/comedi/drivers/ni_labpc.c
++++ b/drivers/staging/comedi/drivers/ni_labpc.c
+@@ -1264,7 +1264,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ else
+ channel = CR_CHAN(cmd->chanlist[0]);
+ /* munge channel bits for differential / scan disabled mode */
+- if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF)
++ if ((labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN ||
++ labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN_INTERVAL) &&
++ aref == AREF_DIFF)
+ channel *= 2;
+ devpriv->command1_bits |= ADC_CHAN_BITS(channel);
+ devpriv->command1_bits |= thisboard->ai_range_code[range];
+@@ -1280,21 +1282,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ devpriv->write_byte(devpriv->command1_bits,
+ dev->iobase + COMMAND1_REG);
+ }
+- /* setup any external triggering/pacing (command4 register) */
+- devpriv->command4_bits = 0;
+- if (cmd->convert_src != TRIG_EXT)
+- devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
+- /* XXX should discard first scan when using interval scanning
+- * since manual says it is not synced with scan clock */
+- if (labpc_use_continuous_mode(cmd) == 0) {
+- devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
+- if (cmd->scan_begin_src == TRIG_EXT)
+- devpriv->command4_bits |= EXT_SCAN_EN_BIT;
+- }
+- /* single-ended/differential */
+- if (aref == AREF_DIFF)
+- devpriv->command4_bits |= ADC_DIFF_BIT;
+- devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
+
+ devpriv->write_byte(cmd->chanlist_len,
+ dev->iobase + INTERVAL_COUNT_REG);
+@@ -1374,6 +1361,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT;
+ devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG);
+
++ /* setup any external triggering/pacing (command4 register) */
++ devpriv->command4_bits = 0;
++ if (cmd->convert_src != TRIG_EXT)
++ devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
++ /* XXX should discard first scan when using interval scanning
++ * since manual says it is not synced with scan clock */
++ if (labpc_use_continuous_mode(cmd) == 0) {
++ devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
++ if (cmd->scan_begin_src == TRIG_EXT)
++ devpriv->command4_bits |= EXT_SCAN_EN_BIT;
++ }
++ /* single-ended/differential */
++ if (aref == AREF_DIFF)
++ devpriv->command4_bits |= ADC_DIFF_BIT;
++ devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
++
+ /* startup acquisition */
+
+ /* command2 reg */
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index b5130c8..e2f5c81 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -46,7 +46,7 @@ static int misc_registered;
+ static struct var_t vars[] = {
+ { CAPS_START, .u.s = {"\x01+3p" } },
+ { CAPS_STOP, .u.s = {"\x01-3p" } },
+- { RATE, .u.n = {"\x01%ds", 5, 0, 9, 0, 0, NULL } },
++ { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
+ { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
+ { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
+ { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 09de99f..2594a31 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+
+ if (is_partial_io(bvec)) {
+ /* Use a temporary buffer to decompress the page */
+- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ return -ENOMEM;
+@@ -338,7 +338,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ * This is a partial IO. We need to read the full page
+ * before to write the changes.
+ */
+- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ ret = -ENOMEM;
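GFP_KERNEL allocations may enter direct reclaim and issue new I/O to free memory; inside a block driver's request path that I/O can land back on the same device and deadlock it. GFP_NOIO still allows sleeping but forbids reclaim from starting I/O. A one-function sketch of the rule:

#include <linux/slab.h>

/* Inside the request path of a block device, allocate with GFP_NOIO so
 * that memory reclaim cannot recurse into this same device. */
static void *alloc_partial_io_buffer(size_t size)
{
	return kmalloc(size, GFP_NOIO);
}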
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index a0143a0..5def359 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1439,24 +1439,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
+
+ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
++ struct se_node_acl *nacl,
+ u32 mapped_lun,
+- char *initiatorname,
+ int *ret)
+ {
+ struct se_lun_acl *lacl;
+- struct se_node_acl *nacl;
+
+- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
++ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
+ pr_err("%s InitiatorName exceeds maximum size.\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ *ret = -EOVERFLOW;
+ return NULL;
+ }
+- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+- if (!nacl) {
+- *ret = -EINVAL;
+- return NULL;
+- }
+ lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+ if (!lacl) {
+ pr_err("Unable to allocate memory for struct se_lun_acl.\n");
+@@ -1467,7 +1461,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ INIT_LIST_HEAD(&lacl->lacl_list);
+ lacl->mapped_lun = mapped_lun;
+ lacl->se_lun_nacl = nacl;
+- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
++ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
++ nacl->initiatorname);
+
+ return lacl;
+ }
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index 09b6f87..60009bd 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
+ ret = -EINVAL;
+ goto out;
+ }
++ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
++ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
++ "-1: %u for Target Portal Group: %u\n", mapped_lun,
++ TRANSPORT_MAX_LUNS_PER_TPG-1,
++ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
++ ret = -EINVAL;
++ goto out;
++ }
+
+- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+- config_item_name(acl_ci), &ret);
++ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
++ mapped_lun, &ret);
+ if (!lacl) {
+ ret = -EINVAL;
+ goto out;
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index d91fe44..d048e33 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -117,16 +117,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
+ struct se_node_acl *acl;
+
+ spin_lock_irq(&tpg->acl_node_lock);
+- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+- if (!strcmp(acl->initiatorname, initiatorname) &&
+- !acl->dynamic_node_acl) {
+- spin_unlock_irq(&tpg->acl_node_lock);
+- return acl;
+- }
+- }
++ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ spin_unlock_irq(&tpg->acl_node_lock);
+
+- return NULL;
++ return acl;
+ }
+
+ /* core_tpg_add_node_to_devs():
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 90dad17..6748568 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -2695,7 +2695,7 @@ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+ if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+ ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+- ser->type == PORT_STARTECH)
++ ser->type == PORT_STARTECH || uart_config[ser->type].name == NULL)
+ return -EINVAL;
+ return 0;
+ }
+@@ -2705,7 +2705,7 @@ serial8250_type(struct uart_port *port)
+ {
+ int type = port->type;
+
+- if (type >= ARRAY_SIZE(uart_config))
++ if (type >= ARRAY_SIZE(uart_config) || uart_config[type].name == NULL)
+ type = 0;
+ return uart_config[type].name;
+ }
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 9314d93..937f927 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -618,7 +618,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ tty_set_termios(tty, &tmp_termios);
+@@ -685,7 +685,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&tty->termios_mutex);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index e716839..632df54 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -657,7 +657,7 @@ static inline void save_screen(struct vc_data *vc)
+ * Redrawing of screen
+ */
+
+-static void clear_buffer_attributes(struct vc_data *vc)
++void clear_buffer_attributes(struct vc_data *vc)
+ {
+ unsigned short *p = (unsigned short *)vc->vc_origin;
+ int count = vc->vc_screenbuf_size / 2;
+@@ -3016,7 +3016,7 @@ int __init vty_init(const struct file_operations *console_fops)
+
+ static struct class *vtconsole_class;
+
+-static int bind_con_driver(const struct consw *csw, int first, int last,
++static int do_bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+ {
+ struct module *owner = csw->owner;
+@@ -3027,7 +3027,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3112,11 +3112,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+
+ retval = 0;
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ };
+
++
++static int bind_con_driver(const struct consw *csw, int first, int last,
++ int deflt)
++{
++ int ret;
++
++ console_lock();
++ ret = do_bind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return ret;
++}
++
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING
+ static int con_is_graphics(const struct consw *csw, int first, int last)
+ {
+@@ -3153,6 +3164,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
+ */
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ {
++ int retval;
++
++ console_lock();
++ retval = do_unbind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unbind_con_driver);
++
++/* unlocked version of unbind_con_driver() */
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
++{
+ struct module *owner = csw->owner;
+ const struct consw *defcsw = NULL;
+ struct con_driver *con_driver = NULL, *con_back = NULL;
+@@ -3161,7 +3184,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered and if it is unbindable */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3174,10 +3197,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+ retval = -ENODEV;
+
+@@ -3193,15 +3214,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+- if (!con_is_bound(csw)) {
+- console_unlock();
++ if (!con_is_bound(csw))
+ goto err;
+- }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+@@ -3228,15 +3245,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!con_is_bound(csw))
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+
+- console_unlock();
+ /* ignore return value, binding should not fail */
+- bind_con_driver(defcsw, first, last, deflt);
++ do_bind_con_driver(defcsw, first, last, deflt);
+ err:
+ module_put(owner);
+ return retval;
+
+ }
+-EXPORT_SYMBOL(unbind_con_driver);
++EXPORT_SYMBOL_GPL(do_unbind_con_driver);
+
+ static int vt_bind(struct con_driver *con)
+ {
+@@ -3508,28 +3524,18 @@ int con_debug_leave(void)
+ }
+ EXPORT_SYMBOL_GPL(con_debug_leave);
+
+-/**
+- * register_con_driver - register console driver to console layer
+- * @csw: console driver
+- * @first: the first console to take over, minimum value is 0
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+- *
+- * DESCRIPTION: This function registers a console driver which can later
+- * bind to a range of consoles specified by @first and @last. It will
+- * also initialize the console driver by calling con_startup().
+- */
+-int register_con_driver(const struct consw *csw, int first, int last)
++static int do_register_con_driver(const struct consw *csw, int first, int last)
+ {
+ struct module *owner = csw->owner;
+ struct con_driver *con_driver;
+ const char *desc;
+ int i, retval = 0;
+
++ WARN_CONSOLE_UNLOCKED();
++
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
+-
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+@@ -3582,10 +3588,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
+ }
+
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ }
++
++/**
++ * register_con_driver - register console driver to console layer
++ * @csw: console driver
++ * @first: the first console to take over, minimum value is 0
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
++ *
++ * DESCRIPTION: This function registers a console driver which can later
++ * bind to a range of consoles specified by @first and @last. It will
++ * also initialize the console driver by calling con_startup().
++ */
++int register_con_driver(const struct consw *csw, int first, int last)
++{
++ int retval;
++
++ console_lock();
++ retval = do_register_con_driver(csw, first, last);
++ console_unlock();
++ return retval;
++}
+ EXPORT_SYMBOL(register_con_driver);
+
+ /**
+@@ -3601,9 +3626,18 @@ EXPORT_SYMBOL(register_con_driver);
+ */
+ int unregister_con_driver(const struct consw *csw)
+ {
+- int i, retval = -ENODEV;
++ int retval;
+
+ console_lock();
++ retval = do_unregister_con_driver(csw);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unregister_con_driver);
++
++int do_unregister_con_driver(const struct consw *csw)
++{
++ int i, retval = -ENODEV;
+
+ /* cannot unregister a bound driver */
+ if (con_is_bound(csw))
+@@ -3629,27 +3663,53 @@ int unregister_con_driver(const struct consw *csw)
+ }
+ }
+ err:
+- console_unlock();
+ return retval;
+ }
+-EXPORT_SYMBOL(unregister_con_driver);
++EXPORT_SYMBOL_GPL(do_unregister_con_driver);
+
+ /*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+- * take_over_console is basically a register followed by unbind
++ * take_over_console is basically a register followed by unbind
++ */
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
++{
++ int err;
++
++ err = do_register_con_driver(csw, first, last);
++ /*
++ * If we get a busy error we still want to bind the console driver
++ * and return success, as we may have unbound the console driver
++ * but not unregistered it.
++ */
++ if (err == -EBUSY)
++ err = 0;
++ if (!err)
++ do_bind_con_driver(csw, first, last, deflt);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(do_take_over_console);
++
++/*
++ * If we support more console drivers, this function is used
++ * when a driver wants to take over some existing consoles
++ * and become default driver for newly opened ones.
++ *
++ * take_over_console is basically a register followed by unbind
+ */
+ int take_over_console(const struct consw *csw, int first, int last, int deflt)
+ {
+ int err;
+
+ err = register_con_driver(csw, first, last);
+- /* if we get an busy error we still want to bind the console driver
++ /*
++ * If we get a busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+-  * but not unregistered it.
+- */
++ * but not unregistered it.
++ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
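The console rework above is a standard lock refactoring: each exported entry point becomes a thin wrapper that takes console_lock() around an unlocked do_*() worker, and the workers assert the lock with WARN_CONSOLE_UNLOCKED(). Composite operations such as do_take_over_console() then hold the lock once and call the workers directly, instead of dropping and retaking it between steps. A generic sketch, using an ordinary mutex as a stand-in for the console lock:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(big_lock);

/* Unlocked worker: the caller must hold big_lock. */
static int do_frob(int arg)
{
	lockdep_assert_held(&big_lock);
	/* ... the actual work ... */
	return arg;
}

/* Exported wrapper for external callers. */
int frob(int arg)
{
	int ret;

	mutex_lock(&big_lock);
	ret = do_frob(arg);
	mutex_unlock(&big_lock);
	return ret;
}

/* A composite operation takes the lock once and calls workers directly,
 * avoiding the unlock/relock windows the old code had. */
int frob_then_frob(int arg)
{
	int ret;

	mutex_lock(&big_lock);
	ret = do_frob(arg);
	if (ret >= 0)
		ret = do_frob(arg);
	mutex_unlock(&big_lock);
	return ret;
}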
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index c77f0d6..9f3003e 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -541,22 +541,10 @@ int usb_match_device(struct usb_device *dev, const struct usb_device_id *id)
+ }
+
+ /* returns 0 if no match, 1 if match */
+-int usb_match_one_id(struct usb_interface *interface,
+- const struct usb_device_id *id)
++int usb_match_one_id_intf(struct usb_device *dev,
++ struct usb_host_interface *intf,
++ const struct usb_device_id *id)
+ {
+- struct usb_host_interface *intf;
+- struct usb_device *dev;
+-
+- /* proc_connectinfo in devio.c may call us with id == NULL. */
+- if (id == NULL)
+- return 0;
+-
+- intf = interface->cur_altsetting;
+- dev = interface_to_usbdev(interface);
+-
+- if (!usb_match_device(dev, id))
+- return 0;
+-
+ /* The interface class, subclass, and protocol should never be
+ * checked for a match if the device class is Vendor Specific,
+ * unless the match record specifies the Vendor ID. */
+@@ -581,6 +569,26 @@ int usb_match_one_id(struct usb_interface *interface,
+
+ return 1;
+ }
++
++/* returns 0 if no match, 1 if match */
++int usb_match_one_id(struct usb_interface *interface,
++ const struct usb_device_id *id)
++{
++ struct usb_host_interface *intf;
++ struct usb_device *dev;
++
++ /* proc_connectinfo in devio.c may call us with id == NULL. */
++ if (id == NULL)
++ return 0;
++
++ intf = interface->cur_altsetting;
++ dev = interface_to_usbdev(interface);
++
++ if (!usb_match_device(dev, id))
++ return 0;
++
++ return usb_match_one_id_intf(dev, intf, id);
++}
+ EXPORT_SYMBOL_GPL(usb_match_one_id);
+
+ /**
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0ff8e9a..2564d8d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1883,7 +1883,7 @@ static int usb_enumerate_device(struct usb_device *udev)
+ if (err < 0) {
+ dev_err(&udev->dev, "can't read configurations, error %d\n",
+ err);
+- goto fail;
++ return err;
+ }
+ }
+ if (udev->wusb == 1 && udev->authorized == 0) {
+@@ -1899,8 +1899,12 @@ static int usb_enumerate_device(struct usb_device *udev)
+ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+ }
+ err = usb_enumerate_device_otg(udev);
+-fail:
+- return err;
++ if (err < 0)
++ return err;
++
++ usb_detect_interface_quirks(udev);
++
++ return 0;
+ }
+
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 3f08c09..0aaa4f1 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -15,17 +15,22 @@
+ #include <linux/usb/quirks.h>
+ #include "usb.h"
+
+-/* List of quirky USB devices. Please keep this list ordered by:
++/* Lists of quirky USB devices, split into device quirks and interface quirks.
++ * Device quirks are applied at the very beginning of the enumeration process,
++ * right after reading the device descriptor. They can thus only match on device
++ * information.
++ *
++ * Interface quirks are applied after reading all the configuration descriptors.
++ * They can match on both device and interface information.
++ *
++ * Note that the DELAY_INIT and HONOR_BNUMINTERFACES quirks do not make sense as
++ * interface quirks, as they only influence the enumeration process which is run
++ * before processing the interface quirks.
++ *
++ * Please keep the lists ordered by:
+ * 1) Vendor ID
+ * 2) Product ID
+ * 3) Class ID
+- *
+- * as we want specific devices to be overridden first, and only after that, any
+- * class specific quirks.
+- *
+- * Right now the logic aborts if it finds a valid device in the table, we might
+- * want to change that in the future if it turns out that a whole class of
+- * devices is broken...
+ */
+ static const struct usb_device_id usb_quirk_list[] = {
+ /* CBM - Flash disk */
+@@ -41,53 +46,23 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C200 */
+- { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C250 */
+- { USB_DEVICE(0x046d, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C300 */
+- { USB_DEVICE(0x046d, 0x0805), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam B/C500 */
+- { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C600 */
+- { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam Pro 9000 */
+- { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Fusion */
++ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C905 */
+- { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Orbit MP */
++ { USB_DEVICE(0x046d, 0x08c2), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C210 */
+- { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Pro for Notebook */
++ { USB_DEVICE(0x046d, 0x08c3), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C260 */
+- { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Pro 5000 */
++ { USB_DEVICE(0x046d, 0x08c5), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C310 */
+- { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam OEM Dell Notebook */
++ { USB_DEVICE(0x046d, 0x08c6), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C910 */
+- { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C160 */
+- { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C270 */
+- { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam Pro 9000 */
+- { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam E3500 */
+- { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam Vision Pro */
+- { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam OEM Cisco VT Camera II */
++ { USB_DEVICE(0x046d, 0x08c7), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Harmony 700-series */
+ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
+@@ -163,16 +138,57 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { } /* terminating entry must be last */
+ };
+
+-static const struct usb_device_id *find_id(struct usb_device *udev)
++static const struct usb_device_id usb_interface_quirk_list[] = {
++ /* Logitech UVC Cameras */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
++ .driver_info = USB_QUIRK_RESET_RESUME },
++
++ { } /* terminating entry must be last */
++};
++
++static bool usb_match_any_interface(struct usb_device *udev,
++ const struct usb_device_id *id)
+ {
+- const struct usb_device_id *id = usb_quirk_list;
++ unsigned int i;
+
+- for (; id->idVendor || id->bDeviceClass || id->bInterfaceClass ||
+- id->driver_info; id++) {
+- if (usb_match_device(udev, id))
+- return id;
++ for (i = 0; i < udev->descriptor.bNumConfigurations; ++i) {
++ struct usb_host_config *cfg = &udev->config[i];
++ unsigned int j;
++
++ for (j = 0; j < cfg->desc.bNumInterfaces; ++j) {
++ struct usb_interface_cache *cache;
++ struct usb_host_interface *intf;
++
++ cache = cfg->intf_cache[j];
++ if (cache->num_altsetting == 0)
++ continue;
++
++ intf = &cache->altsetting[0];
++ if (usb_match_one_id_intf(udev, intf, id))
++ return true;
++ }
++ }
++
++ return false;
++}
++
++static u32 __usb_detect_quirks(struct usb_device *udev,
++ const struct usb_device_id *id)
++{
++ u32 quirks = 0;
++
++ for (; id->match_flags; id++) {
++ if (!usb_match_device(udev, id))
++ continue;
++
++ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_INFO) &&
++ !usb_match_any_interface(udev, id))
++ continue;
++
++ quirks |= (u32)(id->driver_info);
+ }
+- return NULL;
++
++ return quirks;
+ }
+
+ /*
+@@ -180,14 +196,10 @@ static const struct usb_device_id *find_id(struct usb_device *udev)
+ */
+ void usb_detect_quirks(struct usb_device *udev)
+ {
+- const struct usb_device_id *id = usb_quirk_list;
+-
+- id = find_id(udev);
+- if (id)
+- udev->quirks = (u32)(id->driver_info);
++ udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
+ if (udev->quirks)
+ dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
+- udev->quirks);
++ udev->quirks);
+
+ /* For the present, all devices default to USB-PERSIST enabled */
+ #if 0 /* was: #ifdef CONFIG_PM */
+@@ -204,3 +216,16 @@ void usb_detect_quirks(struct usb_device *udev)
+ udev->persist_enabled = 1;
+ #endif /* CONFIG_PM */
+ }
++
++void usb_detect_interface_quirks(struct usb_device *udev)
++{
++ u32 quirks;
++
++ quirks = __usb_detect_quirks(udev, usb_interface_quirk_list);
++ if (quirks == 0)
++ return;
++
++ dev_dbg(&udev->dev, "USB interface quirks for this device: %x\n",
++ quirks);
++ udev->quirks |= quirks;
++}
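__usb_detect_quirks() also changes the matching policy: instead of stopping at the first table hit as the old find_id() did, it walks the whole table and ORs the driver_info bits of every matching entry, which is what lets the device and interface tables compose. A userspace sketch of that accumulation, with made-up IDs and flag bits:

#include <stdint.h>

struct quirk { uint16_t vendor, product; uint32_t flags; };

static const struct quirk quirk_table[] = {
	{ 0x046d, 0x08c1, 1u << 0 },	/* e.g. a RESET_RESUME-style flag */
	{ 0x046d, 0x08c2, 1u << 0 },
	{ 0x0458, 0x7025, 1u << 1 },
	{ 0, 0, 0 }			/* terminating entry must be last */
};

/* Every matching entry contributes its bits; nothing short-circuits. */
uint32_t detect_quirks(uint16_t vendor, uint16_t product)
{
	const struct quirk *q;
	uint32_t quirks = 0;

	for (q = quirk_table; q->vendor || q->product || q->flags; q++)
		if (q->vendor == vendor && q->product == product)
			quirks |= q->flags;

	return quirks;
}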
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 45e8479..3e1159b 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -24,6 +24,7 @@ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
+ extern int usb_deauthorize_device(struct usb_device *);
+ extern int usb_authorize_device(struct usb_device *);
+ extern void usb_detect_quirks(struct usb_device *udev);
++extern void usb_detect_interface_quirks(struct usb_device *udev);
+ extern int usb_remove_device(struct usb_device *udev);
+
+ extern int usb_get_device_descriptor(struct usb_device *dev,
+@@ -35,6 +36,9 @@ extern int usb_set_configuration(struct usb_device *dev, int configuration);
+ extern int usb_choose_configuration(struct usb_device *udev);
+
+ extern void usb_kick_khubd(struct usb_device *dev);
++extern int usb_match_one_id_intf(struct usb_device *dev,
++ struct usb_host_interface *intf,
++ const struct usb_device_id *id);
+ extern int usb_match_device(struct usb_device *dev,
+ const struct usb_device_id *id);
+ extern void usb_forced_unbind_intf(struct usb_interface *intf);
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index e39b029..d4159b8 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -337,7 +337,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+-MODULE_ALIAS("platform:omap-ehci");
++MODULE_ALIAS("platform:ehci-omap");
+ MODULE_AUTHOR("Texas Instruments, Inc.");
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index d644a66..71c4696 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1916,24 +1916,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+ {
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on && usb_control_msg(port->serial->dev,
++ /* Disable flow control */
++ if (!on) {
++ if (usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, priv->interface, NULL, 0,
+ WDR_TIMEOUT) < 0) {
+- dev_err(&port->dev, "error from flowcontrol urb\n");
++ dev_err(&port->dev, "error from flowcontrol urb\n");
+ }
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ /*
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index d3addb2..de0bb8e 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -558,19 +558,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
+ unsigned int control_state;
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* drop DTR and RTS */
+- spin_lock_irq(&priv->lock);
+- if (on)
+- priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+- else
+- priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+- control_state = priv->control_state;
+- spin_unlock_irq(&priv->lock);
+- mct_u232_set_modem_ctrl(port->serial, control_state);
+- }
+- mutex_unlock(&port->serial->disc_mutex);
++ spin_lock_irq(&priv->lock);
++ if (on)
++ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
++ else
++ priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
++ control_state = priv->control_state;
++ spin_unlock_irq(&priv->lock);
++
++ mct_u232_set_modem_ctrl(port->serial, control_state);
+ }
+
+ static void mct_u232_close(struct usb_serial_port *port)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 52cd814..24a3ea6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
++ .reserved = BIT(4),
+ };
+
+ static const struct option_blacklist_info zte_0037_blacklist = {
+@@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index e1f1ebd..a7fa673 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -891,19 +891,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+
+- if (serial->dev) {
+- mutex_lock(&serial->disc_mutex);
+- if (!serial->disconnected)
+- sierra_send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
+- }
++ sierra_send_setup(port);
+ }
+
+ static int sierra_startup(struct usb_serial *serial)
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index 87362e4..fff7f17 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -533,19 +533,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
+
+ dbg("%s\n", __func__);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on &&
+- ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
++ /* Disable flow control */
++ if (!on) {
++ if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index e5206de..dc1ce62 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -697,9 +697,20 @@ static int serial_carrier_raised(struct tty_port *port)
+ static void serial_dtr_rts(struct tty_port *port, int on)
+ {
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+- struct usb_serial_driver *drv = p->serial->type;
+- if (drv->dtr_rts)
++ struct usb_serial *serial = p->serial;
++ struct usb_serial_driver *drv = serial->type;
++
++ if (!drv->dtr_rts)
++ return;
++ /*
++ * Work around a bug in the tty layer that can result in dtr_rts
++ * being called after a disconnect (and after tty_unregister_device
++ * has returned). Remove once the bug has been squashed.
++ */
++ mutex_lock(&serial->disc_mutex);
++ if (!serial->disconnected)
+ drv->dtr_rts(p, on);
++ mutex_unlock(&serial->disc_mutex);
+ }
+
+ static const struct tty_port_operations serial_port_ops = {
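This is the other half of the ftdi_sio, mct_u232, sierra, ssu100 and usb_wwan hunks above: the disconnect check moves out of every driver's dtr_rts() and into the single core dispatch site. A sketch of hoisting a shared guard into the core, in userspace terms with hypothetical types:

#include <pthread.h>

struct port;	/* opaque to the core */

struct driver_ops {
	void (*dtr_rts)(struct port *p, int on);
};

struct core_dev {
	const struct driver_ops *ops;
	int disconnected;		/* protected by disc_lock */
	pthread_mutex_t disc_lock;
};

/* One guard at the dispatch site covers every driver's callback. */
void core_dtr_rts(struct core_dev *dev, struct port *p, int on)
{
	if (!dev->ops->dtr_rts)
		return;

	pthread_mutex_lock(&dev->disc_lock);
	if (!dev->disconnected)
		dev->ops->dtr_rts(p, on);
	pthread_mutex_unlock(&dev->disc_lock);
}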
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index d555ca9..6c92301 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -41,7 +41,6 @@ static int debug;
+
+ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+
+ struct usb_wwan_intf_private *intfdata;
+@@ -54,12 +53,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ return;
+
+ portdata = usb_get_serial_port_data(port);
+- mutex_lock(&serial->disc_mutex);
++ /* FIXME: locking */
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+- if (serial->dev)
+- intfdata->send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
++
++ intfdata->send_setup(port);
+ }
+ EXPORT_SYMBOL(usb_wwan_dtr_rts);
+
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 16b0bf0..7ab9046 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us)
+ int idProduct;
+
+ idesc = &us->pusb_intf->cur_altsetting->desc;
+- idProduct = us->pusb_dev->descriptor.idProduct;
++ idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ /* The first port is CDROM,
+ * means the dongle in the single port mode,
+ * and a switch command is required to be sent. */
+@@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us)
+ int result = 0;
+
+ if (usb_stor_huawei_dongles_pid(us)) {
+- if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++ if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+ result = usb_stor_huawei_scsi_init(us);
+ else
+ result = usb_stor_huawei_feature_init(us);
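Multi-byte fields in USB descriptors are little-endian on the wire, so comparing them raw only happens to work on little-endian hosts; the fix converts with le16_to_cpu() first. A sketch of the corrected test, with the threshold taken from the hunk above:

#include <linux/usb.h>

/* idProduct is __le16; convert before any arithmetic comparison so the
 * test also holds on big-endian machines. */
static bool needs_scsi_init(struct usb_device *udev)
{
	u16 pid = le16_to_cpu(udev->descriptor.idProduct);

	return pid >= 0x1446;
}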
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 2c85530..65a6a75 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ae66278..be32b1b 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1073,7 +1073,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ }
+ _iov = iov + ret;
+ size = reg->memory_size - addr + reg->guest_phys_addr;
+- _iov->iov_len = min((u64)len, size);
++ _iov->iov_len = min((u64)len - s, size);
+ _iov->iov_base = (void __user *)(unsigned long)
+ (reg->userspace_addr + addr - reg->guest_phys_addr);
+ s += size;
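The small-looking vhost change matters: when a guest buffer spans several memory regions, each iovec segment must be bounded by the bytes still outstanding (len - s), not by the full length, or every segment after the first overshoots. A standalone sketch of the translation loop with that bound in place, over a hypothetical region layout:

#include <stddef.h>
#include <stdint.h>

struct region { uint64_t gpa, size, hva; };	/* guest phys -> host virt */
struct seg { uint64_t base, len; };

int translate(const struct region *r, size_t nr_regions,
	      uint64_t addr, uint64_t len, struct seg *out, size_t max_segs)
{
	uint64_t done = 0;
	size_t n = 0, i;

	while (done < len) {
		uint64_t avail = 0;

		for (i = 0; i < nr_regions; i++) {
			if (addr >= r[i].gpa && addr < r[i].gpa + r[i].size) {
				avail = r[i].gpa + r[i].size - addr;
				out[n].base = r[i].hva + (addr - r[i].gpa);
				break;
			}
		}
		if (avail == 0 || n == max_segs)
			return -1;	/* unmapped address or out of segments */

		/* Bound by what is left of the request, not the whole len. */
		out[n].len = (len - done < avail) ? len - done : avail;
		done += out[n].len;
		addr += out[n].len;
		n++;
	}
	return (int)n;
}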
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
+index 66bc74d..b35c857 100644
+--- a/drivers/video/backlight/adp8860_bl.c
++++ b/drivers/video/backlight/adp8860_bl.c
+@@ -791,7 +791,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8860_i2c_resume(struct i2c_client *client)
+ {
+- adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
++ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
+index 6c68a68..25a9b3a 100644
+--- a/drivers/video/backlight/adp8870_bl.c
++++ b/drivers/video/backlight/adp8870_bl.c
+@@ -965,7 +965,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8870_i2c_resume(struct i2c_client *client)
+ {
+- adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
++ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index bf9a9b7..9b8bcab 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -530,6 +530,33 @@ static int search_for_mapped_con(void)
+ return retval;
+ }
+
++static int do_fbcon_takeover(int show_logo)
++{
++ int err, i;
++
++ if (!num_registered_fb)
++ return -ENODEV;
++
++ if (!show_logo)
++ logo_shown = FBCON_LOGO_DONTSHOW;
++
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = info_idx;
++
++ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
++ fbcon_is_default);
++
++ if (err) {
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = -1;
++ info_idx = -1;
++ } else {
++ fbcon_has_console_bind = 1;
++ }
++
++ return err;
++}
++
+ static int fbcon_takeover(int show_logo)
+ {
+ int err, i;
+@@ -991,7 +1018,7 @@ static const char *fbcon_startup(void)
+ }
+
+ /* Setup default font */
+- if (!p->fontdata) {
++ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+ font = get_default_font(info->var.xres,
+ info->var.yres,
+@@ -1001,6 +1028,8 @@ static const char *fbcon_startup(void)
+ vc->vc_font.height = font->height;
+ vc->vc_font.data = (void *)(p->fontdata = font->data);
+ vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
++ } else {
++ p->fontdata = vc->vc_font.data;
+ }
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1160,9 +1189,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ ops->p = &fb_display[fg_console];
+ }
+
+-static void fbcon_free_font(struct display *p)
++static void fbcon_free_font(struct display *p, bool freefont)
+ {
+- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ p->fontdata = NULL;
+ p->userfont = 0;
+@@ -1174,8 +1203,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx;
++ bool free_font = true;
+
+- fbcon_free_font(p);
+ idx = con2fb_map[vc->vc_num];
+
+ if (idx == -1)
+@@ -1186,6 +1215,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ if (!info)
+ goto finished;
+
++ if (info->flags & FBINFO_MISC_FIRMWARE)
++ free_font = false;
+ ops = info->fbcon_par;
+
+ if (!ops)
+@@ -1197,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+
++ fbcon_free_font(p, free_font);
++
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
+@@ -2978,7 +3011,7 @@ static int fbcon_unbind(void)
+ {
+ int ret;
+
+- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
++ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (!ret)
+@@ -3051,7 +3084,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ primary_device = -1;
+
+ if (!num_registered_fb)
+- unregister_con_driver(&fb_con);
++ do_unregister_con_driver(&fb_con);
+
+ return 0;
+ }
+@@ -3116,7 +3149,7 @@ static int fbcon_fb_registered(struct fb_info *info)
+ }
+
+ if (info_idx != -1)
+- ret = fbcon_takeover(1);
++ ret = do_fbcon_takeover(1);
+ } else {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map_boot[i] == idx)
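These fbcon hunks introduce do_fbcon_takeover() and switch callers to the do_*() console helpers, which assume the console lock is already held, so notifier-chain paths that run under console_lock can take over or unregister console drivers without deadlocking. A minimal sketch of the public-wrapper/locked-variant convention, with a pthread mutex standing in for console_lock:

#include <pthread.h>

static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;

/* do_*() variant: the caller must already hold console_mutex */
static int do_takeover(int show_logo)
{
	(void)show_logo;
	/* rebind consoles here under the lock */
	return 0;
}

/* public entry point: takes the lock, then defers to the do_*() body */
static int takeover(int show_logo)
{
	int err;

	pthread_mutex_lock(&console_mutex);
	err = do_takeover(show_logo);
	pthread_mutex_unlock(&console_mutex);
	return err;
}

int main(void)
{
	return takeover(1);
}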
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index d449a74..5855d17 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ unsigned short video_port_status = vga_video_port_reg + 6;
+ int font_select = 0x00, beg, i;
+ char *charmap;
+-
++ bool clear_attribs = false;
+ if (vga_video_type != VIDEO_TYPE_EGAM) {
+ charmap = (char *) VGA_MAP_MEM(colourmap, 0);
+ beg = 0x0e;
+@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ /* if 512 char mode is already enabled don't re-enable it. */
+ if ((set) && (ch512 != vga_512_chars)) {
+- /* attribute controller */
+- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+- struct vc_data *c = vc_cons[i].d;
+- if (c && c->vc_sw == &vga_con)
+- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+- }
+ vga_512_chars = ch512;
+ /* 256-char: enable intensity bit
+ 512-char: disable intensity bit */
+@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ it means, but it works, and it appears necessary */
+ inb_p(video_port_status);
+ vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
++ clear_attribs = true;
+ }
+ raw_spin_unlock_irq(&vga_lock);
++
++ if (clear_attribs) {
++ for (i = 0; i < MAX_NR_CONSOLES; i++) {
++ struct vc_data *c = vc_cons[i].d;
++ if (c && c->vc_sw == &vga_con) {
++ /* force hi font mask to 0, so we always clear
++ the bit on either transition */
++ c->vc_hi_font_mask = 0x00;
++ clear_buffer_attributes(c);
++ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
++ }
++ }
++ }
+ return 0;
+ }
+
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 7a41220..c133dde 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1628,7 +1628,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ event.info = fb_info;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+ return 0;
+ }
+@@ -1644,8 +1646,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+
+ if (ret)
+@@ -1660,7 +1664,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ num_registered_fb--;
+ fb_cleanup_device(fb_info);
+ event.info = fb_info;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
++ console_unlock();
+
+ /* this may free fb info */
+ put_fb_info(fb_info);
+@@ -1831,11 +1837,8 @@ int fb_new_modelist(struct fb_info *info)
+ err = 1;
+
+ if (!list_empty(&info->modelist)) {
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ event.info = info;
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+- unlock_fb_info(info);
+ }
+
+ return err;
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index 67afa9c..303fb9f 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device,
+ if (i * sizeof(struct fb_videomode) != count)
+ return -EINVAL;
+
++ if (!lock_fb_info(fb_info))
++ return -ENODEV;
+ console_lock();
+ list_splice(&fb_info->modelist, &old_list);
+ fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+@@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device,
+ fb_destroy_modelist(&old_list);
+
+ console_unlock();
++ unlock_fb_info(fb_info);
+
+ return 0;
+ }
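Together, the fbmem.c and fbsysfs.c hunks establish one lock order for framebuffer notifier calls: lock_fb_info() first, then console_lock(). Keeping that order identical in every path is what rules out an ABBA deadlock. A tiny sketch of the rule, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;	  /* outer */
static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void notify_chain(void)
{
	pthread_mutex_lock(&info_lock);		/* always the outer lock first */
	pthread_mutex_lock(&console_mutex);	/* then the inner one */
	/* call the notifier chain here */
	pthread_mutex_unlock(&console_mutex);
	pthread_mutex_unlock(&info_lock);
}

int main(void)
{
	notify_chain();
	return 0;
}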
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index dbc13e9..c93d59e 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
+ u->name, (void *)(unsigned long)port);
+ if (rc >= 0)
+ rc = 0;
++ else {
++ /* bind failed, should close the port now */
++ struct evtchn_close close;
++ close.port = port;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++ set_port_user(port, NULL);
++ }
+
+ return rc;
+ }
+@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+ {
+ int irq = irq_from_evtchn(port);
+
++ BUG_ON(irq < 0);
++
+ unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+
+ set_port_user(port, NULL);
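The evtchn hunk adds a rollback on the failure path: if binding the irq handler fails after the event channel was already bound, the port is closed again instead of being leaked. A minimal sketch of that undo-on-failure shape; all functions are stand-ins:

#include <stdio.h>

static int open_port(int port)
{
	return port >= 0 ? 0 : -1;	/* pretend the hypervisor call worked */
}

static void close_port(int port)
{
	printf("closed port %d instead of leaking it\n", port);
}

static int bind_handler(int port)
{
	return port == 7 ? -1 : 0;	/* pretend binding port 7 fails */
}

static int bind_to_user(int port)
{
	int rc = open_port(port);

	if (rc)
		return rc;

	rc = bind_handler(port);
	if (rc)
		close_port(port);	/* roll back step one on failure */
	return rc;
}

int main(void)
{
	return bind_to_user(7) ? 0 : 1;
}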
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 63616d7..d07c4cd 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -8,6 +8,7 @@
+ #include <linux/bitops.h>
+ #include <xen/events.h>
+ #include <linux/sched.h>
++#include <linux/ratelimit.h>
+ #include "pciback.h"
+
+ int verbose_request;
+@@ -135,7 +136,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+ {
+ struct xen_pcibk_dev_data *dev_data;
+- int otherend = pdev->xdev->otherend_id;
+ int status;
+
+ if (unlikely(verbose_request))
+@@ -144,8 +144,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ status = pci_enable_msi(dev);
+
+ if (status) {
+- printk(KERN_ERR "error enable msi for guest %x status %x\n",
+- otherend, status);
++ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
++ pci_name(dev), pdev->xdev->otherend_id,
++ status);
+ op->value = 0;
+ return XEN_PCI_ERR_op_failed;
+ }
+@@ -223,10 +224,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ pci_name(dev), i,
+ op->msix_entries[i].vector);
+ }
+- } else {
+- printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
+- pci_name(dev), result);
+- }
++ } else
++ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
++ pci_name(dev), pdev->xdev->otherend_id,
++ result);
+ kfree(entries);
+
+ op->value = result;
+diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
+index b8e8b0a..4a1b984 100644
+--- a/fs/binfmt_em86.c
++++ b/fs/binfmt_em86.c
+@@ -42,7 +42,6 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
+ return -ENOEXEC;
+ }
+
+- bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index ca52e92..7423cb9 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -116,10 +116,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (!enabled)
+ goto _ret;
+
+- retval = -ENOEXEC;
+- if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
+- goto _ret;
+-
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+@@ -199,8 +195,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (retval < 0)
+ goto _error;
+
+- bprm->recursion_depth++;
+-
+ retval = search_binary_handler (bprm, regs);
+ if (retval < 0)
+ goto _error;
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index e39c18a..211ede0 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ char interp[BINPRM_BUF_SIZE];
+ int retval;
+
+- if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
+- (bprm->recursion_depth > BINPRM_MAX_RECURSION))
++ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
+ return -ENOEXEC;
+ /*
+ * This section does the #! interpretation.
+ * Sorta complicated, but hopefully it will work. -TYT
+ */
+
+- bprm->recursion_depth++;
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 9b98987..613edd8 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -82,13 +82,14 @@ sector_t blkdev_max_block(struct block_device *bdev)
+ }
+
+ /* Kill _all_ buffers and pagecache , dirty or not.. */
+-static void kill_bdev(struct block_device *bdev)
++void kill_bdev(struct block_device *bdev)
+ {
+ if (bdev->bd_inode->i_mapping->nrpages == 0)
+ return;
+ invalidate_bh_lrus();
+ truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ }
++EXPORT_SYMBOL(kill_bdev);
+
+ int set_blocksize(struct block_device *bdev, int size)
+ {
+@@ -1024,6 +1025,7 @@ int revalidate_disk(struct gendisk *disk)
+
+ mutex_lock(&bdev->bd_mutex);
+ check_disk_size_change(disk, bdev);
++ bdev->bd_invalidated = 0;
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+ return ret;
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 0e3c092..b4d2438 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -918,7 +918,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
+ * own time */
+ dget(object->backer);
+ mntget(cache->mnt);
+- file = dentry_open(object->backer, cache->mnt, O_RDWR,
++ file = dentry_open(object->backer, cache->mnt, O_RDWR | O_LARGEFILE,
+ cache->cache_cred);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index d740ab6..ac401d2 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -304,9 +304,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
+ dio->end_io(dio->iocb, offset, transferred,
+ dio->private, ret, is_async);
+ } else {
++ inode_dio_done(dio->inode);
+ if (is_async)
+ aio_complete(dio->iocb, ret, 0);
+- inode_dio_done(dio->inode);
+ }
+
+ return ret;
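The dio_complete() hunk reorders completion so inode_dio_done() runs before aio_complete(): once the async completion fires, nothing may rely on the inode's DIO accounting still being pinned. A rough sketch of the "release state first, signal last" ordering, with hypothetical types:

#include <pthread.h>

struct fake_inode {
	pthread_mutex_t lock;
	int dio_count;
	int completed;
};

static void complete_io(struct fake_inode *inode)
{
	pthread_mutex_lock(&inode->lock);
	inode->dio_count--;		/* step 1: drop DIO accounting */
	pthread_mutex_unlock(&inode->lock);

	inode->completed = 1;		/* step 2: signal completion last;
					 * once a waiter sees this, it may
					 * tear the object down */
}

int main(void)
{
	struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	complete_io(&inode);
	return inode.completed ? 0 : 1;
}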
+diff --git a/fs/exec.c b/fs/exec.c
+index c27fa0d..312e297 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1385,6 +1385,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ struct linux_binfmt *fmt;
+ pid_t old_pid;
+
++ /* This allows 4 levels of binfmt rewrites before failing hard. */
++ if (depth > 5)
++ return -ELOOP;
++
+ retval = security_bprm_check(bprm);
+ if (retval)
+ return retval;
+@@ -1408,12 +1412,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ if (!try_module_get(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
++ bprm->recursion_depth = depth + 1;
+ retval = fn(bprm, regs);
+- /*
+- * Restore the depth counter to its starting value
+- * in this call, so we don't have to rely on every
+- * load_binary function to restore it on return.
+- */
+ bprm->recursion_depth = depth;
+ if (retval >= 0) {
+ if (depth == 0)
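The fs/exec.c hunk centralizes recursion accounting: search_binary_handler() bumps bprm->recursion_depth around each handler call and fails with -ELOOP past four rewrites, so binfmt_misc, binfmt_script and binfmt_em86 no longer manage the counter themselves (their hunks above just delete it). A compact user-space sketch of the scheme:

#include <stdio.h>

#define MAX_DEPTH 5	/* allows 4 levels of binfmt rewrites */

struct binprm {
	int recursion_depth;
};

static int run_handlers(struct binprm *bprm);

/* a handler that always re-dispatches, as a #! interpreter would */
static int script_handler(struct binprm *bprm)
{
	return run_handlers(bprm);
}

static int run_handlers(struct binprm *bprm)
{
	int depth = bprm->recursion_depth;
	int ret;

	if (depth > MAX_DEPTH)
		return -1;			/* -ELOOP in the kernel */

	bprm->recursion_depth = depth + 1;	/* charged around the call */
	ret = script_handler(bprm);
	bprm->recursion_depth = depth;		/* restored for the caller */
	return ret;
}

int main(void)
{
	struct binprm bprm = { 0 };
	int ret = run_handlers(&bprm);

	/* prints "result -1 at depth 0": bounded, and fully unwound */
	printf("result %d at depth %d\n", ret, bprm.recursion_depth);
	return 0;
}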
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index d6970f7..484ffee 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -420,11 +420,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+
+ free_clusters = percpu_counter_read_positive(fcc);
+ dirty_clusters = percpu_counter_read_positive(dcc);
+- root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
++
++ /*
++	 * r_blocks_count should always be a multiple of the cluster ratio so
++	 * we are safe to do a plain bit shift only.
++ */
++ root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+
+ if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
+ EXT4_FREECLUSTERS_WATERMARK) {
+- free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
++ free_clusters = percpu_counter_sum_positive(fcc);
+ dirty_clusters = percpu_counter_sum_positive(dcc);
+ }
+ /* Check whether we have space after accounting for current
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index fbb92e6..b48e0dc 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -45,6 +45,17 @@
+
+ #include <trace/events/ext4.h>
+
++/*
++ * used by extent splitting.
++ */
++#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
++ due to ENOSPC */
++#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
++#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
++
++#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++
+ static int ext4_split_extent(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+@@ -52,6 +63,13 @@ static int ext4_split_extent(handle_t *handle,
+ int split_flag,
+ int flags);
+
++static int ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag,
++ int flags);
++
+ static int ext4_ext_truncate_extend_restart(handle_t *handle,
+ struct inode *inode,
+ int needed)
+@@ -636,6 +654,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ struct ext4_extent_header *eh;
+ struct buffer_head *bh;
+ short int depth, i, ppos = 0, alloc = 0;
++ int ret;
+
+ eh = ext_inode_hdr(inode);
+ depth = ext_depth(inode);
+@@ -665,12 +684,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ path[ppos].p_ext = NULL;
+
+ bh = sb_getblk(inode->i_sb, path[ppos].p_block);
+- if (unlikely(!bh))
++ if (unlikely(!bh)) {
++ ret = -ENOMEM;
+ goto err;
++ }
+ if (!bh_uptodate_or_lock(bh)) {
+ trace_ext4_ext_load_extent(inode, block,
+ path[ppos].p_block);
+- if (bh_submit_read(bh) < 0) {
++ ret = bh_submit_read(bh);
++ if (ret < 0) {
+ put_bh(bh);
+ goto err;
+ }
+@@ -683,13 +705,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ put_bh(bh);
+ EXT4_ERROR_INODE(inode,
+ "ppos %d > depth %d", ppos, depth);
++ ret = -EIO;
+ goto err;
+ }
+ path[ppos].p_bh = bh;
+ path[ppos].p_hdr = eh;
+ i--;
+
+- if (need_to_validate && ext4_ext_check(inode, eh, i))
++ ret = need_to_validate ? ext4_ext_check(inode, eh, i) : 0;
++ if (ret < 0)
+ goto err;
+ }
+
+@@ -711,7 +735,7 @@ err:
+ ext4_ext_drop_refs(path);
+ if (alloc)
+ kfree(path);
+- return ERR_PTR(-EIO);
++ return ERR_PTR(ret);
+ }
+
+ /*
+@@ -866,7 +890,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ }
+ bh = sb_getblk(inode->i_sb, newblock);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto cleanup;
+ }
+ lock_buffer(bh);
+@@ -938,7 +962,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ newblock = ablocks[--a];
+ bh = sb_getblk(inode->i_sb, newblock);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto cleanup;
+ }
+ lock_buffer(bh);
+@@ -1049,11 +1073,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ return err;
+
+ bh = sb_getblk(inode->i_sb, newblock);
+- if (!bh) {
+- err = -EIO;
+- ext4_std_error(inode->i_sb, err);
+- return err;
+- }
++ if (!bh)
++ return -ENOMEM;
+ lock_buffer(bh);
+
+ err = ext4_journal_get_create_access(handle, bh);
+@@ -2321,7 +2342,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ struct ext4_extent *ex;
+
+ /* the header must be checked already in ext4_ext_remove_space() */
+- ext_debug("truncate since %u in leaf\n", start);
++ ext_debug("truncate since %u in leaf to %u\n", start, end);
+ if (!path[depth].p_hdr)
+ path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
+ eh = path[depth].p_hdr;
+@@ -2356,7 +2377,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ ext_debug(" border %u:%u\n", a, b);
+
+ /* If this extent is beyond the end of the hole, skip it */
+- if (end <= ex_ee_block) {
++ if (end < ex_ee_block) {
+ ex--;
+ ex_ee_block = le32_to_cpu(ex->ee_block);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
+@@ -2495,16 +2516,17 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
+ return 1;
+ }
+
+-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
++static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
++ ext4_lblk_t end)
+ {
+ struct super_block *sb = inode->i_sb;
+ int depth = ext_depth(inode);
+- struct ext4_ext_path *path;
++ struct ext4_ext_path *path = NULL;
+ ext4_fsblk_t partial_cluster = 0;
+ handle_t *handle;
+- int i, err;
++ int i = 0, err;
+
+- ext_debug("truncate since %u\n", start);
++ ext_debug("truncate since %u to %u\n", start, end);
+
+ /* probably first extent we're gonna free will be last in block */
+ handle = ext4_journal_start(inode, depth + 1);
+@@ -2517,29 +2539,96 @@ again:
+ trace_ext4_ext_remove_space(inode, start, depth);
+
+ /*
++ * Check if we are removing extents inside the extent tree. If that
++ * is the case, we are going to punch a hole inside the extent tree
++	 * so we have to check whether we need to split the extent covering
++	 * the last block to remove, so that we can easily remove that part
++	 * of it in ext4_ext_rm_leaf().
++ */
++ if (end < EXT_MAX_BLOCKS - 1) {
++ struct ext4_extent *ex;
++ ext4_lblk_t ee_block;
++
++ /* find extent for this block */
++ path = ext4_ext_find_extent(inode, end, NULL);
++ if (IS_ERR(path)) {
++ ext4_journal_stop(handle);
++ return PTR_ERR(path);
++ }
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++ if (!ex) {
++ ext4_ext_drop_refs(path);
++ kfree(path);
++ path = NULL;
++ goto cont;
++ }
++
++ ee_block = le32_to_cpu(ex->ee_block);
++
++ /*
++		 * See if the last block is inside the extent; if so, split
++ * the extent at 'end' block so we can easily remove the
++ * tail of the first part of the split extent in
++ * ext4_ext_rm_leaf().
++ */
++ if (end >= ee_block &&
++ end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
++ int split_flag = 0;
++
++ if (ext4_ext_is_uninitialized(ex))
++ split_flag = EXT4_EXT_MARK_UNINIT1 |
++ EXT4_EXT_MARK_UNINIT2;
++
++ /*
++ * Split the extent in two so that 'end' is the last
++ * block in the first new extent
++ */
++ err = ext4_split_extent_at(handle, inode, path,
++ end + 1, split_flag,
++ EXT4_GET_BLOCKS_PRE_IO |
++ EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
++
++ if (err < 0)
++ goto out;
++ }
++ }
++cont:
++
++ /*
+ * We start scanning from right side, freeing all the blocks
+ * after i_size and walking into the tree depth-wise.
+ */
+ depth = ext_depth(inode);
+- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
+- if (path == NULL) {
+- ext4_journal_stop(handle);
+- return -ENOMEM;
+- }
+- path[0].p_depth = depth;
+- path[0].p_hdr = ext_inode_hdr(inode);
+- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+- err = -EIO;
+- goto out;
++ if (path) {
++ int k = i = depth;
++ while (--k > 0)
++ path[k].p_block =
++ le16_to_cpu(path[k].p_hdr->eh_entries)+1;
++ } else {
++ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
++ GFP_NOFS);
++ if (path == NULL) {
++ ext4_journal_stop(handle);
++ return -ENOMEM;
++ }
++ path[0].p_depth = depth;
++ path[0].p_hdr = ext_inode_hdr(inode);
++ i = 0;
++
++ if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
++ err = -EIO;
++ goto out;
++ }
+ }
+- i = err = 0;
++ err = 0;
+
+ while (i >= 0 && err == 0) {
+ if (i == depth) {
+ /* this is leaf block */
+ err = ext4_ext_rm_leaf(handle, inode, path,
+ &partial_cluster, start,
+- EXT_MAX_BLOCKS - 1);
++ end);
+ /* root level has p_bh == NULL, brelse() eats this */
+ brelse(path[i].p_bh);
+ path[i].p_bh = NULL;
+@@ -2646,8 +2735,10 @@ again:
+ out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+- if (err == -EAGAIN)
++ if (err == -EAGAIN) {
++ path = NULL;
+ goto again;
++ }
+ ext4_journal_stop(handle);
+
+ return err;
+@@ -2722,17 +2813,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ }
+
+ /*
+- * used by extent splitting.
+- */
+-#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
+- due to ENOSPC */
+-#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
+-#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+-
+-#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+-#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+-
+-/*
+ * ext4_split_extent_at() splits an extent at given block.
+ *
+ * @handle: the journal handle
+@@ -4274,7 +4354,7 @@ void ext4_ext_truncate(struct inode *inode)
+
+ last_block = (inode->i_size + sb->s_blocksize - 1)
+ >> EXT4_BLOCK_SIZE_BITS(sb);
+- err = ext4_ext_remove_space(inode, last_block);
++ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+
+ /* In a multi-transaction truncate, we only make the final
+ * transaction synchronous.
+@@ -4751,14 +4831,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+- struct ext4_ext_cache cache_ex;
+- ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
++ ext4_lblk_t first_block, stop_block;
+ struct address_space *mapping = inode->i_mapping;
+- struct ext4_map_blocks map;
+ handle_t *handle;
+ loff_t first_page, last_page, page_len;
+ loff_t first_page_offset, last_page_offset;
+- int ret, credits, blocks_released, err = 0;
++ int credits, err = 0;
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+@@ -4774,10 +4852,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ offset;
+ }
+
+- first_block = (offset + sb->s_blocksize - 1) >>
+- EXT4_BLOCK_SIZE_BITS(sb);
+- last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+ first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+
+@@ -4856,7 +4930,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ }
+ }
+
+-
+ /*
+ * If i_size is contained in the last page, we need to
+ * unmap and zero the partial page after i_size
+@@ -4876,73 +4949,22 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ }
+ }
+
++ first_block = (offset + sb->s_blocksize - 1) >>
++ EXT4_BLOCK_SIZE_BITS(sb);
++ stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
++
+ /* If there are no blocks to remove, return now */
+- if (first_block >= last_block)
++ if (first_block >= stop_block)
+ goto out;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_ext_invalidate_cache(inode);
+ ext4_discard_preallocations(inode);
+
+- /*
+- * Loop over all the blocks and identify blocks
+- * that need to be punched out
+- */
+- iblock = first_block;
+- blocks_released = 0;
+- while (iblock < last_block) {
+- max_blocks = last_block - iblock;
+- num_blocks = 1;
+- memset(&map, 0, sizeof(map));
+- map.m_lblk = iblock;
+- map.m_len = max_blocks;
+- ret = ext4_ext_map_blocks(handle, inode, &map,
+- EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
+-
+- if (ret > 0) {
+- blocks_released += ret;
+- num_blocks = ret;
+- } else if (ret == 0) {
+- /*
+- * If map blocks could not find the block,
+- * then it is in a hole. If the hole was
+- * not already cached, then map blocks should
+- * put it in the cache. So we can get the hole
+- * out of the cache
+- */
+- memset(&cache_ex, 0, sizeof(cache_ex));
+- if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
+- !cache_ex.ec_start) {
+-
+- /* The hole is cached */
+- num_blocks = cache_ex.ec_block +
+- cache_ex.ec_len - iblock;
++ err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
+
+- } else {
+- /* The block could not be identified */
+- err = -EIO;
+- break;
+- }
+- } else {
+- /* Map blocks error */
+- err = ret;
+- break;
+- }
+-
+- if (num_blocks == 0) {
+- /* This condition should never happen */
+- ext_debug("Block lookup failed");
+- err = -EIO;
+- break;
+- }
+-
+- iblock += num_blocks;
+- }
+-
+- if (blocks_released > 0) {
+- ext4_ext_invalidate_cache(inode);
+- ext4_discard_preallocations(inode);
+- }
++ ext4_ext_invalidate_cache(inode);
++ ext4_discard_preallocations(inode);
+
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
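The reworked punch-hole path above first splits any extent that straddles the end of the hole at end + 1, then removes the whole range with a single ext4_ext_remove_space(first_block, stop_block - 1) call, replacing the old block-by-block mapping loop. A small sketch of just the split decision, mirroring the end >= ee_block && end < ee_block + len - 1 test:

#include <stdio.h>

struct extent {
	unsigned int start;
	unsigned int len;
};

/* true when 'end' falls strictly inside the extent, so a tail survives */
static int needs_split(const struct extent *ex, unsigned int end)
{
	return end >= ex->start && end < ex->start + ex->len - 1;
}

int main(void)
{
	struct extent ex = { .start = 100, .len = 50 };	/* blocks 100..149 */
	unsigned int end = 120;				/* hole ends at 120 */

	if (needs_split(&ex, end))
		printf("split at %u, keep blocks %u..%u\n",
		       end + 1, end + 1, ex.start + ex.len - 1);
	return 0;
}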
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 3cfc73f..26d6dbf 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ struct super_block *sb = inode->i_sb;
+ Indirect *p = chain;
+ struct buffer_head *bh;
++ int ret = -EIO;
+
+ *err = 0;
+ /* i_data is not going away, no lock needed */
+@@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ goto no_block;
+ while (--depth) {
+ bh = sb_getblk(sb, le32_to_cpu(p->key));
+- if (unlikely(!bh))
++ if (unlikely(!bh)) {
++ ret = -ENOMEM;
+ goto failure;
++ }
+
+ if (!bh_uptodate_or_lock(bh)) {
+ if (bh_submit_read(bh) < 0) {
+@@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ return NULL;
+
+ failure:
+- *err = -EIO;
++ *err = ret;
+ no_block:
+ return p;
+ }
+@@ -471,7 +474,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto failed;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8424dda..4b2bb75 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -661,7 +661,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
+
+ bh = sb_getblk(inode->i_sb, map.m_pblk);
+ if (!bh) {
+- *errp = -EIO;
++ *errp = -ENOMEM;
+ return NULL;
+ }
+ if (map.m_flags & EXT4_MAP_NEW) {
+@@ -2795,9 +2795,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ out:
++ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+- inode_dio_done(inode);
+ return;
+ }
+
+@@ -3575,11 +3575,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
+ iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+
+ bh = sb_getblk(sb, block);
+- if (!bh) {
+- EXT4_ERROR_INODE_BLOCK(inode, block,
+- "unable to read itable block");
+- return -EIO;
+- }
++ if (!bh)
++ return -ENOMEM;
+ if (!buffer_uptodate(bh)) {
+ lock_buffer(bh);
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1d07c12..553ff71 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4178,7 +4178,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ /* The max size of hash table is PREALLOC_TB_SIZE */
+ order = PREALLOC_TB_SIZE - 1;
+ /* Add the prealloc space to lg */
+- rcu_read_lock();
++ spin_lock(&lg->lg_prealloc_lock);
+ list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+ pa_inode_list) {
+ spin_lock(&tmp_pa->pa_lock);
+@@ -4202,12 +4202,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ if (!added)
+ list_add_tail_rcu(&pa->pa_inode_list,
+ &lg->lg_prealloc_list[order]);
+- rcu_read_unlock();
++ spin_unlock(&lg->lg_prealloc_lock);
+
+ /* Now trim the list to be not more than 8 elements */
+ if (lg_prealloc_count > 8) {
+ ext4_mb_discard_lg_preallocations(sb, lg,
+- order, lg_prealloc_count);
++ order, lg_prealloc_count);
+ return;
+ }
+ return ;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 7ea4ba4..f3358ab 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -41,6 +41,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ * is not blocked in the elevator. */
+ if (!*bh)
+ *bh = sb_getblk(sb, mmp_block);
++ if (!*bh)
++ return -ENOMEM;
+ if (*bh) {
+ get_bh(*bh);
+ lock_buffer(*bh);
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 24feb1c..54f566d 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -108,14 +108,13 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
+ inode->i_ino, offset, size, ret);
+ }
+
+- if (io->iocb)
+- aio_complete(io->iocb, io->result, 0);
+-
+- if (io->flag & EXT4_IO_END_DIRECT)
+- inode_dio_done(inode);
+ /* Wake up anyone waiting on unwritten extent conversion */
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
+ wake_up_all(ext4_ioend_wq(io->inode));
++ if (io->flag & EXT4_IO_END_DIRECT)
++ inode_dio_done(inode);
++ if (io->iocb)
++ aio_complete(io->iocb, io->result, 0);
+ return ret;
+ }
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 4eac337..33129c0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -142,7 +142,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
+
+ bh = sb_getblk(sb, blk);
+ if (!bh)
+- return ERR_PTR(-EIO);
++ return ERR_PTR(-ENOMEM);
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
+ bh = ERR_PTR(err);
+@@ -220,7 +220,7 @@ static int setup_new_group_blocks(struct super_block *sb,
+
+ gdb = sb_getblk(sb, block);
+ if (!gdb) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto exit_journal;
+ }
+ if ((err = ext4_journal_get_write_access(handle, gdb))) {
+@@ -694,7 +694,7 @@ static void update_backups(struct super_block *sb,
+
+ bh = sb_getblk(sb, group * bpg + blk_off);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ break;
+ }
+ ext4_debug("update metadata backup %#04lx\n",
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 4410ae7..d5498b2 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -496,7 +496,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- dquot_free_block(inode, 1);
++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
+ }
+@@ -785,7 +785,8 @@ inserted:
+ else {
+ /* The old block is released after updating
+ the inode. */
+- error = dquot_alloc_block(inode, 1);
++ error = dquot_alloc_block(inode,
++ EXT4_C2B(EXT4_SB(sb), 1));
+ if (error)
+ goto cleanup;
+ error = ext4_journal_get_write_access(handle,
+@@ -839,16 +840,17 @@ inserted:
+
+ new_bh = sb_getblk(sb, block);
+ if (!new_bh) {
++ error = -ENOMEM;
+ getblk_failed:
+ ext4_free_blocks(handle, inode, NULL, block, 1,
+ EXT4_FREE_BLOCKS_METADATA);
+- error = -EIO;
+ goto cleanup;
+ }
+ lock_buffer(new_bh);
+ error = ext4_journal_get_create_access(handle, new_bh);
+ if (error) {
+ unlock_buffer(new_bh);
++ error = -EIO;
+ goto getblk_failed;
+ }
+ memcpy(new_bh->b_data, s->base, new_bh->b_size);
+@@ -880,7 +882,7 @@ cleanup:
+ return error;
+
+ cleanup_dquot:
+- dquot_free_block(inode, 1);
++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
+ goto cleanup;
+
+ bad_block:
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 8392cb8..a3a0987 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,6 +551,9 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
++ /* Resend the blocking lock request after a server reboot */
++ if (resp->status == nlm_lck_denied_grace_period)
++ continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 1aaa0ee..b17a81c 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -1101,6 +1101,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = {
+ static struct pnfs_layoutdriver_type blocklayout_type = {
+ .id = LAYOUT_BLOCK_VOLUME,
+ .name = "LAYOUT_BLOCK_VOLUME",
++ .owner = THIS_MODULE,
+ .read_pagelist = bl_read_pagelist,
+ .write_pagelist = bl_write_pagelist,
+ .alloc_layout_hdr = bl_alloc_layout_hdr,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2f98c53..6d7c53d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5891,7 +5891,8 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status == 0)
+ status = task->tk_status;
+- if (status == 0)
++ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
++ if (status == 0 && lgp->res.layoutp->len)
+ status = pnfs_layout_process(lgp);
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index a03ee52..c1897f7 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -569,6 +569,7 @@ static struct pnfs_layoutdriver_type objlayout_type = {
+ .flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR,
+
++ .owner = THIS_MODULE,
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8b197d2..7d189dc 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1009,6 +1009,8 @@ free_client(struct nfs4_client *clp)
+ put_group_info(clp->cl_cred.cr_group_info);
+ kfree(clp->cl_principal);
+ kfree(clp->cl_name.data);
++ idr_remove_all(&clp->cl_stateids);
++ idr_destroy(&clp->cl_stateids);
+ kfree(clp);
+ }
+
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 8445fbc..6f292dd 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 78b68af..4402b18 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
+ level = ocfs2_iocb_rw_locked_level(iocb);
+ ocfs2_rw_unlock(inode, level);
+
++ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+- inode_dio_done(inode);
+ }
+
+ /*
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 81a4cd2..231eab2 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ * everything is up to the caller :) */
+ status = ocfs2_should_refresh_lock_res(lockres);
+ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
+ goto bail;
+ }
+@@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+
+ ocfs2_complete_lock_res_refresh(lockres, status);
+
+- if (status < 0)
++ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
++ }
+ ocfs2_track_lock_refresh(lockres);
+ }
+ bail:
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index f169da4..b7e74b5 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
+ * cluster groups will be staying in cache for the duration of
+ * this operation.
+ */
+- ac->ac_allow_chain_relink = 0;
++ ac->ac_disable_chain_relink = 1;
+
+ /* Claim the first region */
+ status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
+@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ * Do this *after* figuring out how many bits we're taking out
+ * of our target group.
+ */
+- if (ac->ac_allow_chain_relink &&
++ if (!ac->ac_disable_chain_relink &&
+ (prev_group_bh) &&
+ (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
+ status = ocfs2_relink_block_group(handle, alloc_inode,
+@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+
+ victim = ocfs2_find_victim_chain(cl);
+ ac->ac_chain = victim;
+- ac->ac_allow_chain_relink = 1;
+
+ status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+ res, &bits_left);
+@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ * searching each chain in order. Don't allow chain relinking
+ * because we only calculate enough journal credits for one
+ * relink per alloc. */
+- ac->ac_allow_chain_relink = 0;
++ ac->ac_disable_chain_relink = 1;
+ for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
+ if (i == victim)
+ continue;
+diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
+index b8afabf..a36d0aa 100644
+--- a/fs/ocfs2/suballoc.h
++++ b/fs/ocfs2/suballoc.h
+@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
+
+ /* these are used by the chain search */
+ u16 ac_chain;
+- int ac_allow_chain_relink;
++ int ac_disable_chain_relink;
+ group_search_t *ac_group_search;
+
+ u64 ac_last_group;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index aa9e877..0d5ea9c 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ struct buffer_head *dir_bh = NULL;
+
+ ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+- if (!ret) {
++ if (ret) {
+ mlog_errno(ret);
+ goto leave;
+ }
+diff --git a/fs/partitions/check.c b/fs/partitions/check.c
+index 6b5fcc5..1ef15cc 100644
+--- a/fs/partitions/check.c
++++ b/fs/partitions/check.c
+@@ -399,11 +399,11 @@ void delete_partition(struct gendisk *disk, int partno)
+ if (!part)
+ return;
+
+- blk_free_devt(part_devt(part));
+ rcu_assign_pointer(ptbl->part[partno], NULL);
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
++ blk_free_devt(part_devt(part));
+
+ hd_struct_put(part);
+ }
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 57bbf90..45d18d1 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -72,6 +72,27 @@ static char *reason_str[] = {
+ "Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency"
+ };
+
++bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++ /*
++	 * In the NMI path, pstore shouldn't be blocked
++	 * regardless of the reason.
++ */
++ if (in_nmi())
++ return true;
++
++ switch (reason) {
++ /* In panic case, other cpus are stopped by smp_send_stop(). */
++	/* In the panic case, other CPUs are stopped by smp_send_stop(). */
++ /* Emergency restart shouldn't be blocked by spin lock. */
++	/* An emergency restart shouldn't be blocked by a spinlock. */
++ return true;
++ default:
++ return false;
++ }
++}
++EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
++
+ /*
+ * callback from kmsg_dump. (s2,l2) has the most recently
+ * written bytes, older bytes are in (s1,l1). Save as much
+@@ -97,10 +118,12 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ else
+ why = "Unknown";
+
+- if (in_nmi()) {
+- is_locked = spin_trylock(&psinfo->buf_lock);
+- if (!is_locked)
+- pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
++ if (pstore_cannot_block_path(reason)) {
++ is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
++ if (!is_locked) {
++ pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
++ , in_nmi() ? "NMI" : why);
++ }
+ } else
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ oopscount++;
+@@ -131,9 +154,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ total += l1_cpy + l2_cpy;
+ part++;
+ }
+- if (in_nmi()) {
++ if (pstore_cannot_block_path(reason)) {
+ if (is_locked)
+- spin_unlock(&psinfo->buf_lock);
++ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ } else
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ }
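With pstore_cannot_block_path(), the dump path picks its locking policy by context: NMI, panic and emergency restart may only trylock the buffer and must proceed (possibly corrupting the record) if it is contended, while ordinary reasons block normally. A minimal sketch of that policy, with a pthread mutex in place of psinfo->buf_lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static void dump_record(bool cannot_block)
{
	bool locked = true;

	if (cannot_block)
		locked = pthread_mutex_trylock(&buf_lock) == 0;
	else
		pthread_mutex_lock(&buf_lock);

	if (!locked)
		fprintf(stderr, "dumping unlocked, record may be corrupt\n");

	/* ... copy the record into the platform buffer ... */

	if (locked)
		pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	dump_record(true);	/* e.g. panic: trylock, never spin */
	dump_record(false);	/* e.g. oops: block normally */
	return 0;
}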
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index c542c73..f9c90b5 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -130,13 +130,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
+ else if (inum > o->inum)
+ p = p->rb_right;
+ else {
+- if (o->dnext) {
++ if (o->del) {
+ spin_unlock(&c->orphan_lock);
+ dbg_gen("deleted twice ino %lu",
+ (unsigned long)inum);
+ return;
+ }
+ if (o->cnext) {
++ o->del = 1;
+ o->dnext = c->orph_dnext;
+ c->orph_dnext = o;
+ spin_unlock(&c->orphan_lock);
+@@ -447,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
+ orphan = dnext;
+ dnext = orphan->dnext;
+ ubifs_assert(!orphan->new);
++ ubifs_assert(orphan->del);
+ rb_erase(&orphan->rb, &c->orph_tree);
+ list_del(&orphan->list);
+ c->tot_orphans -= 1;
+@@ -536,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
+ rb_link_node(&orphan->rb, parent, p);
+ rb_insert_color(&orphan->rb, &c->orph_tree);
+ list_add_tail(&orphan->list, &c->orph_list);
++ orphan->del = 1;
+ orphan->dnext = c->orph_dnext;
+ c->orph_dnext = orphan;
+ dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 8bbc99e..a39fce5 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -908,6 +908,7 @@ struct ubifs_budget_req {
+ * @dnext: next orphan to delete
+ * @inum: inode number
+ * @new: %1 => added since the last commit, otherwise %0
++ * @del: %1 => delete pending, otherwise %0
+ */
+ struct ubifs_orphan {
+ struct rb_node rb;
+@@ -917,6 +918,7 @@ struct ubifs_orphan {
+ struct ubifs_orphan *dnext;
+ ino_t inum;
+ int new;
++ unsigned del:1;
+ };
+
+ /**
+diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
+index da64e15..6cdabb4 100644
+--- a/include/linux/auto_fs.h
++++ b/include/linux/auto_fs.h
+@@ -31,25 +31,16 @@
+ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+
+ /*
+- * Architectures where both 32- and 64-bit binaries can be executed
+- * on 64-bit kernels need this. This keeps the structure format
+- * uniform, and makes sure the wait_queue_token isn't too big to be
+- * passed back down to the kernel.
+- *
+- * This assumes that on these architectures:
+- * mode 32 bit 64 bit
+- * -------------------------
+- * int 32 bit 32 bit
+- * long 32 bit 64 bit
+- *
+- * If so, 32-bit user-space code should be backwards compatible.
++ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
++ * back to the kernel via ioctl from userspace. On architectures where 32- and
++ * 64-bit userspace binaries can be executed it's important that the size of
++ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
++ * do not break the binary ABI interface by changing the structure size.
+ */
+-
+-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
+- || defined(__powerpc__) || defined(__s390__)
+-typedef unsigned int autofs_wqt_t;
+-#else
++#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
+ typedef unsigned long autofs_wqt_t;
++#else
++typedef unsigned int autofs_wqt_t;
+ #endif
+
+ /* Packet types */
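The rewritten auto_fs.h comment and #if make the intent explicit: autofs_wqt_t must stay 32 bits wherever 32-bit userspace can run on a 64-bit kernel, so the wait-queue packet layout never changes across the ioctl boundary. A sketch of how that invariant could be pinned down at compile time; the size-check typedef is an illustration, not part of the patch:

#include <stdio.h>

#if defined(__ia64__) || defined(__alpha__)	/* pure 64-bit targets */
typedef unsigned long autofs_wqt_t;		/* no 32-bit userspace here */
#else
typedef unsigned int autofs_wqt_t;
/* fails to compile if the token ever drifts from the 32-bit ABI size */
typedef char wqt_size_check[sizeof(autofs_wqt_t) == 4 ? 1 : -1];
#endif

int main(void)
{
	printf("sizeof(autofs_wqt_t) = %zu\n", sizeof(autofs_wqt_t));
	return 0;
}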
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index f606406..acd8d4b 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -67,8 +67,6 @@ struct linux_binprm {
+ #define BINPRM_FLAGS_EXECFD_BIT 1
+ #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+
+-#define BINPRM_MAX_RECURSION 4
+-
+ /* Function parameter for binfmt->coredump */
+ struct coredump_params {
+ long signr;
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7453cfd..6ae6a15 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
+ int unregister_con_driver(const struct consw *csw);
++int do_unregister_con_driver(const struct consw *csw);
+ int take_over_console(const struct consw *sw, int first, int last, int deflt);
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+ void give_up_console(const struct consw *sw);
+ #ifdef CONFIG_HW_CONSOLE
+ int con_debug_enter(struct vc_data *vc);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 29b6353..a276817 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2103,6 +2103,7 @@ extern void bd_forget(struct inode *inode);
+ extern void bdput(struct block_device *);
+ extern void invalidate_bdev(struct block_device *);
+ extern int sync_blockdev(struct block_device *bdev);
++extern void kill_bdev(struct block_device *);
+ extern struct super_block *freeze_bdev(struct block_device *);
+ extern void emergency_thaw_all(void);
+ extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+@@ -2110,6 +2111,7 @@ extern int fsync_bdev(struct block_device *);
+ #else
+ static inline void bd_forget(struct inode *inode) {}
+ static inline int sync_blockdev(struct block_device *bdev) { return 0; }
++static inline void kill_bdev(struct block_device *bdev) {}
+ static inline void invalidate_bdev(struct block_device *bdev) {}
+
+ static inline struct super_block *freeze_bdev(struct block_device *sb)
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index 255491c..52a9da2 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -152,4 +152,15 @@ void ida_simple_remove(struct ida *ida, unsigned int id);
+
+ void __init idr_init_cache(void);
+
++/**
++ * idr_for_each_entry - iterate over an idr's elements of a given type
++ * @idp: idr handle
++ * @entry: the type * to use as cursor
++ * @id: id entry's key
++ */
++#define idr_for_each_entry(idp, entry, id) \
++ for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
++ entry != NULL; \
++ ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
++
+ #endif /* __IDR_H__ */
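The new idr_for_each_entry() walks live entries by repeatedly calling idr_get_next() with an increasing id. A self-contained usage sketch, with a toy array-backed idr_get_next() standing in for the kernel's radix-tree implementation:

#include <stdio.h>

#define TOY_MAX 8

struct toy_idr {
	void *slot[TOY_MAX];
};

/* toy replacement for the kernel's idr_get_next() */
static void *idr_get_next(struct toy_idr *idp, int *id)
{
	for (int i = *id; i < TOY_MAX; i++) {
		if (idp->slot[i]) {
			*id = i;
			return idp->slot[i];
		}
	}
	return NULL;
}

/* same shape as the macro added by the hunk above */
#define idr_for_each_entry(idp, entry, id) \
	for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
	     entry != NULL; \
	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))

int main(void)
{
	struct toy_idr idr = {
		.slot = { [1] = "one", [4] = "four", [6] = "six" },
	};
	const char *entry;
	int id;

	idr_for_each_entry(&idr, entry, id)	/* visits ids 1, 4 and 6 */
		printf("id %d -> %s\n", id, entry);
	return 0;
}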
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index b16f653..f8d4b27 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -54,6 +54,8 @@ enum umh_wait {
+ UMH_WAIT_PROC = 1, /* wait for the process to complete */
+ };
+
++#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
++
+ struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 1d1b1e1..ee2baf0 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
+index 9404854..ce2ab3d 100644
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -43,7 +43,7 @@ struct pps_source_info {
+ int event, void *data); /* PPS echo function */
+
+ struct module *owner;
+- struct device *dev;
++ struct device *dev; /* Parent device for device_create */
+ };
+
+ struct pps_event_time {
+@@ -69,6 +69,7 @@ struct pps_device {
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
++ void const *lookup_cookie; /* pps_lookup_dev only */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+@@ -82,16 +83,26 @@ struct pps_device {
+ extern struct device_attribute pps_attrs[];
+
+ /*
++ * Internal functions.
++ *
++ * These are not actually part of the exported API, but this is a
++ * convenient header file to put them in.
++ */
++
++extern int pps_register_cdev(struct pps_device *pps);
++extern void pps_unregister_cdev(struct pps_device *pps);
++
++/*
+ * Exported functions
+ */
+
+ extern struct pps_device *pps_register_source(
+ struct pps_source_info *info, int default_params);
+ extern void pps_unregister_source(struct pps_device *pps);
+-extern int pps_register_cdev(struct pps_device *pps);
+-extern void pps_unregister_cdev(struct pps_device *pps);
+ extern void pps_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event, void *data);
++/* Look up a pps device by magic cookie */
++struct pps_device *pps_lookup_dev(void const *cookie);
+
+ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+ struct timespec ts)
+diff --git a/include/linux/pstore.h b/include/linux/pstore.h
+index 2ca8cde..9b16969 100644
+--- a/include/linux/pstore.h
++++ b/include/linux/pstore.h
+@@ -22,6 +22,8 @@
+ #ifndef _LINUX_PSTORE_H
+ #define _LINUX_PSTORE_H
+
++#include <linux/kmsg_dump.h>
++
+ /* types */
+ enum pstore_type_id {
+ PSTORE_TYPE_DMESG = 0,
+@@ -50,6 +52,7 @@ struct pstore_info {
+
+ #ifdef CONFIG_PSTORE
+ extern int pstore_register(struct pstore_info *);
++extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+ extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
+ #else
+ static inline int
+@@ -57,6 +60,11 @@ pstore_register(struct pstore_info *psi)
+ {
+ return -ENODEV;
+ }
++static inline bool
++pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++ return false;
++}
+ static inline int
+ pstore_write(enum pstore_type_id type, char *buf, size_t size)
+ {
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index cb78556..1162580 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -413,6 +413,7 @@ struct quota_module_name {
+ #define INIT_QUOTA_MODULE_NAMES {\
+ {QFMT_VFS_OLD, "quota_v1"},\
+ {QFMT_VFS_V0, "quota_v2"},\
++ {QFMT_VFS_V1, "quota_v2"},\
+ {0, NULL}}
+
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 8bec265..bae516e 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -47,8 +47,8 @@
+ #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+-#define PORT_BRCM_TRUMANAGE 22
+-#define PORT_MAX_8250 22 /* max port ID */
++#define PORT_BRCM_TRUMANAGE 25
++#define PORT_MAX_8250 25 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
+index a54b825..6f8b026 100644
+--- a/include/linux/usb/audio.h
++++ b/include/linux/usb/audio.h
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return desc->baSourceID[desc->bNrInPins + control_size];
++ return *(uac_processing_unit_bmControls(desc, protocol)
++ + control_size);
+ }
+
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return &desc->baSourceID[desc->bNrInPins + control_size + 1];
++ return uac_processing_unit_bmControls(desc, protocol)
++ + control_size + 1;
+ }
+
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
+index c2164fa..644921f 100644
+--- a/include/linux/vt_kern.h
++++ b/include/linux/vt_kern.h
+@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
+ int con_get_cmap(unsigned char __user *cmap);
+ void scrollback(struct vc_data *vc, int lines);
+ void scrollfront(struct vc_data *vc, int lines);
++void clear_buffer_attributes(struct vc_data *vc);
+ void update_region(struct vc_data *vc, unsigned long start, int count);
+ void redraw_screen(struct vc_data *vc, int is_switch);
+ #define update_screen(x) redraw_screen(x, 0)
+@@ -131,6 +132,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+ int vt_waitactive(int n);
+ void change_console(struct vc_data *new_vc);
+ void reset_vc(struct vc_data *vc);
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
++ int deflt);
+ extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
+ int vty_init(const struct file_operations *console_fops);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index e46674d..f9ce2fa 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -28,16 +28,16 @@
+
+ struct inet_hashinfo;
+
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */
+ static inline unsigned int inet6_ehashfn(struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport)
+ {
+- u32 ports = (lport ^ (__force u16)fport);
++ u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+
+ return jhash_3words((__force u32)laddr->s6_addr32[3],
+- (__force u32)faddr->s6_addr32[3],
+- ports, inet_ehash_secret + net_hash_mix(net));
++ ipv6_addr_jhash(faddr),
++ ports,
++ inet_ehash_secret + net_hash_mix(net));
+ }
+
+ static inline int inet6_sk_ehashfn(const struct sock *sk)
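The inet6_ehashfn() change hardens the connection hash in two ways: the foreign address is mixed in through the keyed ipv6_addr_jhash() (defined in the ipv6.h hunk below), and the two ports are packed into one 32-bit word instead of XORed, since lport ^ fport collides for any swapped port pair. A tiny demonstration of that collision:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lport = 80, fport = 8080;

	/* old scheme: XOR collides whenever the ports are swapped */
	uint32_t xor_a = lport ^ fport;
	uint32_t xor_b = fport ^ lport;

	/* new scheme: packing both ports keeps the pairs distinct */
	uint32_t packed_a = (uint32_t)lport << 16 | fport;
	uint32_t packed_b = (uint32_t)fport << 16 | lport;

	printf("xor: %u == %u, packed: %u != %u\n",
	       xor_a, xor_b, packed_a, packed_b);
	return 0;
}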
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index f941964..ee4ee91 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -199,6 +199,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ extern int inet_sk_rebuild_header(struct sock *sk);
+
+ extern u32 inet_ehash_secret;
++extern u32 ipv6_hash_secret;
+ extern void build_ehash_secret(void);
+
+ static inline unsigned int inet_ehashfn(struct net *net,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index a366a8a..4d549cf 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -15,6 +15,7 @@
+
+ #include <linux/ipv6.h>
+ #include <linux/hardirq.h>
++#include <linux/jhash.h>
+ #include <net/if_inet6.h>
+ #include <net/ndisc.h>
+ #include <net/flow.h>
+@@ -386,6 +387,17 @@ struct ip6_create_arg {
+ void ip6_frag_init(struct inet_frag_queue *q, void *a);
+ int ip6_frag_match(struct inet_frag_queue *q, void *a);
+
++/* a more secure version of ipv6_addr_hash() */
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
++
++ return jhash_3words(v,
++ (__force u32)a->s6_addr32[2],
++ (__force u32)a->s6_addr32[3],
++ ipv6_hash_secret);
++}
++
+ static inline int ipv6_addr_any(const struct in6_addr *a)
+ {
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
+index 2be31ff..6f30e70 100644
+--- a/include/target/target_core_device.h
++++ b/include/target/target_core_device.h
+@@ -50,7 +50,7 @@ extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *
+ extern int core_dev_del_lun(struct se_portal_group *, u32);
+ extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+ extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+- u32, char *, int *);
++ struct se_node_acl *, u32, int *);
+ extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *, u32, u32);
+ extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index b6cacf1..c0739f8 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -361,12 +361,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
+ struct cgroup *cgrp = link->cgrp;
+ list_del(&link->cg_link_list);
+ list_del(&link->cgrp_link_list);
++
++ /*
++ * We may not be holding cgroup_mutex, and if cgrp->count is
++ * dropped to 0 the cgroup can be destroyed at any time, hence
++ * rcu_read_lock is used to keep it alive.
++ */
++ rcu_read_lock();
+ if (atomic_dec_and_test(&cgrp->count) &&
+ notify_on_release(cgrp)) {
+ if (taskexit)
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
++ rcu_read_unlock();
+
+ kfree(link);
+ }
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 84a524b..835eee6 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2507,8 +2507,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
+
+ dentry = task_cs(tsk)->css.cgroup->dentry;
+ spin_lock(&cpuset_buffer_lock);
+- snprintf(cpuset_name, CPUSET_NAME_LEN,
+- dentry ? (const char *)dentry->d_name.name : "/");
++
++ if (!dentry) {
++ strcpy(cpuset_name, "/");
++ } else {
++ spin_lock(&dentry->d_lock);
++ strlcpy(cpuset_name, (const char *)dentry->d_name.name,
++ CPUSET_NAME_LEN);
++ spin_unlock(&dentry->d_lock);
++ }
++
+ nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
+ tsk->mems_allowed);
+ printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 6db7a5e..cdd5607 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ * and expiry check is done in the hrtimer_interrupt or in the softirq.
+ */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+- if (wakeup) {
+- raw_spin_unlock(&base->cpu_base->lock);
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+- raw_spin_lock(&base->cpu_base->lock);
+- } else
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+- return 1;
+- }
+-
+- return 0;
++ return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ }
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+ return 0;
+ }
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++ && hrtimer_enqueue_reprogram(timer, new_base)) {
++ if (wakeup) {
++ /*
++ * We need to drop cpu_base->lock to avoid a
++ * lock ordering issue vs. rq->lock.
++ */
++ raw_spin_unlock(&new_base->cpu_base->lock);
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ local_irq_restore(flags);
++ return ret;
++ } else {
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++ }
+
+ unlock_hrtimer_base(timer, &flags);
+
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index dc813a9..63633a3 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+
+ /*
+ * All handlers must agree on IRQF_SHARED, so we test just the
+- * first. Check for action->next as well.
++ * first.
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+- (action->flags & __IRQF_TIMER) ||
+- (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+- !action->next)
++ (action->flags & __IRQF_TIMER))
+ goto out;
+
+ /* Already running on another processor */
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ do {
+ if (handle_irq_event(desc) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
++ /* Make sure that there is still a valid action */
+ action = desc->action;
+ } while ((desc->istate & IRQS_PENDING) && action);
+ desc->istate &= ~IRQS_POLL_INPROGRESS;
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index a4bea97..d6fe08a 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -58,6 +58,43 @@ static DEFINE_SPINLOCK(umh_sysctl_lock);
+ */
+ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
+
++static void free_modprobe_argv(struct subprocess_info *info)
++{
++ kfree(info->argv[3]); /* check call_modprobe() */
++ kfree(info->argv);
++}
++
++static int call_modprobe(char *module_name, int wait)
++{
++ static char *envp[] = {
++ "HOME=/",
++ "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
++ NULL
++ };
++
++ char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++ if (!argv)
++ goto out;
++
++ module_name = kstrdup(module_name, GFP_KERNEL);
++ if (!module_name)
++ goto free_argv;
++
++ argv[0] = modprobe_path;
++ argv[1] = "-q";
++ argv[2] = "--";
++ argv[3] = module_name; /* check free_modprobe_argv() */
++ argv[4] = NULL;
++
++ return call_usermodehelper_fns(modprobe_path, argv, envp,
++ wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
++free_argv:
++ kfree(argv);
++out:
++ return -ENOMEM;
++}
++
+ /**
+ * __request_module - try to load a kernel module
+ * @wait: wait (or not) for the operation to complete
+@@ -79,11 +116,6 @@ int __request_module(bool wait, const char *fmt, ...)
+ char module_name[MODULE_NAME_LEN];
+ unsigned int max_modprobes;
+ int ret;
+- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
+- static char *envp[] = { "HOME=/",
+- "TERM=linux",
+- "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+- NULL };
+ static atomic_t kmod_concurrent = ATOMIC_INIT(0);
+ #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
+ static int kmod_loop_msg;
+@@ -126,9 +158,7 @@ int __request_module(bool wait, const char *fmt, ...)
+
+ trace_module_request(module_name, wait, _RET_IP_);
+
+- ret = call_usermodehelper_fns(modprobe_path, argv, envp,
+- wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
+- NULL, NULL, NULL);
++ ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+
+ atomic_dec(&kmod_concurrent);
+ return ret;
+@@ -186,7 +216,7 @@ static int ____call_usermodehelper(void *data)
+ /* Exec failed? */
+ fail:
+ sub_info->retval = retval;
+- do_exit(0);
++ return 0;
+ }
+
+ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+@@ -197,6 +227,19 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ }
+ EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
++static void umh_complete(struct subprocess_info *sub_info)
++{
++ struct completion *comp = xchg(&sub_info->complete, NULL);
++ /*
++ * See call_usermodehelper_exec(). If xchg() returns NULL
++ * we own sub_info, the UMH_KILLABLE caller has gone away.
++ */
++ if (comp)
++ complete(comp);
++ else
++ call_usermodehelper_freeinfo(sub_info);
++}
++
+ /* Keventd can't block, but this (a child) can. */
+ static int wait_for_helper(void *data)
+ {
+@@ -233,7 +276,7 @@ static int wait_for_helper(void *data)
+ sub_info->retval = ret;
+ }
+
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ return 0;
+ }
+
+@@ -245,6 +288,9 @@ static void __call_usermodehelper(struct work_struct *work)
+ enum umh_wait wait = sub_info->wait;
+ pid_t pid;
+
++ if (wait != UMH_NO_WAIT)
++ wait &= ~UMH_KILLABLE;
++
+ /* CLONE_VFORK: wait until the usermode helper has execve'd
+ * successfully We need the data structures to stay around
+ * until that is done. */
+@@ -267,7 +313,7 @@ static void __call_usermodehelper(struct work_struct *work)
+ case UMH_WAIT_EXEC:
+ if (pid < 0)
+ sub_info->retval = pid;
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ }
+ }
+
+@@ -435,9 +481,21 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ queue_work(khelper_wq, &sub_info->work);
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
++
++ if (wait & UMH_KILLABLE) {
++ retval = wait_for_completion_killable(&done);
++ if (!retval)
++ goto wait_done;
++
++ /* umh_complete() will see NULL and free sub_info */
++ if (xchg(&sub_info->complete, NULL))
++ goto unlock;
++ /* fallthrough, umh_complete() was already called */
++ }
++
+ wait_for_completion(&done);
++wait_done:
+ retval = sub_info->retval;
+-
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ unlock:
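The UMH_KILLABLE handshake in the kmod.c hunks above uses xchg(&sub_info->complete, NULL) so that exactly one side, either the helper completing or the killed waiter, ends up owning and freeing sub_info. A rough userspace analogue of that single-owner handoff using C11 atomics (the names here are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct job { _Atomic(int *) done_flag; };

    /* Whichever side swaps the pointer to NULL first "wins" ownership;
     * the loser sees NULL and knows the other side will clean up. */
    static int claim(struct job *j)
    {
        return atomic_exchange(&j->done_flag, NULL) != NULL;
    }

    int main(void)
    {
        int flag = 0;
        struct job *j = malloc(sizeof(*j));
        atomic_init(&j->done_flag, &flag);

        if (claim(j))
            printf("first claim wins and takes ownership\n");
        if (!claim(j))
            printf("second claim loses: pointer already NULL\n");

        free(j);
        return 0;
    }

Because the exchange is atomic, the "complete the waiter" and "waiter was killed, free it ourselves" paths can never both run.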
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index e7cb76d..962c291 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1450,8 +1450,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ while (!signal_pending(current)) {
+ if (timer.it.cpu.expires.sched == 0) {
+ /*
+- * Our timer fired and was reset.
++				 * Our timer fired and was reset; the deletion
++				 * below cannot fail.
+ */
++ posix_cpu_timer_del(&timer);
+ spin_unlock_irq(&timer.it_lock);
+ return 0;
+ }
+@@ -1469,9 +1471,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ * We were interrupted by a signal.
+ */
+ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+- posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ if (!error) {
++ /*
++ * Timer is now unarmed, deletion can not fail.
++ */
++ posix_cpu_timer_del(&timer);
++ }
+ spin_unlock_irq(&timer.it_lock);
+
++ while (error == TIMER_RETRY) {
++ /*
++		 * We need to handle the case when the timer was or is
++		 * in the middle of firing. In the other cases we have
++		 * already freed the resources.
++ */
++ spin_lock_irq(&timer.it_lock);
++ error = posix_cpu_timer_del(&timer);
++ spin_unlock_irq(&timer.it_lock);
++ }
++
+ if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+ /*
+ * It actually did fire already.
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 69185ae..e885be1 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
+ {
+ struct k_itimer *timr;
+
++ /*
++ * timer_t could be any type >= int and we want to make sure any
++ * @timer_id outside positive int range fails lookup.
++ */
++ if ((unsigned long long)timer_id > INT_MAX)
++ return NULL;
++
+ rcu_read_lock();
+ timr = idr_find(&posix_timers_id, (int)timer_id);
+ if (timr) {
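The __lock_timer() check above works because casting to unsigned long long before comparing against INT_MAX rejects both oversized and negative IDs with a single test: a negative timer_t wraps around to a huge unsigned value. Demonstrated standalone:

    #include <stdio.h>
    #include <limits.h>

    static int valid_id(long long timer_id)
    {
        /* Negative IDs wrap to huge unsigned values, so one unsigned
         * comparison rejects them along with IDs above INT_MAX. */
        return (unsigned long long)timer_id <= INT_MAX;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               valid_id(42),          /* 1: in range              */
               valid_id(-1),          /* 0: wraps to ULLONG_MAX   */
               valid_id(1LL << 40));  /* 0: exceeds INT_MAX       */
        return 0;
    }

Without the check, a negative or huge timer_id would be truncated to int and could alias a valid slot in the idr lookup.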
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index a650694..9f9aa32 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
+
+ /* Convert the decnet address to binary */
+ result = -EIO;
+- nodep = strchr(buf, '.') + 1;
++ nodep = strchr(buf, '.');
+ if (!nodep)
+ goto out;
++ ++nodep;
+
+ area = simple_strtoul(buf, NULL, 10);
+ node = simple_strtoul(nodep, NULL, 10);
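The sysctl_binary.c fix above is the classic strchr(buf, '.') + 1 bug: adding 1 before the NULL check turns a missing separator into the undetectable pointer value 1 instead of NULL, so the error path can never trigger. The corrected shape in standalone form:

    #include <stdio.h>
    #include <string.h>

    static const char *node_part(const char *addr)
    {
        const char *nodep = strchr(addr, '.');
        if (!nodep)              /* check *before* stepping past the dot */
            return NULL;
        return nodep + 1;
    }

    int main(void)
    {
        printf("%s\n", node_part("1.42"));                      /* "42" */
        printf("%s\n", node_part("no-dot") ? "?" : "rejected"); /* rejected */
        return 0;
    }

The rule generalizes: never apply arithmetic to a pointer that may be NULL until after the NULL test.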
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76..3f42652 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -369,10 +369,8 @@ if ($hz eq '--can') {
+ die "Usage: $0 HZ\n";
+ }
+
+- @val = @{$canned_values{$hz}};
+- if (!defined(@val)) {
+- @val = compute_values($hz);
+- }
++ $cv = $canned_values{$hz};
++ @val = defined($cv) ? @$cv : compute_values($hz);
+ output($hz, @val);
+ }
+ exit 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 4b1a96b..6c880e8 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3454,37 +3454,51 @@ static void ftrace_init_module(struct module *mod,
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify(struct notifier_block *self,
+- unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++ unsigned long val, void *data)
+ {
+ struct module *mod = data;
+
+- switch (val) {
+- case MODULE_STATE_COMING:
++ if (val == MODULE_STATE_COMING)
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
+- break;
+- case MODULE_STATE_GOING:
++ return 0;
++}
++
++static int ftrace_module_notify_exit(struct notifier_block *self,
++ unsigned long val, void *data)
++{
++ struct module *mod = data;
++
++ if (val == MODULE_STATE_GOING)
+ ftrace_release_mod(mod);
+- break;
+- }
+
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify(struct notifier_block *self,
+- unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++ unsigned long val, void *data)
++{
++ return 0;
++}
++static int ftrace_module_notify_exit(struct notifier_block *self,
++ unsigned long val, void *data)
+ {
+ return 0;
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_nb = {
+- .notifier_call = ftrace_module_notify,
++struct notifier_block ftrace_module_enter_nb = {
++ .notifier_call = ftrace_module_notify_enter,
+ .priority = INT_MAX, /* Run before anything that can use kprobes */
+ };
+
++struct notifier_block ftrace_module_exit_nb = {
++ .notifier_call = ftrace_module_notify_exit,
++ .priority = INT_MIN, /* Run after anything that can remove kprobes */
++};
++
+ extern unsigned long __start_mcount_loc[];
+ extern unsigned long __stop_mcount_loc[];
+
+@@ -3516,9 +3530,13 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_nb);
++ ret = register_module_notifier(&ftrace_module_enter_nb);
++ if (ret)
++ pr_warning("Failed to register trace ftrace module enter notifier\n");
++
++ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+- pr_warning("Failed to register trace ftrace module notifier\n");
++ pr_warning("Failed to register trace ftrace module exit notifier\n");
+
+ set_ftrace_early_filters();
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7bf068a..0ad2420 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -128,6 +128,7 @@ struct worker {
+ };
+
+ struct work_struct *current_work; /* L: work being processed */
++ work_func_t current_func; /* L: current_work's fn */
+ struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+ struct list_head scheduled; /* L: scheduled works */
+ struct task_struct *task; /* I: worker task */
+@@ -843,7 +844,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry(worker, tmp, bwh, hentry)
+- if (worker->current_work == work)
++ if (worker->current_work == work &&
++ worker->current_func == work->func)
+ return worker;
+ return NULL;
+ }
+@@ -853,9 +855,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ * @gcwq: gcwq of interest
+ * @work: work to find worker for
+ *
+- * Find a worker which is executing @work on @gcwq. This function is
+- * identical to __find_worker_executing_work() except that this
+- * function calculates @bwh itself.
++ * Find a worker which is executing @work on @gcwq by searching
++ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
++ * to match, its current execution should match the address of @work and
++ * its work function. This is to avoid unwanted dependency between
++ * unrelated work executions through a work item being recycled while still
++ * being executed.
++ *
++ * This is a bit tricky. A work item may be freed once its execution
++ * starts and nothing prevents the freed area from being recycled for
++ * another work item. If the same work item address ends up being reused
++ * before the original execution finishes, workqueue will identify the
++ * recycled work item as currently executing and make it wait until the
++ * current execution finishes, introducing an unwanted dependency.
++ *
++ * This function checks the work item address, work function and workqueue
++ * to avoid false positives. Note that this isn't complete as one may
++ * construct a work function which can introduce dependency onto itself
++ * through a recycled work item. Well, if somebody wants to shoot oneself
++ * in the foot that badly, there's only so much we can do, and if such
++ * deadlock actually occurs, it should be easy to locate the culprit work
++ * function.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+@@ -1816,7 +1836,6 @@ __acquires(&gcwq->lock)
+ struct global_cwq *gcwq = cwq->gcwq;
+ struct hlist_head *bwh = busy_worker_head(gcwq, work);
+ bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+- work_func_t f = work->func;
+ int work_color;
+ struct worker *collision;
+ #ifdef CONFIG_LOCKDEP
+@@ -1845,6 +1864,7 @@ __acquires(&gcwq->lock)
+ debug_work_deactivate(work);
+ hlist_add_head(&worker->hentry, bwh);
+ worker->current_work = work;
++ worker->current_func = work->func;
+ worker->current_cwq = cwq;
+ work_color = get_work_color(work);
+
+@@ -1882,7 +1902,7 @@ __acquires(&gcwq->lock)
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+- f(work);
++ worker->current_func(work);
+ /*
+ * While we must be careful to not use "work" after this, the trace
+ * point will only record its address.
+@@ -1892,11 +1912,10 @@ __acquires(&gcwq->lock)
+ lock_map_release(&cwq->wq->lockdep_map);
+
+ if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+- printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+- "%s/0x%08x/%d\n",
+- current->comm, preempt_count(), task_pid_nr(current));
+- printk(KERN_ERR " last function: ");
+- print_symbol("%s\n", (unsigned long)f);
++ pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
++ " last function: %pf\n",
++ current->comm, preempt_count(), task_pid_nr(current),
++ worker->current_func);
+ debug_show_held_locks(current);
+ dump_stack();
+ }
+@@ -1910,6 +1929,7 @@ __acquires(&gcwq->lock)
+ /* we're done with it, release */
+ hlist_del_init(&worker->hentry);
+ worker->current_work = NULL;
++ worker->current_func = NULL;
+ worker->current_cwq = NULL;
+ cwq_dec_nr_in_flight(cwq, work_color, false);
+ }
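The workqueue change above exists because matching a busy worker on the work item's address alone breaks down once the item's memory is freed and recycled for an unrelated work item. Recording worker->current_func and requiring both the address and the function to match is a cheap identity check that defeats most accidental reuse. The idea in miniature (standalone C; the storage recycling is simulated explicitly):

    #include <stdio.h>

    typedef void (*work_fn)(void *);
    struct work { work_fn func; };

    static void fn_a(void *p) { (void)p; }
    static void fn_b(void *p) { (void)p; }

    static int same_execution(struct work *running, work_fn running_fn,
                              struct work *candidate)
    {
        /* Address match alone is fooled by recycled storage;
         * also compare the function the worker is executing. */
        return running == candidate && running_fn == candidate->func;
    }

    int main(void)
    {
        static struct work slot;     /* storage that gets "recycled"  */
        struct work *w1 = &slot;
        w1->func = fn_a;
        work_fn running_fn = w1->func;   /* worker starts executing w1 */

        /* w1 is "freed" and the same slot reused for a new item: */
        struct work *w2 = &slot;
        w2->func = fn_b;

        printf("addr only: %d\n", w1 == w2);                           /* 1: false match */
        printf("addr+func: %d\n", same_execution(w1, running_fn, w2)); /* 0: rejected    */
        return 0;
    }

As the added kernel comment notes, this is a heuristic, not a proof; it just makes the false-dependency window far harder to hit by accident.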
+diff --git a/lib/idr.c b/lib/idr.c
+index ed055b2..aadc525 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -39,6 +39,14 @@
+ static struct kmem_cache *idr_layer_cache;
+ static DEFINE_SPINLOCK(simple_ida_lock);
+
++/* the maximum ID which can be allocated given idr->layers */
++static int idr_max(int layers)
++{
++ int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);
++
++ return (1 << bits) - 1;
++}
++
+ static struct idr_layer *get_from_free_list(struct idr *idp)
+ {
+ struct idr_layer *p;
+@@ -223,7 +231,7 @@ build_up:
+ * Add a new layer to the top of the tree if the requested
+ * id is larger than the currently allocated space.
+ */
+- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
++ while (id > idr_max(layers)) {
+ layers++;
+ if (!p->count) {
+ /* special case: if the tree is currently empty,
+@@ -265,7 +273,7 @@ build_up:
+
+ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ int id;
+
+ id = idr_get_empty_slot(idp, starting_id, pa);
+@@ -357,7 +365,7 @@ static void idr_remove_warning(int id)
+ static void sub_remove(struct idr *idp, int shift, int id)
+ {
+ struct idr_layer *p = idp->top;
+- struct idr_layer **pa[MAX_LEVEL];
++ struct idr_layer **pa[MAX_LEVEL + 1];
+ struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
+ int n;
+@@ -451,16 +459,16 @@ void idr_remove_all(struct idr *idp)
+ int n, id, max;
+ int bt_mask;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ rcu_assign_pointer(idp->top, NULL);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > IDR_BITS && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -519,7 +527,7 @@ void *idr_find(struct idr *idp, int id)
+ /* Mask off upper bits we don't use for the search. */
+ id &= MAX_ID_MASK;
+
+- if (id >= (1 << n))
++ if (id > idr_max(p->layer + 1))
+ return NULL;
+ BUG_ON(n == 0);
+
+@@ -555,15 +563,15 @@ int idr_for_each(struct idr *idp,
+ {
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = rcu_dereference_raw(idp->top);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -595,23 +603,25 @@ EXPORT_SYMBOL(idr_for_each);
+ * Returns pointer to registered object with id, which is next number to
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
++ *
++ * This function can be called under rcu_read_lock(), given that the leaf
++ * pointers lifetimes are correctly managed.
+ */
+-
+ void *idr_get_next(struct idr *idp, int *nextidp)
+ {
+- struct idr_layer *p, *pa[MAX_LEVEL];
++ struct idr_layer *p, *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+ int id = *nextidp;
+ int n, max;
+
+ /* find first ent */
+- n = idp->layers * IDR_BITS;
+- max = 1 << n;
+ p = rcu_dereference_raw(idp->top);
+ if (!p)
+ return NULL;
++ n = (p->layer + 1) * IDR_BITS;
++ max = idr_max(p->layer + 1);
+
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -623,7 +633,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
+ return p;
+ }
+
+- id += 1 << n;
++ /*
++ * Proceed to the next layer at the current level. Unlike
++ * idr_for_each(), @id isn't guaranteed to be aligned to
++ * layer boundary at this point and adding 1 << n may
++ * incorrectly skip IDs. Make sure we jump to the
++ * beginning of the next layer using round_up().
++ */
++ id = round_up(id + 1, 1 << n);
+ while (n < fls(id)) {
+ n += IDR_BITS;
+ p = *--paa;
+@@ -778,7 +795,7 @@ EXPORT_SYMBOL(ida_pre_get);
+ */
+ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+ int idr_id = starting_id / IDA_BITMAP_BITS;
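The lib/idr.c fixes above all stem from the same arithmetic: with IDR_BITS bits of radix per layer, `layers` layers cover IDs up to (1 << (layers * IDR_BITS)) - 1, but that shift must be clamped so it never reaches the sign bit of an int, and walkers need MAX_LEVEL + 1 slots of path. A standalone check of the helper's behavior (IDR_BITS = 5 and a 31-bit ID space are assumed here for illustration, not read from this tree):

    #include <stdio.h>

    #define IDR_BITS     5    /* assumed per-layer radix       */
    #define MAX_ID_SHIFT 31   /* IDs must fit in a signed int  */

    static int idr_max(int layers)
    {
        int bits = layers * IDR_BITS;

        if (bits > MAX_ID_SHIFT)  /* clamp: never shift into the sign bit */
            bits = MAX_ID_SHIFT;
        return (int)((1U << bits) - 1);
    }

    int main(void)
    {
        for (int layers = 1; layers <= 7; layers++)
            printf("layers=%d  max id=%d\n", layers, idr_max(layers));
        /* 7 layers would naively need 35 bits; the clamp caps the
         * result at 2^31 - 1 instead of invoking shift overflow. */
        return 0;
    }

The old `1 << (layers * IDR_BITS)` expression is exactly the unclamped version, which is undefined once the shift count hits the int width.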
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 8d723c9..35b2bb0 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -17,6 +17,7 @@
+ #include <linux/fadvise.h>
+ #include <linux/writeback.h>
+ #include <linux/syscalls.h>
++#include <linux/swap.h>
+
+ #include <asm/unistd.h>
+
+@@ -123,9 +124,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
+ end_index = (endbyte >> PAGE_CACHE_SHIFT);
+
+- if (end_index >= start_index)
+- invalidate_mapping_pages(mapping, start_index,
++ if (end_index >= start_index) {
++ unsigned long count = invalidate_mapping_pages(mapping,
++ start_index, end_index);
++
++ /*
++ * If fewer pages were invalidated than expected then
++ * it is possible that some of the pages were on
++ * a per-cpu pagevec for a remote CPU. Drain all
++ * pagevecs and try again.
++ */
++ if (count < (end_index - start_index + 1)) {
++ lru_add_drain_all();
++ invalidate_mapping_pages(mapping, start_index,
+ end_index);
++ }
++ }
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 862b608..8d1ca2d 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+
++/* global SRCU for all MMs */
++static struct srcu_struct srcu;
++
+ /*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,58 +29,61 @@
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
+- struct hlist_node *n;
++ int id;
+
+ /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
++ * srcu_read_lock() here will block synchronize_srcu() in
++ * mmu_notifier_unregister() until all registered
++ * ->release() callouts this function makes have
++ * returned.
+ */
+- rcu_read_lock();
+- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+-
++ id = srcu_read_lock(&srcu);
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
++
+ /*
+- * We arrived before mmu_notifier_unregister so
+- * mmu_notifier_unregister will do nothing other than
+- * to wait ->release to finish and
+- * mmu_notifier_unregister to return.
++ * Unlink. This will prevent mmu_notifier_unregister()
++ * from also making the ->release() callout.
+ */
+ hlist_del_init_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
++ /*
++ * Clear sptes. (see 'release' description in mmu_notifier.h)
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * synchronize_rcu here prevents mmu_notifier_release to
+- * return to exit_mmap (which would proceed freeing all pages
+- * in the mm) until the ->release method returns, if it was
+- * invoked by mmu_notifier_unregister.
+- *
+- * The mmu_notifier_mm can't go away from under us because one
+- * mm_count is hold by exit_mmap.
++ * All callouts to ->release() which we have done are complete.
++	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete.
++ */
++ srcu_read_unlock(&srcu, id);
++
++ /*
++ * mmu_notifier_unregister() may have unlinked a notifier and may
++ * still be calling out to it. Additionally, other notifiers
++	 * may have been active via vmtruncate() et al. Block here
++ * to ensure that all notifier callouts for this mm have been
++ * completed and the sptes are really cleaned up before returning
++ * to exit_mmap().
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+ }
+
+ /*
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->clear_flush_young)
+ young |= mn->ops->clear_flush_young(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->test_young) {
+ young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ break;
+ }
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_start)
+ mn->ops->invalidate_range_start(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_end)
+ mn->ops->invalidate_range_end(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+
+ BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
++ /*
++ * Verify that mmu_notifier_init() already run and the global srcu is
++ * initialized.
++ */
++ BUG_ON(!srcu.per_cpu_ref);
++
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+ /*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- /*
+- * RCU here will force exit_mmap to wait ->release to finish
+- * before freeing the pages.
+- */
+- rcu_read_lock();
++ int id;
+
+ /*
+- * exit_mmap will block in mmu_notifier_release to
+- * guarantee ->release is called before freeing the
+- * pages.
++ * Ensure we synchronize up with __mmu_notifier_release().
+ */
++ id = srcu_read_lock(&srcu);
++
++ hlist_del_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- rcu_read_unlock();
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+- hlist_del_rcu(&mn->hlist);
++ /*
++ * Allow __mmu_notifier_release() to complete.
++ */
++ srcu_read_unlock(&srcu, id);
++ } else
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+- }
+
+ /*
+- * Wait any running method to finish, of course including
+- * ->release if it was run by mmu_notifier_relase instead of us.
++ * Wait for any running method to finish, including ->release() if it
++ * was run by __mmu_notifier_release() instead of us.
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+ mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++ return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
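Conceptually, the mmu_notifier conversion above swaps "callouts run under rcu_read_lock()" for SRCU, whose read sections may sleep and whose synchronize_srcu() waits only for readers of this one srcu_struct. As a very loose userspace analogy only (SRCU does not block new readers the way a writer lock does, so this overstates the exclusion), the waiting relationship looks like this (pthread; build with -pthread; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t grace = PTHREAD_RWLOCK_INITIALIZER;

    static void notifier_callout(void)
    {
        pthread_rwlock_rdlock(&grace);   /* ~ srcu_read_lock()   */
        /* ... may sleep while calling ->release() etc. ...      */
        pthread_rwlock_unlock(&grace);   /* ~ srcu_read_unlock() */
    }

    static void unregister_path(void)
    {
        /* ~ synchronize_srcu(): returns only after every callout
         * that started before this point has finished. */
        pthread_rwlock_wrlock(&grace);
        pthread_rwlock_unlock(&grace);
        /* safe to tear down the notifier now */
    }

    int main(void)
    {
        notifier_callout();
        unregister_path();
        puts("notifier quiesced");
        return 0;
    }

The reason plain RCU no longer suffices is in the hunks themselves: ->release() callouts can sleep, which is forbidden inside rcu_read_lock() but permitted inside an SRCU read section.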
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4d3a697..5c028e2 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4253,10 +4253,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
+ * round what is now in bits to nearest long in bits, then return it in
+ * bytes.
+ */
+-static unsigned long __init usemap_size(unsigned long zonesize)
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
+ {
+ unsigned long usemapsize;
+
++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
+ usemapsize = roundup(zonesize, pageblock_nr_pages);
+ usemapsize = usemapsize >> pageblock_order;
+ usemapsize *= NR_PAGEBLOCK_BITS;
+@@ -4266,17 +4267,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
+ }
+
+ static void __init setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize)
++ struct zone *zone,
++ unsigned long zone_start_pfn,
++ unsigned long zonesize)
+ {
+- unsigned long usemapsize = usemap_size(zonesize);
++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ zone->pageblock_flags = NULL;
+ if (usemapsize)
+ zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ usemapsize);
+ }
+ #else
+-static inline void setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize) {}
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++ unsigned long zone_start_pfn, unsigned long zonesize) {}
+ #endif /* CONFIG_SPARSEMEM */
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+@@ -4401,7 +4404,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ continue;
+
+ set_pageblock_order();
+- setup_usemap(pgdat, zone, size);
++ setup_usemap(pgdat, zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+ BUG_ON(ret);
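The page_alloc change above accounts for a zone whose first pfn is not aligned to pageblock_nr_pages: the pageblock-flags bitmap must also cover the partial leading block, or accesses for the zone's last pages index past the allocation. The arithmetic, standalone (pageblock_nr_pages = 512 and NR_PAGEBLOCK_BITS = 4 are assumptions for illustration, and the round-up here is to bytes rather than longs):

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* assumed */
    #define NR_PAGEBLOCK_BITS  4UL     /* assumed */

    static unsigned long usemap_bytes(unsigned long start_pfn,
                                      unsigned long zonesize, int fixed)
    {
        if (fixed)  /* the fix: pad for the unaligned leading pages */
            zonesize += start_pfn & (PAGEBLOCK_NR_PAGES - 1);

        unsigned long blocks = (zonesize + PAGEBLOCK_NR_PAGES - 1)
                               / PAGEBLOCK_NR_PAGES;
        unsigned long bits = blocks * NR_PAGEBLOCK_BITS;
        return (bits + 7) / 8;
    }

    int main(void)
    {
        /* A zone starting mid-pageblock straddles one more block
         * than its raw size suggests. */
        unsigned long start = 256, size = 1024;
        printf("old: %lu bytes, new: %lu bytes\n",
               usemap_bytes(start, size, 0),   /* 2 blocks covered */
               usemap_bytes(start, size, 1));  /* 3 blocks covered */
        return 0;
    }

The old formula sized the map for 2 pageblocks; the pages actually span 3, so the third block's flags landed in unallocated memory.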
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 12b9e80..a78acf0 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2121,6 +2121,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ unsigned long inodes;
+ int error = -EINVAL;
+
++ config.mpol = NULL;
+ if (shmem_parse_options(data, &config, true))
+ return error;
+
+@@ -2145,8 +2146,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ sbinfo->max_inodes = config.max_inodes;
+ sbinfo->free_inodes = config.max_inodes - inodes;
+
+- mpol_put(sbinfo->mpol);
+- sbinfo->mpol = config.mpol; /* transfers initial ref */
++ /*
++ * Preserve previous mempolicy unless mpol remount option was specified.
++ */
++ if (config.mpol) {
++ mpol_put(sbinfo->mpol);
++ sbinfo->mpol = config.mpol; /* transfers initial ref */
++ }
+ out:
+ spin_unlock(&sbinfo->stat_lock);
+ return error;
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index e16aade..718cbe8 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -16,6 +16,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/llc.h>
+ #include <linux/slab.h>
++#include <linux/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/llc.h>
+ #include <net/llc_pdu.h>
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
+
+ skb->dev = p->dev;
+ skb->protocol = htons(ETH_P_802_2);
++ skb->priority = TC_PRIO_CONTROL;
+
+ skb_reserve(skb, LLC_RESERVE);
+ memcpy(__skb_put(skb, length), data, length);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 1b5096a..5d228de 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -226,8 +226,12 @@ EXPORT_SYMBOL(inet_listen);
+ u32 inet_ehash_secret __read_mostly;
+ EXPORT_SYMBOL(inet_ehash_secret);
+
++u32 ipv6_hash_secret __read_mostly;
++EXPORT_SYMBOL(ipv6_hash_secret);
++
+ /*
+- * inet_ehash_secret must be set exactly once
++ * inet_ehash_secret must be set exactly once, and to a non-zero value;
++ * ipv6_hash_secret must be set exactly once.
+ */
+ void build_ehash_secret(void)
+ {
+@@ -237,7 +241,8 @@ void build_ehash_secret(void)
+ get_random_bytes(&rnd, sizeof(rnd));
+ } while (rnd == 0);
+
+- cmpxchg(&inet_ehash_secret, 0, rnd);
++ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 43d4c3b..294a380 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -321,8 +321,8 @@ void ping_err(struct sk_buff *skb, u32 info)
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ struct inet_sock *inet_sock;
+- int type = icmph->type;
+- int code = icmph->code;
++ int type = icmp_hdr(skb)->type;
++ int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ int harderr;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 3282453..9acee9d 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -816,7 +816,6 @@ static void svc_age_temp_xprts(unsigned long closure)
+ struct svc_serv *serv = (struct svc_serv *)closure;
+ struct svc_xprt *xprt;
+ struct list_head *le, *next;
+- LIST_HEAD(to_be_aged);
+
+ dprintk("svc_age_temp_xprts\n");
+
+@@ -837,25 +836,15 @@ static void svc_age_temp_xprts(unsigned long closure)
+ if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+ test_bit(XPT_BUSY, &xprt->xpt_flags))
+ continue;
+- svc_xprt_get(xprt);
+- list_move(le, &to_be_aged);
++ list_del_init(le);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ set_bit(XPT_DETACHED, &xprt->xpt_flags);
+- }
+- spin_unlock_bh(&serv->sv_lock);
+-
+- while (!list_empty(&to_be_aged)) {
+- le = to_be_aged.next;
+- /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+- list_del_init(le);
+- xprt = list_entry(le, struct svc_xprt, xpt_list);
+-
+ dprintk("queuing xprt %p for closing\n", xprt);
+
+ /* a thread will dequeue and close it soon */
+ svc_xprt_enqueue(xprt);
+- svc_xprt_put(xprt);
+ }
++ spin_unlock_bh(&serv->sv_lock);
+
+ mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
+ }
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 193ce81..42b876d 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -287,12 +287,14 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ loopback_active_notify(dpcm);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
+ spin_lock(&cable->lock);
+ cable->pause |= stream;
+ spin_unlock(&cable->lock);
+ loopback_timer_stop(dpcm);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ case SNDRV_PCM_TRIGGER_RESUME:
+ spin_lock(&cable->lock);
+ dpcm->last_jiffies = jiffies;
+ cable->pause &= ~stream;
+@@ -552,7 +554,8 @@ static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
+ static struct snd_pcm_hardware loopback_pcm_hardware =
+ {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+- SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE),
++ SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |
+ SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE),
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index ef85ac5..be662c9 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+
+ spin_lock(&codec->reg_lock);
+ if (!pvoice->running) {
+- spin_unlock_irq(&codec->reg_lock);
++ spin_unlock(&codec->reg_lock);
+ return 0;
+ }
+ outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
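The ali5451 one-liner above (and the mirror-image rme32 fix further down) repairs an unbalanced lock pair: the section was entered with spin_lock() but left with spin_unlock_irq(), which additionally re-enables interrupts that the lock never disabled. Modeled with local stub functions so the state damage is visible (standalone C; the spin_* names here are stand-ins, not the kernel's implementations):

    #include <stdio.h>

    static int irqs_enabled = 0;   /* pretend IRQs are off on entry */
    static int lock_held;

    static void spin_lock(void)       { lock_held = 1; }
    static void spin_unlock(void)     { lock_held = 0; }
    static void spin_unlock_irq(void) { lock_held = 0; irqs_enabled = 1; }

    int main(void)
    {
        spin_lock();
        spin_unlock_irq();            /* the bug: mismatched unlock flavor */
        printf("buggy pair: irqs_enabled=%d (corrupted)\n", irqs_enabled);

        irqs_enabled = 0;
        spin_lock();
        spin_unlock();                /* the fix: matched flavor */
        printf("fixed pair: irqs_enabled=%d (preserved)\n", irqs_enabled);
        return 0;
    }

Every lock flavor must be paired with its own unlock flavor, because the _irq and _irqsave variants carry interrupt state alongside the lock itself.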
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index bde2615..3c8bc6e 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -918,8 +918,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ if (!static_hdmi_pcm && eld->eld_valid) {
+ snd_hdmi_eld_update_pcm_info(eld, hinfo);
+ if (hinfo->channels_min > hinfo->channels_max ||
+- !hinfo->rates || !hinfo->formats)
++ !hinfo->rates || !hinfo->formats) {
++ per_cvt->assigned = 0;
++ hinfo->nid = 0;
++ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ return -ENODEV;
++ }
+ }
+
+ /* Store the updated parameters */
+@@ -983,6 +987,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+ codec->addr, pin_nid, eld->monitor_present, eld_valid);
+
++ eld->eld_valid = false;
+ if (eld_valid) {
+ if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+ snd_hdmi_show_eld(eld);
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index 21bcb47..62075a5 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
+ spin_lock_irq(&rme32->lock);
+ rme32->capture_substream = NULL;
+ rme32->capture_periodsize = 0;
+- spin_unlock(&rme32->lock);
++ spin_unlock_irq(&rme32->lock);
+ return 0;
+ }
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 32d2a21..4e25148 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1624,7 +1624,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "A-PRO", */
+- .ifnum = 1,
++ .ifnum = 0,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
diff --git a/3.2.54/1040_linux-3.2.41.patch b/3.2.54/1040_linux-3.2.41.patch
new file mode 100644
index 0000000..0d27fcb
--- /dev/null
+++ b/3.2.54/1040_linux-3.2.41.patch
@@ -0,0 +1,3865 @@
+diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+index b8b27b0..3f89cbd 100644
+--- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt
++++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+@@ -10,6 +10,9 @@ Required properties:
+ - "ns16850"
+ - "nvidia,tegra20-uart"
+ - "ibm,qpace-nwp-serial"
++ - "altr,16550-FIFO32"
++ - "altr,16550-FIFO64"
++ - "altr,16550-FIFO128"
+ - "serial" if the port type is unknown.
+ - reg : offset and length of the register set for the device.
+ - interrupts : should contain uart interrupt.
+diff --git a/Makefile b/Makefile
+index 47af1e9..95e6220 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 1049319..510456d 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -720,7 +720,7 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ /*
+ * PMXEVTYPER: Event selection reg
+ */
+-#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
++#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
+ #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+
+ /*
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index c335c76..a125c4b 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ unsigned long instr = 0, instrptr;
+ int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+ unsigned int type;
+- mm_segment_t fs;
+ unsigned int fault;
+ u16 tinstr = 0;
+ int isize = 4;
+@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ instrptr = instruction_pointer(regs);
+
+- fs = get_fs();
+- set_fs(KERNEL_DS);
+ if (thumb_mode(regs)) {
+- fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
++ u16 *ptr = (u16 *)(instrptr & ~1);
++ fault = probe_kernel_address(ptr, tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+- fault = __get_user(tinst2, (u16 *)(instrptr+2));
++ fault = probe_kernel_address(ptr + 1, tinst2);
+ instr = (tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ }
+ } else
+- fault = __get_user(instr, (u32 *)instrptr);
+- set_fs(fs);
++ fault = probe_kernel_address(instrptr, instr);
+
+ if (fault) {
+ type = TYPE_FAULT;
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 7c815b2..111691c 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -409,7 +409,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+ * If there isn't a second FP instruction, exit now. Note that
+ * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
+ */
+- if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
++ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+ goto exit;
+
+ /*
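The vfpmodule fix above replaces `fpexc ^ (FPEXC_EX | FPEXC_FP2V)`, which is non-zero whenever any *other* bit of fpexc is set even with both flags present, with the conventional masked compare. The difference, standalone (bit positions follow the ARM VFP FPEXC layout):

    #include <stdio.h>

    #define FPEXC_EX   (1u << 31)
    #define FPEXC_FP2V (1u << 28)

    int main(void)
    {
        /* Both flags set, plus an unrelated status bit. */
        unsigned int fpexc = FPEXC_EX | FPEXC_FP2V | (1u << 3);

        int xor_says_missing  = (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) != 0;
        int mask_says_missing =
            (fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V);

        /* XOR wrongly reports the flags as missing; the mask test does not. */
        printf("xor: %d  mask: %d\n", xor_says_missing, mask_says_missing);
        return 0;
    }

The XOR form only equals zero when fpexc contains exactly those two bits and nothing else, which is almost never what a flag test means.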
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index 21165a4..66ea9b8 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -61,7 +61,6 @@ void __init pci_addr_cache_build(void);
+ */
+ void eeh_add_device_tree_early(struct device_node *);
+ void eeh_add_device_tree_late(struct pci_bus *);
+-void eeh_add_sysfs_files(struct pci_bus *);
+
+ /**
+ * eeh_remove_device_recursive - undo EEH for device & children.
+@@ -106,8 +105,6 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+ static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
+-static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+-
+ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+ #define EEH_POSSIBLE_ERROR(val, type) (0)
+ #define EEH_IO_ERROR_VALUE(size) (-1UL)
+diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
+index b10beef..e1612df 100644
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -91,9 +91,6 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
+- /* sysfs files should only be added after devices are added */
+- eeh_add_sysfs_files(phb->bus);
+-
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index a3cd949..458ed3b 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -1536,14 +1536,11 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
+- /* Fixup EEH */
+- eeh_add_device_tree_late(bus);
+-
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+- /* sysfs files should only be added after devices are added */
+- eeh_add_sysfs_files(bus);
++ /* Fixup EEH */
++ eeh_add_device_tree_late(bus);
+ }
+ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
+diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
+index 389e06b..5658690 100644
+--- a/arch/powerpc/platforms/pseries/eeh.c
++++ b/arch/powerpc/platforms/pseries/eeh.c
+@@ -1238,6 +1238,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
+ pdn->pcidev = dev;
+
+ pci_addr_cache_insert_device(dev);
++ eeh_sysfs_add_device(dev);
+ }
+
+ void eeh_add_device_tree_late(struct pci_bus *bus)
+@@ -1256,29 +1257,6 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
+ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
+
+ /**
+- * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
+- * @bus: PCI bus
+- *
+- * This routine must be used to add EEH sysfs files for PCI
+- * devices which are attached to the indicated PCI bus. The PCI bus
+- * is added after system boot through hotplug or dlpar.
+- */
+-void eeh_add_sysfs_files(struct pci_bus *bus)
+-{
+- struct pci_dev *dev;
+-
+- list_for_each_entry(dev, &bus->devices, bus_list) {
+- eeh_sysfs_add_device(dev);
+- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+- struct pci_bus *subbus = dev->subordinate;
+- if (subbus)
+- eeh_add_sysfs_files(subbus);
+- }
+- }
+-}
+-EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
+-
+-/**
+ * eeh_remove_device - undo EEH setup for the indicated pci device
+ * @dev: pci device to be removed
+ *
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 4c262f6..1e1caf56 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -162,6 +162,9 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ struct msi_desc *msidesc;
+ int *v;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+@@ -220,6 +223,9 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ struct msi_desc *msidesc;
+ struct msi_msg msg;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ __read_msi_msg(msidesc, &msg);
+ pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+@@ -263,6 +269,9 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ int ret = 0;
+ struct msi_desc *msidesc;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ struct physdev_map_pirq map_irq;
+ domid_t domid;
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index a0f768c..9f73037 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_ablkcipher.geniv ?: "<default>");
++ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
+@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_ablkcipher.geniv ?: "<built-in>");
++ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 04add3dc..479b7d1 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_aead raead;
+ struct aead_alg *aead = &alg->cra_aead;
+
+- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
+- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- aead->geniv ?: "<built-in>");
++ strncpy(raead.type, "aead", sizeof(raead.type));
++ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
+
+ raead.blocksize = alg->cra_blocksize;
+ raead.maxauthsize = aead->maxauthsize;
+@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_aead raead;
+ struct aead_alg *aead = &alg->cra_aead;
+
+- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
+- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
++ strncpy(raead.type, "nivaead", sizeof(raead.type));
++ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
+
+ raead.blocksize = alg->cra_blocksize;
+ raead.maxauthsize = aead->maxauthsize;
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index ac93c99..7fe1752 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_hash rhash;
+
+- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
++ strncpy(rhash.type, "ahash", sizeof(rhash.type));
+
+ rhash.blocksize = alg->cra_blocksize;
+ rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 1e61d1a..04f0f38 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_blkcipher.geniv ?: "<default>");
++ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 0605a2b..5b63b8d 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -71,7 +71,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_cipher rcipher;
+
+- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
++ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.blocksize = alg->cra_blocksize;
+ rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+@@ -90,8 +90,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_comp rcomp;
+
+- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
+-
++ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rcomp);
+
+@@ -104,12 +103,14 @@ nla_put_failure:
+ static int crypto_report_one(struct crypto_alg *alg,
+ struct crypto_user_alg *ualg, struct sk_buff *skb)
+ {
+- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
+- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
+- sizeof(ualg->cru_driver_name));
+- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
+- CRYPTO_MAX_ALG_NAME);
+-
++ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
++ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
++ sizeof(ualg->cru_driver_name));
++ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
++ sizeof(ualg->cru_module_name));
++
++ ualg->cru_type = 0;
++ ualg->cru_mask = 0;
+ ualg->cru_flags = alg->cra_flags;
+ ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+
+@@ -118,8 +119,7 @@ static int crypto_report_one(struct crypto_alg *alg,
+ if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+ struct crypto_report_larval rl;
+
+- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
+-
++ strncpy(rl.type, "larval", sizeof(rl.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
+ sizeof(struct crypto_report_larval), &rl);
+
+diff --git a/crypto/pcompress.c b/crypto/pcompress.c
+index 2e458e5..6f2a361 100644
+--- a/crypto/pcompress.c
++++ b/crypto/pcompress.c
+@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_comp rpcomp;
+
+- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
+-
++ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rpcomp);
+
+diff --git a/crypto/rng.c b/crypto/rng.c
+index 64f864f..1966c1d 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_rng rrng;
+
+- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
++ strncpy(rrng.type, "rng", sizeof(rrng.type));
+
+ rrng.seedsize = alg->cra_rng.seedsize;
+
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 9100912..f507294 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_hash rhash;
+ struct shash_alg *salg = __crypto_shash_alg(alg);
+
+- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
++ strncpy(rhash.type, "shash", sizeof(rhash.type));
++
+ rhash.blocksize = alg->cra_blocksize;
+ rhash.digestsize = salg->digestsize;
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 62c1325..87acc23 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -262,6 +262,46 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1e888c9..8c6787a 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1262,11 +1262,9 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+ /* the width of sector_t may be narrow for bit-shift */
+ sz = sec;
+ sz <<= 9;
+- mutex_lock(&bdev->bd_mutex);
+ bd_set_size(bdev, sz);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+- mutex_unlock(&bdev->bd_mutex);
+
+ out:
+ return err;
+@@ -1836,11 +1834,15 @@ static int __init loop_init(void)
+ max_part = (1UL << part_shift) - 1;
+ }
+
+- if ((1UL << part_shift) > DISK_MAX_PARTS)
+- return -EINVAL;
++ if ((1UL << part_shift) > DISK_MAX_PARTS) {
++ err = -EINVAL;
++ goto misc_out;
++ }
+
+- if (max_loop > 1UL << (MINORBITS - part_shift))
+- return -EINVAL;
++ if (max_loop > 1UL << (MINORBITS - part_shift)) {
++ err = -EINVAL;
++ goto misc_out;
++ }
+
+ /*
+ * If max_loop is specified, create that many devices upfront.
+@@ -1858,8 +1860,10 @@ static int __init loop_init(void)
+ range = 1UL << MINORBITS;
+ }
+
+- if (register_blkdev(LOOP_MAJOR, "loop"))
+- return -EIO;
++ if (register_blkdev(LOOP_MAJOR, "loop")) {
++ err = -EIO;
++ goto misc_out;
++ }
+
+ blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
+ THIS_MODULE, loop_probe, NULL, NULL);
+@@ -1872,6 +1876,10 @@ static int __init loop_init(void)
+
+ printk(KERN_INFO "loop: module loaded\n");
+ return 0;
++
++misc_out:
++ misc_deregister(&loop_misc);
++ return err;
+ }
+
+ static int loop_exit_cb(int id, void *ptr, void *data)
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 1bafb40..69ae597 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -40,6 +40,7 @@
+ #include <linux/init.h>
+ #include <linux/miscdevice.h>
+ #include <linux/delay.h>
++#include <linux/slab.h>
+ #include <asm/uaccess.h>
+
+
+@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
+ static LIST_HEAD(rng_list);
+ static DEFINE_MUTEX(rng_mutex);
+ static int data_avail;
+-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
+- __cacheline_aligned;
++static u8 *rng_buffer;
++
++static size_t rng_buffer_size(void)
++{
++ return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++}
+
+ static inline int hwrng_init(struct hwrng *rng)
+ {
+@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+
+ if (!data_avail) {
+ bytes_read = rng_get_data(current_rng, rng_buffer,
+- sizeof(rng_buffer),
++ rng_buffer_size(),
+ !(filp->f_flags & O_NONBLOCK));
+ if (bytes_read < 0) {
+ err = bytes_read;
+@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
+
+ mutex_lock(&rng_mutex);
+
++ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
++ err = -ENOMEM;
++ if (!rng_buffer) {
++ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
++ if (!rng_buffer)
++ goto out_unlock;
++ }
++
+ /* Must not register two RNGs with the same name. */
+ err = -EEXIST;
+ list_for_each_entry(tmp, &rng_list, list) {
+diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
+index fd699cc..86ed591 100644
+--- a/drivers/char/hw_random/virtio-rng.c
++++ b/drivers/char/hw_random/virtio-rng.c
+@@ -89,14 +89,22 @@ static int virtrng_probe(struct virtio_device *vdev)
+ {
+ int err;
+
++ if (vq) {
++ /* We only support one device for now */
++ return -EBUSY;
++ }
+ /* We expect a single virtqueue. */
+ vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+- if (IS_ERR(vq))
+- return PTR_ERR(vq);
++ if (IS_ERR(vq)) {
++ err = PTR_ERR(vq);
++ vq = NULL;
++ return err;
++ }
+
+ err = hwrng_register(&virtio_hwrng);
+ if (err) {
+ vdev->config->del_vqs(vdev);
++ vq = NULL;
+ return err;
+ }
+
+@@ -108,6 +116,7 @@ static void __devexit virtrng_remove(struct virtio_device *vdev)
+ vdev->config->reset(vdev);
+ hwrng_unregister(&virtio_hwrng);
+ vdev->config->del_vqs(vdev);
++ vq = NULL;
+ }
+
+ static struct virtio_device_id id_table[] = {
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 77e1e6c..46bbf43 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -303,6 +303,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
+ if (msg->len != sizeof(*mc_op))
+ return;
+
++ /* Can only change if privileged. */
++ if (!capable(CAP_NET_ADMIN)) {
++ err = EPERM;
++ goto out;
++ }
++
+ mc_op = (enum proc_cn_mcast_op*)msg->data;
+ switch (*mc_op) {
+ case PROC_CN_MCAST_LISTEN:
+@@ -315,6 +321,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
+ err = EINVAL;
+ break;
+ }
++
++out:
+ cn_proc_ack(err, msg->seq, msg->ack);
+ }
+
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index c5072a9..4bf374d 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -330,6 +330,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ cpufreq_update_policy(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
+ cpufreq_stats_free_sysfs(cpu);
+ break;
+ case CPU_DEAD:
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 982f1f5..4cd392d 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
+ static int __init smbios_present(const char __iomem *p)
+ {
+ u8 buf[32];
+- int offset = 0;
+
+ memcpy_fromio(buf, p, 32);
+ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
+ dmi_ver = 0x0206;
+ break;
+ }
+- offset = 16;
++ return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
+ }
+- return dmi_present(buf + offset);
++ return 1;
+ }
+
+ void __init dmi_scan_machine(void)
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 5d5a868..81346ae 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -393,10 +393,11 @@ static efi_status_t
+ get_var_data(struct efivars *efivars, struct efi_variable *var)
+ {
+ efi_status_t status;
++ unsigned long flags;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irqsave(&efivars->lock, flags);
+ status = get_var_data_locked(efivars, var);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irqrestore(&efivars->lock, flags);
+
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
+@@ -405,6 +406,30 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
+ return status;
+ }
+
++static efi_status_t
++check_var_size_locked(struct efivars *efivars, u32 attributes,
++ unsigned long size)
++{
++ u64 storage_size, remaining_size, max_size;
++ efi_status_t status;
++ const struct efivar_operations *fops = efivars->ops;
++
++ if (!efivars->ops->query_variable_info)
++ return EFI_UNSUPPORTED;
++
++ status = fops->query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++
++ if (status != EFI_SUCCESS)
++ return status;
++
++ if (!storage_size || size > remaining_size || size > max_size ||
++ (remaining_size - size) < (storage_size / 2))
++ return EFI_OUT_OF_RESOURCES;
++
++ return status;
++}
++
+ static ssize_t
+ efivar_guid_read(struct efivar_entry *entry, char *buf)
+ {
+@@ -525,14 +550,19 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+ return -EINVAL;
+ }
+
+- spin_lock(&efivars->lock);
+- status = efivars->ops->set_variable(new_var->VariableName,
+- &new_var->VendorGuid,
+- new_var->Attributes,
+- new_var->DataSize,
+- new_var->Data);
++ spin_lock_irq(&efivars->lock);
++
++ status = check_var_size_locked(efivars, new_var->Attributes,
++ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+- spin_unlock(&efivars->lock);
++ if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
++ status = efivars->ops->set_variable(new_var->VariableName,
++ &new_var->VendorGuid,
++ new_var->Attributes,
++ new_var->DataSize,
++ new_var->Data);
++
++ spin_unlock_irq(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+@@ -637,13 +667,43 @@ efivar_unregister(struct efivar_entry *var)
+ kobject_put(&var->kobj);
+ }
+
++static int efi_status_to_err(efi_status_t status)
++{
++ int err;
++
++ switch (status) {
++ case EFI_INVALID_PARAMETER:
++ err = -EINVAL;
++ break;
++ case EFI_OUT_OF_RESOURCES:
++ err = -ENOSPC;
++ break;
++ case EFI_DEVICE_ERROR:
++ err = -EIO;
++ break;
++ case EFI_WRITE_PROTECTED:
++ err = -EROFS;
++ break;
++ case EFI_SECURITY_VIOLATION:
++ err = -EACCES;
++ break;
++ case EFI_NOT_FOUND:
++ err = -ENOENT;
++ break;
++ default:
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
+ #ifdef CONFIG_PSTORE
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+ struct efivars *efivars = psi->data;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ efivars->walk_entry = list_first_entry(&efivars->list,
+ struct efivar_entry, list);
+ return 0;
+@@ -653,7 +713,7 @@ static int efi_pstore_close(struct pstore_info *psi)
+ {
+ struct efivars *efivars = psi->data;
+
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return 0;
+ }
+
+@@ -706,11 +766,28 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i, ret = 0;
++ efi_status_t status = EFI_NOT_FOUND;
++ unsigned long flags;
+
+ sprintf(stub_name, "dump-type%u-%u-", type, part);
+ sprintf(name, "%s%lu", stub_name, get_seconds());
+
+- spin_lock(&efivars->lock);
++ spin_lock_irqsave(&efivars->lock, flags);
++
++ /*
++	 * Check if there is enough space to log.
++	 * size: the size of the logging data
++	 * DUMP_NAME_LEN * 2: the maximum size of the variable name
++ */
++
++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
++ size + DUMP_NAME_LEN * 2);
++
++ if (status) {
++ spin_unlock_irqrestore(&efivars->lock, flags);
++ *id = part;
++ return -ENOSPC;
++ }
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = stub_name[i];
+@@ -748,7 +825,7 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+- spin_unlock(&efivars->lock);
++ spin_unlock_irqrestore(&efivars->lock, flags);
+
+ if (found)
+ efivar_unregister(found);
+@@ -831,7 +908,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ return -EINVAL;
+ }
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+
+ /*
+ * Does this variable already exist?
+@@ -849,10 +926,18 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ }
+ }
+ if (found) {
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EINVAL;
+ }
+
++ status = check_var_size_locked(efivars, new_var->Attributes,
++ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
++
++ if (status && status != EFI_UNSUPPORTED) {
++ spin_unlock_irq(&efivars->lock);
++ return efi_status_to_err(status);
++ }
++
+ /* now *really* create the variable via EFI */
+ status = efivars->ops->set_variable(new_var->VariableName,
+ &new_var->VendorGuid,
+@@ -863,10 +948,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+ status);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EIO;
+ }
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+
+ /* Create the entry in sysfs. Locking is not required here */
+ status = efivar_create_sysfs_entry(efivars,
+@@ -894,7 +979,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+
+ /*
+ * Does this variable already exist?
+@@ -912,7 +997,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ }
+ }
+ if (!found) {
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EINVAL;
+ }
+ /* force the Attributes/DataSize to 0 to ensure deletion */
+@@ -928,12 +1013,12 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+ status);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EIO;
+ }
+ list_del(&search_efivar->list);
+ /* We need to release this lock before unregistering. */
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(search_efivar);
+
+ /* It's dead Jim.... */
+@@ -1041,9 +1126,9 @@ efivar_create_sysfs_entry(struct efivars *efivars,
+ kfree(short_name);
+ short_name = NULL;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ list_add(&new_efivar->list, &efivars->list);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+
+ return 0;
+ }
+@@ -1112,9 +1197,9 @@ void unregister_efivars(struct efivars *efivars)
+ struct efivar_entry *entry, *n;
+
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ list_del(&entry->list);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(entry);
+ }
+ if (efivars->new_var)
+@@ -1235,6 +1320,7 @@ efivars_init(void)
+ ops.get_variable = efi.get_variable;
+ ops.set_variable = efi.set_variable;
+ ops.get_next_variable = efi.get_next_variable;
++ ops.query_variable_info = efi.query_variable_info;
+ error = register_efivars(&__efivars, &ops, efi_kobj);
+ if (error)
+ goto err_put;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 2303c2b..4591582 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7280,8 +7280,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_framebuffer *intel_fb;
+- struct drm_i915_gem_object *obj;
++ struct drm_framebuffer *old_fb = crtc->fb;
++ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+@@ -7293,8 +7293,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ work->event = event;
+ work->dev = crtc->dev;
+- intel_fb = to_intel_framebuffer(crtc->fb);
+- work->old_fb_obj = intel_fb->obj;
++ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+@@ -7314,9 +7313,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+- intel_fb = to_intel_framebuffer(fb);
+- obj = intel_fb->obj;
+-
+ mutex_lock(&dev->struct_mutex);
+
+ /* Reference the objects for the scheduled work. */
+@@ -7347,6 +7343,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ crtc->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index ec36dd9..c32fd93 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -958,6 +958,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ found = 1;
+ }
+
++ /* quirks */
++ /* Radeon 9100 (R200) */
++ if ((dev->pdev->device == 0x514D) &&
++ (dev->pdev->subsystem_vendor == 0x174B) &&
++ (dev->pdev->subsystem_device == 0x7149)) {
++ /* vbios value is bad, use the default */
++ found = 0;
++ }
++
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index a23b63a..611aafc 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1533,6 +1533,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, USB_DEVICE_ID_MTP_SITRONIX) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 25f3290..e665bdf 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -644,6 +644,7 @@
+
+ #define USB_VENDOR_ID_SONY 0x054c
+ #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
++#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
+ #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+ #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
+
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 5cd25bd..4142c21 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -44,9 +44,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ {
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+- if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
+- *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+- hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
++ /*
++	 * Some Sony RF receivers wrongly declare the mouse pointer as
++ * a constant non-data variable.
++ */
++ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
++ /* usage page: generic desktop controls */
++ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
++ /* usage: mouse */
++ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++ /* input (usage page for x,y axes): constant, variable, relative */
++ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
++ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
++ /* input: data, variable, relative */
+ rdesc[55] = 0x06;
+ }
+
+@@ -218,6 +228,8 @@ static const struct hid_device_id sony_devices[] = {
+ .driver_data = SIXAXIS_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
+ .driver_data = VAIO_RDESC_CONSTANT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
++ .driver_data = VAIO_RDESC_CONSTANT },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, sony_devices);
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index 89f5244..0e8343f 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
+ * The windows host expects the key/value pair to be encoded
+ * in utf16.
+ */
+- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
+- (wchar_t *)kvp_data->data.key);
++ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
++ (wchar_t *) kvp_data->data.key,
++ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
+ kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
+- valuelen = utf8s_to_utf16s(value, strlen(value),
+- (wchar_t *)kvp_data->data.value);
++ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
++ (wchar_t *) kvp_data->data.value,
++ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
+ kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
+
+ kvp_data->data.value_type = REG_SZ; /* all our values are strings */
+diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
+index 58eded2..c9910f7 100644
+--- a/drivers/hwmon/lineage-pem.c
++++ b/drivers/hwmon/lineage-pem.c
+@@ -421,6 +421,7 @@ static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
++ NULL
+ };
+
+ static const struct attribute_group pem_input_group = {
+@@ -431,6 +432,7 @@ static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
++ NULL
+ };
+
+ static const struct attribute_group pem_fan_group = {
+diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
+index 820fff4..43c7414 100644
+--- a/drivers/hwmon/pmbus/ltc2978.c
++++ b/drivers/hwmon/pmbus/ltc2978.c
+@@ -59,10 +59,10 @@ enum chips { ltc2978, ltc3880 };
+ struct ltc2978_data {
+ enum chips id;
+ int vin_min, vin_max;
+- int temp_min, temp_max;
++ int temp_min, temp_max[2];
+ int vout_min[8], vout_max[8];
+ int iout_max[2];
+- int temp2_max[2];
++ int temp2_max;
+ struct pmbus_driver_info info;
+ };
+
+@@ -113,9 +113,10 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
+ ret = pmbus_read_word_data(client, page,
+ LTC2978_MFR_TEMPERATURE_PEAK);
+ if (ret >= 0) {
+- if (lin11_to_val(ret) > lin11_to_val(data->temp_max))
+- data->temp_max = ret;
+- ret = data->temp_max;
++ if (lin11_to_val(ret)
++ > lin11_to_val(data->temp_max[page]))
++ data->temp_max[page] = ret;
++ ret = data->temp_max[page];
+ }
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+@@ -204,10 +205,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
+ ret = pmbus_read_word_data(client, page,
+ LTC3880_MFR_TEMPERATURE2_PEAK);
+ if (ret >= 0) {
+- if (lin11_to_val(ret)
+- > lin11_to_val(data->temp2_max[page]))
+- data->temp2_max[page] = ret;
+- ret = data->temp2_max[page];
++ if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
++ data->temp2_max = ret;
++ ret = data->temp2_max;
+ }
+ break;
+ case PMBUS_VIRT_READ_VIN_MIN:
+@@ -248,11 +248,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+- data->iout_max[page] = 0x7fff;
++ data->iout_max[page] = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP2_HISTORY:
+- data->temp2_max[page] = 0x7fff;
++ data->temp2_max = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+@@ -262,12 +262,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ data->vin_min = 0x7bff;
+- data->vin_max = 0;
++ data->vin_max = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ data->temp_min = 0x7bff;
+- data->temp_max = 0x7fff;
++ data->temp_max[page] = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ default:
+@@ -323,12 +323,14 @@ static int ltc2978_probe(struct i2c_client *client,
+ info = &data->info;
+ info->write_word_data = ltc2978_write_word_data;
+
+- data->vout_min[0] = 0xffff;
+ data->vin_min = 0x7bff;
++ data->vin_max = 0x7c00;
+ data->temp_min = 0x7bff;
+- data->temp_max = 0x7fff;
++ for (i = 0; i < ARRAY_SIZE(data->temp_max); i++)
++ data->temp_max[i] = 0x7c00;
++ data->temp2_max = 0x7c00;
+
+- switch (id->driver_data) {
++ switch (data->id) {
+ case ltc2978:
+ info->read_word_data = ltc2978_read_word_data;
+ info->pages = 8;
+@@ -338,7 +340,6 @@ static int ltc2978_probe(struct i2c_client *client,
+ for (i = 1; i < 8; i++) {
+ info->func[i] = PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT;
+- data->vout_min[i] = 0xffff;
+ }
+ break;
+ case ltc3880:
+@@ -354,12 +355,15 @@ static int ltc2978_probe(struct i2c_client *client,
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+- data->vout_min[1] = 0xffff;
++ data->iout_max[0] = 0x7c00;
++ data->iout_max[1] = 0x7c00;
+ break;
+ default:
+ ret = -ENODEV;
+ goto err_mem;
+ }
++ for (i = 0; i < info->pages; i++)
++ data->vout_min[i] = 0xffff;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index 5357925..3e3153e 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -926,7 +926,13 @@ static int __devinit sht15_probe(struct platform_device *pdev)
+ if (voltage)
+ data->supply_uV = voltage;
+
+- regulator_enable(data->reg);
++ ret = regulator_enable(data->reg);
++ if (ret != 0) {
++ dev_err(&pdev->dev,
++ "failed to enable regulator: %d\n", ret);
++ goto err_free_data;
++ }
++
+ /*
+ * Setup a notifier block to update this if another device
+ * causes the voltage to change
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 62a4d5c..b7d1cdd 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1396,6 +1396,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
+ */
+ static int __init amd_iommu_init(void)
+ {
++ struct amd_iommu *iommu;
+ int i, ret = 0;
+
+ /*
+@@ -1444,9 +1445,6 @@ static int __init amd_iommu_init(void)
+ if (amd_iommu_pd_alloc_bitmap == NULL)
+ goto free;
+
+- /* init the device table */
+- init_device_table();
+-
+ /*
+ * let all alias entries point to itself
+ */
+@@ -1496,6 +1494,12 @@ static int __init amd_iommu_init(void)
+ if (ret)
+ goto free_disable;
+
++ /* init the device table */
++ init_device_table();
++
++ for_each_iommu(iommu)
++ iommu_flush_all_caches(iommu);
++
+ amd_iommu_init_api();
+
+ amd_iommu_init_notifier();
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 58d8c6d..aa142f9 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1262,20 +1262,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
+ return 0;
+ }
+
+-/*
+- * Encode key into its hex representation
+- */
+-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < size; i++) {
+- sprintf(hex, "%02x", *key);
+- hex += 2;
+- key++;
+- }
+-}
+-
+ static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+ {
+ struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+@@ -1739,11 +1725,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static int crypt_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void crypt_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct crypt_config *cc = ti->private;
+- unsigned int sz = 0;
++ unsigned i, sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+@@ -1753,17 +1739,11 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s ", cc->cipher_string);
+
+- if (cc->key_size > 0) {
+- if ((maxlen - sz) < ((cc->key_size << 1) + 1))
+- return -ENOMEM;
+-
+- crypt_encode_key(result + sz, cc->key, cc->key_size);
+- sz += cc->key_size << 1;
+- } else {
+- if (sz >= maxlen)
+- return -ENOMEM;
+- result[sz++] = '-';
+- }
++ if (cc->key_size > 0)
++ for (i = 0; i < cc->key_size; i++)
++ DMEMIT("%02x", cc->key[i]);
++ else
++ DMEMIT("-");
+
+ DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
+ cc->dev->name, (unsigned long long)cc->start);
+@@ -1773,7 +1753,6 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
+
+ break;
+ }
+- return 0;
+ }
+
+ static void crypt_postsuspend(struct dm_target *ti)
+@@ -1867,7 +1846,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
+
+ static struct target_type crypt_target = {
+ .name = "crypt",
+- .version = {1, 11, 0},
++ .version = {1, 11, 1},
+ .module = THIS_MODULE,
+ .ctr = crypt_ctr,
+ .dtr = crypt_dtr,
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index f18375d..11431ac 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
+ return delay_bio(dc, dc->read_delay, bio);
+ }
+
+-static int delay_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void delay_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct delay_c *dc = ti->private;
+ int sz = 0;
+@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
+ dc->write_delay);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int delay_iterate_devices(struct dm_target *ti,
+@@ -337,7 +335,7 @@ out:
+
+ static struct target_type delay_target = {
+ .name = "delay",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = delay_ctr,
+ .dtr = delay_dtr,
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b280c43..746b5e8 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -331,8 +331,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ return error;
+ }
+
+-static int flakey_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void flakey_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+ struct flakey_c *fc = ti->private;
+@@ -362,7 +362,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
+
+ break;
+ }
+- return 0;
+ }
+
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+@@ -405,7 +404,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
+
+ static struct target_type flakey_target = {
+ .name = "flakey",
+- .version = {1, 2, 0},
++ .version = {1, 2, 1},
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 42c873f..e6a300c 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1065,6 +1065,7 @@ static void retrieve_status(struct dm_table *table,
+ num_targets = dm_table_get_num_targets(table);
+ for (i = 0; i < num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(table, i);
++ size_t l;
+
+ remaining = len - (outptr - outbuf);
+ if (remaining <= sizeof(struct dm_target_spec)) {
+@@ -1089,14 +1090,17 @@ static void retrieve_status(struct dm_table *table,
+
+ /* Get the status/table string from the target driver */
+ if (ti->type->status) {
+- if (ti->type->status(ti, type, outptr, remaining)) {
+- param->flags |= DM_BUFFER_FULL_FLAG;
+- break;
+- }
++ ti->type->status(ti, type, outptr, remaining);
+ } else
+ outptr[0] = '\0';
+
+- outptr += strlen(outptr) + 1;
++ l = strlen(outptr) + 1;
++ if (l == remaining) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ break;
++ }
++
++ outptr += l;
+ used = param->data_start + (outptr - outbuf);
+
+ outptr = align_ptr(outptr);
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 9728839..c55d8e4 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -94,8 +94,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio,
+ return DM_MAPIO_REMAPPED;
+ }
+
+-static int linear_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void linear_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct linear_c *lc = (struct linear_c *) ti->private;
+
+@@ -109,7 +109,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
+ (unsigned long long)lc->start);
+ break;
+ }
+- return 0;
+ }
+
+ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+@@ -154,7 +153,7 @@ static int linear_iterate_devices(struct dm_target *ti,
+
+ static struct target_type linear_target = {
+ .name = "linear",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = linear_ctr,
+ .dtr = linear_dtr,
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index a417f94..7e766f9 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1323,8 +1323,8 @@ static void multipath_resume(struct dm_target *ti)
+ * [priority selector-name num_ps_args [ps_args]*
+ * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
+ */
+-static int multipath_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void multipath_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int sz = 0;
+ unsigned long flags;
+@@ -1427,8 +1427,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
+ }
+
+ spin_unlock_irqrestore(&m->lock, flags);
+-
+- return 0;
+ }
+
+ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
+@@ -1623,7 +1621,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 3, 0},
++ .version = {1, 3, 1},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index d2a3223..86862ea 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1017,8 +1017,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_c
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static int raid_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void raid_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct raid_set *rs = ti->private;
+ unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
+@@ -1153,8 +1153,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
+ DMEMIT(" -");
+ }
+ }
+-
+- return 0;
+ }
+
+ static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+@@ -1208,7 +1206,7 @@ static void raid_resume(struct dm_target *ti)
+
+ static struct target_type raid_target = {
+ .name = "raid",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = raid_ctr,
+ .dtr = raid_dtr,
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index dae2b7a..b7b649d 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1358,8 +1358,8 @@ static char device_status_char(struct mirror *m)
+ }
+
+
+-static int mirror_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void mirror_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned int m, sz = 0;
+ struct mirror_set *ms = (struct mirror_set *) ti->private;
+@@ -1394,8 +1394,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
+ if (ms->features & DM_RAID1_HANDLE_ERRORS)
+ DMEMIT(" 1 handle_errors");
+ }
+-
+- return 0;
+ }
+
+ static int mirror_iterate_devices(struct dm_target *ti,
+@@ -1414,7 +1412,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
+
+ static struct target_type mirror_target = {
+ .name = "mirror",
+- .version = {1, 12, 1},
++ .version = {1, 12, 2},
+ .module = THIS_MODULE,
+ .ctr = mirror_ctr,
+ .dtr = mirror_dtr,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 6f75887..34ec2b5 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1845,8 +1845,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
+ start_merge(s);
+ }
+
+-static int snapshot_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void snapshot_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+ struct dm_snapshot *snap = ti->private;
+@@ -1892,8 +1892,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
+ maxlen - sz);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int snapshot_iterate_devices(struct dm_target *ti,
+@@ -2148,8 +2146,8 @@ static void origin_resume(struct dm_target *ti)
+ ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+ }
+
+-static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+- unsigned int maxlen)
++static void origin_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct dm_dev *dev = ti->private;
+
+@@ -2162,8 +2160,6 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+ snprintf(result, maxlen, "%s", dev->name);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+@@ -2191,7 +2187,7 @@ static int origin_iterate_devices(struct dm_target *ti,
+
+ static struct target_type origin_target = {
+ .name = "snapshot-origin",
+- .version = {1, 7, 1},
++ .version = {1, 7, 2},
+ .module = THIS_MODULE,
+ .ctr = origin_ctr,
+ .dtr = origin_dtr,
+@@ -2204,7 +2200,7 @@ static struct target_type origin_target = {
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 10, 0},
++ .version = {1, 10, 1},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+@@ -2327,3 +2323,5 @@ module_exit(dm_snapshot_exit);
+ MODULE_DESCRIPTION(DM_NAME " snapshot target");
+ MODULE_AUTHOR("Joe Thornber");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("dm-snapshot-origin");
++MODULE_ALIAS("dm-snapshot-merge");
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index 3d80cf0..cbd41d2 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -301,8 +301,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
+ *
+ */
+
+-static int stripe_status(struct dm_target *ti,
+- status_type_t type, char *result, unsigned int maxlen)
++static void stripe_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
+ char buffer[sc->stripes + 1];
+@@ -329,7 +329,6 @@ static int stripe_status(struct dm_target *ti,
+ (unsigned long long)sc->stripe[i].physical_start);
+ break;
+ }
+- return 0;
+ }
+
+ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+@@ -418,7 +417,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+
+ static struct target_type stripe_target = {
+ .name = "striped",
+- .version = {1, 4, 0},
++ .version = {1, 4, 1},
+ .module = THIS_MODULE,
+ .ctr = stripe_ctr,
+ .dtr = stripe_dtr,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d432032..da4d299 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2090,8 +2090,8 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ * <transaction id> <used metadata sectors>/<total metadata sectors>
+ * <used data sectors>/<total data sectors> <held metadata root>
+ */
+-static int pool_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void pool_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int r;
+ unsigned sz = 0;
+@@ -2108,32 +2108,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+- r = dm_pool_get_metadata_transaction_id(pool->pmd,
+- &transaction_id);
+- if (r)
+- return r;
++ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
++ if (r) {
++ DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
++ goto err;
++ }
+
+- r = dm_pool_get_free_metadata_block_count(pool->pmd,
+- &nr_free_blocks_metadata);
+- if (r)
+- return r;
++ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
++ if (r) {
++ DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_metadata_dev_size returned %d", r);
++ goto err;
++ }
+
+- r = dm_pool_get_free_block_count(pool->pmd,
+- &nr_free_blocks_data);
+- if (r)
+- return r;
++ r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
++ if (r) {
++ DMERR("dm_pool_get_free_block_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_data_dev_size returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_held_metadata_root returned %d", r);
++ goto err;
++ }
+
+ DMEMIT("%llu %llu/%llu %llu/%llu ",
+ (unsigned long long)transaction_id,
+@@ -2162,8 +2171,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
+ DMEMIT("skip_block_zeroing ");
+ break;
+ }
++ return;
+
+- return 0;
++err:
++ DMEMIT("Error");
+ }
+
+ static int pool_iterate_devices(struct dm_target *ti,
+@@ -2201,7 +2212,7 @@ static struct target_type pool_target = {
+ .name = "thin-pool",
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ DM_TARGET_IMMUTABLE,
+- .version = {1, 0, 0},
++ .version = {1, 0, 1},
+ .module = THIS_MODULE,
+ .ctr = pool_ctr,
+ .dtr = pool_dtr,
+@@ -2339,8 +2350,8 @@ static void thin_postsuspend(struct dm_target *ti)
+ /*
+ * <nr mapped sectors> <highest mapped sector>
+ */
+-static int thin_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void thin_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int r;
+ ssize_t sz = 0;
+@@ -2354,12 +2365,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
+ switch (type) {
+ case STATUSTYPE_INFO:
+ r = dm_thin_get_mapped_count(tc->td, &mapped);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_thin_get_mapped_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_thin_get_highest_mapped_block(tc->td, &highest);
+- if (r < 0)
+- return r;
++ if (r < 0) {
++ DMERR("dm_thin_get_highest_mapped_block returned %d", r);
++ goto err;
++ }
+
+ DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
+ if (r)
+@@ -2377,7 +2392,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
+ }
+ }
+
+- return 0;
++ return;
++
++err:
++ DMEMIT("Error");
+ }
+
+ static int thin_iterate_devices(struct dm_target *ti,
+@@ -2410,7 +2428,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+
+ static struct target_type thin_target = {
+ .name = "thin",
+- .version = {1, 0, 0},
++ .version = {1, 0, 1},
+ .module = THIS_MODULE,
+ .ctr = thin_ctr,
+ .dtr = thin_dtr,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 145e378e..1702133 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -345,6 +345,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ bio_io_error(bio);
+ return;
+ }
++ if (mddev->ro == 1 && unlikely(rw == WRITE)) {
++ bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
++ return;
++ }
+ smp_rmb(); /* Ensure implications of 'active' are visible */
+ rcu_read_lock();
+ if (mddev->suspended) {
+@@ -2838,6 +2842,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
+ } else if (!sectors)
+ sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+ rdev->data_offset;
++ if (!my_mddev->pers->resize)
++ /* Cannot change size for RAID0 or Linear etc */
++ return -EINVAL;
+ }
+ if (sectors < my_mddev->dev_sectors)
+ return -EINVAL; /* component must fit device */
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 7294bd1..d3e6f35 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -286,7 +286,7 @@ abort:
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+- *private_conf = NULL;
++ *private_conf = ERR_PTR(err);
+ return err;
+ }
+
+@@ -330,7 +330,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
+ "%s does not support generic reshape\n", __func__);
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+- array_sectors += rdev->sectors;
++ array_sectors += (rdev->sectors &
++ ~(sector_t)(mddev->chunk_sectors-1));
+
+ return array_sectors;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 0182649..a783530 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5402,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ */
+ e1000e_release_hw_control(adapter);
+
+- pci_disable_device(pdev);
++ pci_clear_master(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
+index ad14fec..f1c32a5 100644
+--- a/drivers/net/wireless/ath/ath9k/common.h
++++ b/drivers/net/wireless/ath/ath9k/common.h
+@@ -35,7 +35,7 @@
+ #define WME_AC_BK 3
+ #define WME_NUM_AC 4
+
+-#define ATH_RSSI_DUMMY_MARKER 0x127
++#define ATH_RSSI_DUMMY_MARKER 127
+ #define ATH_RSSI_LPF_LEN 10
+ #define RSSI_LPF_THRESHOLD -20
+ #define ATH_RSSI_EP_MULTIPLIER (1<<7)
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index da55967..09be407 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -22,6 +22,7 @@
+ #include <linux/firmware.h>
+ #include <linux/skbuff.h>
+ #include <linux/netdevice.h>
++#include <linux/etherdevice.h>
+ #include <linux/leds.h>
+ #include <linux/slab.h>
+ #include <net/mac80211.h>
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 2d81c70..a48bb83 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -1069,15 +1069,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+
+ last_rssi = priv->rx.last_rssi;
+
+- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+- rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+- ATH_RSSI_EP_MULTIPLIER);
++ if (ieee80211_is_beacon(hdr->frame_control) &&
++ !is_zero_ether_addr(common->curbssid) &&
++ compare_ether_addr(hdr->addr3, common->curbssid) == 0) {
++ s8 rssi = rxbuf->rxstatus.rs_rssi;
+
+- if (rxbuf->rxstatus.rs_rssi < 0)
+- rxbuf->rxstatus.rs_rssi = 0;
++ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
++ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+
+- if (ieee80211_is_beacon(fc))
+- priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
++ if (rssi < 0)
++ rssi = 0;
++
++ priv->ah->stats.avgbrssi = rssi;
++ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+index 8533ba2..f081d53 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+@@ -180,6 +180,15 @@ struct iwl_queue {
+ #define TFD_TX_CMD_SLOTS 256
+ #define TFD_CMD_SLOTS 32
+
++/*
++ * The FH will write back to the first TB only, so we need
++ * to copy some data into the buffer regardless of whether
++ * it should be mapped or not. This indicates how much to
++ * copy; even for HCMDs it must be big enough to fit the
++ * DRAM scratch from the TX cmd, at least 16 bytes.
++ */
++#define IWL_HCMD_MIN_COPY_SIZE 16
++
+ struct iwl_tx_queue {
+ struct iwl_queue q;
+ struct iwl_tfd *tfds;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+index 4a0c953..e6b3853 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+@@ -688,11 +688,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ u32 idx;
+- u16 copy_size, cmd_size;
++ u16 copy_size, cmd_size, dma_size;
+ bool is_ct_kill = false;
+ bool had_nocopy = false;
+ int i;
+ u8 *cmd_dest;
++ const u8 *cmddata[IWL_MAX_CMD_TFDS];
++ u16 cmdlen[IWL_MAX_CMD_TFDS];
+ #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+ int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+@@ -717,15 +719,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
++ cmddata[i] = cmd->data[i];
++ cmdlen[i] = cmd->len[i];
++
+ if (!cmd->len[i])
+ continue;
++
++ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
++ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
++ int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
++
++ if (copy > cmdlen[i])
++ copy = cmdlen[i];
++ cmdlen[i] -= copy;
++ cmddata[i] += copy;
++ copy_size += copy;
++ }
++
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy))
+ return -EINVAL;
+- copy_size += cmd->len[i];
++ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+@@ -778,13 +795,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ /* and copy the data that needs to be copied */
+
+ cmd_dest = out_cmd->payload;
++ copy_size = sizeof(out_cmd->hdr);
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+- if (!cmd->len[i])
++ int copy = 0;
++
++ if (!cmd->len)
+ continue;
+- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+- break;
+- memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+- cmd_dest += cmd->len[i];
++
++ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
++ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
++ copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
++
++ if (copy > cmd->len[i])
++ copy = cmd->len[i];
++ }
++
++ /* copy everything if not nocopy/dup */
++ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
++ copy = cmd->len[i];
++
++ if (copy) {
++ memcpy(cmd_dest, cmd->data[i], copy);
++ cmd_dest += copy;
++ copy_size += copy;
++ }
+ }
+
+ IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
+@@ -794,7 +828,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+ q->write_ptr, idx, trans->shrd->cmd_queue);
+
+- phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
++ /*
++ * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
++ * still map at least that many bytes for the hardware to write back to.
++ * We have enough space, so that's not a problem.
++ */
++ dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
++
++ phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, dma_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ idx = -ENOMEM;
+@@ -802,7 +843,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ }
+
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+- dma_unmap_len_set(out_meta, len, copy_size);
++ dma_unmap_len_set(out_meta, len, dma_size);
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq,
+ phys_addr, copy_size, 1);
+@@ -812,14 +853,15 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ trace_idx = 1;
+ #endif
+
++ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+- if (!cmd->len[i])
++ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ continue;
+ phys_addr = dma_map_single(bus(trans)->dev,
+- (void *)cmd->data[i],
+- cmd->len[i], DMA_BIDIRECTIONAL);
++ (void *)cmddata[i],
++ cmdlen[i], DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+ iwlagn_unmap_tfd(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+@@ -829,10 +871,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ }
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+- cmd->len[i], 0);
++ cmdlen[i], 0);
+ #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+- trace_bufs[trace_idx] = cmd->data[i];
+- trace_lens[trace_idx] = cmd->len[i];
++ trace_bufs[trace_idx] = cmddata[i];
++ trace_lens[trace_idx] = cmdlen[i];
+ trace_idx++;
+ #endif
+ }
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index de94244..3cf4ecc 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -290,7 +290,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
+ i++;
+ udelay(10);
+ /* 50ms max wait */
+- if (i == 50000)
++ if (i == 5000)
+ break;
+ }
+
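
The small fix above is a units fix: the loop sleeps udelay(10), i.e. 10 microseconds per iteration, so the "50ms max wait" comment requires a bound of 5000 iterations (5000 x 10 us = 50 ms); the old 50000 busy-waited up to half a second. A userspace sketch of the same bounded poll; firmware_awake() is a dummy stand-in:

#include <stdbool.h>
#include <unistd.h>

/* Dummy stand-in for the hardware "awake" test; reports ready on
 * the third poll, purely for illustration. */
static bool firmware_awake(void)
{
        static int polls;
        return ++polls >= 3;
}

/* Poll every 10 us for at most 5000 iterations: 5000 * 10 us = 50 ms,
 * matching the comment in the fixed driver loop. */
static int wait_for_wakeup(void)
{
        int i;

        for (i = 0; i < 5000; i++) {
                if (firmware_awake())
                        return 0;
                usleep(10);     /* userspace analogue of udelay(10) */
        }
        return -1;              /* timed out after ~50 ms */
}
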
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index fc35308..9b9843e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1707,7 +1707,6 @@ static void netback_changed(struct xenbus_device *dev,
+ case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+- case XenbusStateConnected:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+@@ -1718,6 +1717,9 @@ static void netback_changed(struct xenbus_device *dev,
+ if (xennet_connect(netdev) != 0)
+ break;
+ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
+ netif_notify_peers(netdev);
+ break;
+
+diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
+index f5b718d..aed7756 100644
+--- a/drivers/scsi/dc395x.c
++++ b/drivers/scsi/dc395x.c
+@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
+ dcb->max_command = 1;
+ dcb->target_id = target;
+ dcb->target_lun = lun;
++ dcb->dev_mode = eeprom->target[target].cfg0;
+ #ifndef DC395x_NO_DISCONNECT
+ dcb->identify_msg =
+ IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
+ #else
+ dcb->identify_msg = IDENTIFY(0, lun);
+ #endif
+- dcb->dev_mode = eeprom->target[target].cfg0;
+ dcb->inquiry7 = 0;
+ dcb->sync_mode = 0;
+ dcb->min_nego_period = clock_period[period_index];
+diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
+index abc5ac5..8960f1a 100644
+--- a/drivers/staging/hv/storvsc_drv.c
++++ b/drivers/staging/hv/storvsc_drv.c
+@@ -815,6 +815,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ if (!bounce_sgl)
+ return NULL;
+
++ sg_init_table(bounce_sgl, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
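
The one-line storvsc fix above matters because the scatterlist is allocated and then filled entry by entry: sg_init_table() zeroes the entries and sets the end marker that scatterlist iterators rely on, so walking an uninitialized table can run off the end. A kernel-style sketch of the allocation pattern (condensed, not the storvsc code itself):

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: allocate and populate a bounce scatterlist of num_pages
 * entries; sg_init_table() must run before the entries are used. */
static struct scatterlist *alloc_bounce_sgl(unsigned int num_pages)
{
        struct scatterlist *sgl;
        unsigned int i;

        sgl = kcalloc(num_pages, sizeof(*sgl), GFP_ATOMIC);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, num_pages);  /* zero entries, mark the end */

        for (i = 0; i < num_pages; i++) {
                struct page *pg = alloc_page(GFP_ATOMIC);

                if (!pg)
                        goto unwind;
                sg_set_page(&sgl[i], pg, PAGE_SIZE, 0);
        }
        return sgl;

unwind:
        while (i--)
                __free_page(sg_page(&sgl[i]));
        kfree(sgl);
        return NULL;
}
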
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index ae62d57..754d54e 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -718,8 +718,6 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
+ if (device->flags & DEVICE_FLAGS_OPENED)
+ device_close(device->dev);
+
+- usb_put_dev(interface_to_usbdev(intf));
+-
+ return 0;
+ }
+
+@@ -730,8 +728,6 @@ static int vt6656_resume(struct usb_interface *intf)
+ if (!device || !device->dev)
+ return -ENODEV;
+
+- usb_get_dev(interface_to_usbdev(intf));
+-
+ if (!(device->flags & DEVICE_FLAGS_OPENED))
+ device_open(device->dev);
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 6748568..cff03e5 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -322,6 +322,27 @@ static const struct serial8250_config uart_config[] = {
+ .tx_loadsz = 1024,
+ .flags = UART_CAP_HFIFO,
+ },
++ [PORT_ALTR_16550_F32] = {
++ .name = "Altera 16550 FIFO32",
++ .fifo_size = 32,
++ .tx_loadsz = 32,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
++ [PORT_ALTR_16550_F64] = {
++ .name = "Altera 16550 FIFO64",
++ .fifo_size = 64,
++ .tx_loadsz = 64,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
++ [PORT_ALTR_16550_F128] = {
++ .name = "Altera 16550 FIFO128",
++ .fifo_size = 128,
++ .tx_loadsz = 128,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
+ };
+
+ #if defined(CONFIG_MIPS_ALCHEMY)
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index a753956..6986256 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1154,6 +1154,7 @@ pci_xr17c154_setup(struct serial_private *priv,
+
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
++#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
+
+ /*
+ * Master list of serial port init/setup/exit quirks.
+@@ -1418,15 +1419,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+- .device = PCI_DEVICE_ID_PLX_9050,
+- .subvendor = PCI_VENDOR_ID_PLX,
+- .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584,
+- .init = pci_plx9050_init,
+- .setup = pci_default_setup,
+- .exit = __devexit_p(pci_plx9050_exit),
+- },
+- {
+- .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_ROMULUS,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
+@@ -3120,7 +3112,12 @@ static struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_PLX,
+ PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0,
+- pbn_b0_4_115200 },
++ pbn_b2_4_115200 },
++ /* Unknown card - subdevice 0x1588 */
++ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
++ PCI_VENDOR_ID_PLX,
++ PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0,
++ pbn_b2_8_115200 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_KEYSPAN,
+ PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0,
+@@ -4086,6 +4083,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
++ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
++ 0x1000, 0x0012,
++ 0, 0, pbn_b0_bt_2_115200 },
++
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 925a1e5..3771277 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -464,7 +464,7 @@ config SERIAL_SAMSUNG_UARTS_4
+ config SERIAL_SAMSUNG_UARTS
+ int
+ depends on ARM && PLAT_SAMSUNG
+- default 6 if ARCH_S5P6450
++ default 6 if CPU_S5P6450
+ default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 3
+ help
+diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
+index e8c9cee..6563cad 100644
+--- a/drivers/tty/serial/of_serial.c
++++ b/drivers/tty/serial/of_serial.c
+@@ -182,6 +182,12 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
+ { .compatible = "ns16750", .data = (void *)PORT_16750, },
+ { .compatible = "ns16850", .data = (void *)PORT_16850, },
+ { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
++ { .compatible = "altr,16550-FIFO32",
++ .data = (void *)PORT_ALTR_16550_F32, },
++ { .compatible = "altr,16550-FIFO64",
++ .data = (void *)PORT_ALTR_16550_F64, },
++ { .compatible = "altr,16550-FIFO128",
++ .data = (void *)PORT_ALTR_16550_F128, },
+ #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+ { .compatible = "ibm,qpace-nwp-serial",
+ .data = (void *)PORT_NWPSERIAL, },
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 6c9b7cd..4f02f9c 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -114,11 +114,14 @@ static void __tty_buffer_flush(struct tty_struct *tty)
+ {
+ struct tty_buffer *thead;
+
+- while ((thead = tty->buf.head) != NULL) {
+- tty->buf.head = thead->next;
+- tty_buffer_free(tty, thead);
++ if (tty->buf.head == NULL)
++ return;
++ while ((thead = tty->buf.head->next) != NULL) {
++ tty_buffer_free(tty, tty->buf.head);
++ tty->buf.head = thead;
+ }
+- tty->buf.tail = NULL;
++ WARN_ON(tty->buf.head != tty->buf.tail);
++ tty->buf.head->read = tty->buf.head->commit;
+ }
+
+ /**
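
The rewritten __tty_buffer_flush() above deliberately keeps the head buffer: a concurrent consumer may still be working on tty->buf.head, so only the buffers queued behind it are freed, and the head is drained by advancing its read index to commit. A generic userspace sketch of "free everything behind the head"; struct buf and its index names are simplified stand-ins:

#include <stdlib.h>

struct buf {
        struct buf *next;
        int read;       /* bytes consumed */
        int commit;     /* bytes produced */
};

/*
 * Free every queued buffer except the head (which a consumer may
 * still be using), then drain the head by treating all committed
 * data as already read, mirroring the flush above. Buffers are
 * assumed to come from malloc() in this sketch.
 */
static void flush_keep_head(struct buf **head, struct buf **tail)
{
        struct buf *next;

        if (*head == NULL)
                return;
        while ((next = (*head)->next) != NULL) {
                free(*head);
                *head = next;
        }
        *tail = *head;                  /* one buffer remains */
        (*head)->read = (*head)->commit;
}
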
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 97b2c55..fe8c04b 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -70,6 +70,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+ #define WDM_POLL_RUNNING 6
+ #define WDM_RESPONDING 7
+ #define WDM_SUSPENDING 8
++#define WDM_OVERFLOW 10
+
+ #define WDM_MAX 16
+
+@@ -134,6 +135,7 @@ static void wdm_in_callback(struct urb *urb)
+ {
+ struct wdm_device *desc = urb->context;
+ int status = urb->status;
++ int length = urb->actual_length;
+
+ spin_lock(&desc->iuspin);
+ clear_bit(WDM_RESPONDING, &desc->flags);
+@@ -164,9 +166,17 @@ static void wdm_in_callback(struct urb *urb)
+ }
+
+ desc->rerr = status;
+- desc->reslength = urb->actual_length;
+- memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
+- desc->length += desc->reslength;
++ if (length + desc->length > desc->wMaxCommand) {
++ /* The buffer would overflow */
++ set_bit(WDM_OVERFLOW, &desc->flags);
++ } else {
++ /* we may already be in overflow */
++ if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
++ memmove(desc->ubuf + desc->length, desc->inbuf, length);
++ desc->length += length;
++ desc->reslength = length;
++ }
++ }
+ skip_error:
+ wake_up(&desc->wait);
+
+@@ -433,6 +443,11 @@ retry:
+ rv = -ENODEV;
+ goto err;
+ }
++ if (test_bit(WDM_OVERFLOW, &desc->flags)) {
++ clear_bit(WDM_OVERFLOW, &desc->flags);
++ rv = -ENOBUFS;
++ goto err;
++ }
+ i++;
+ if (file->f_flags & O_NONBLOCK) {
+ if (!test_bit(WDM_READ, &desc->flags)) {
+@@ -472,6 +487,7 @@ retry:
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
++
+ if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
+@@ -926,6 +942,7 @@ static int wdm_post_reset(struct usb_interface *intf)
+ struct wdm_device *desc = usb_get_intfdata(intf);
+ int rv;
+
++ clear_bit(WDM_OVERFLOW, &desc->flags);
+ rv = recover_from_urb_loss(desc);
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
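
The WDM_OVERFLOW logic added above is a sticky loss flag: a response that would overrun the wMaxCommand buffer is dropped and the flag latched, a later read() reports -ENOBUFS exactly once, and the flag is also cleared after a device reset. A self-contained sketch of the same producer/consumer pattern; struct chan, BUF_MAX and the function names are hypothetical:

#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/types.h>

#define BUF_MAX 256     /* stand-in for desc->wMaxCommand */

struct chan {
        char buf[BUF_MAX];
        size_t len;
        bool overflow;  /* latched, like WDM_OVERFLOW */
};

/* Producer: drop the whole response and latch the flag rather than
 * overrun the buffer; once in overflow, stop appending entirely. */
static void deliver(struct chan *c, const void *data, size_t n)
{
        if (c->len + n > BUF_MAX) {
                c->overflow = true;
                return;
        }
        if (!c->overflow) {
                memcpy(c->buf + c->len, data, n);
                c->len += n;
        }
}

/* Consumer: report the loss exactly once, then resume normally. */
static ssize_t chan_read(struct chan *c, void *out, size_t n)
{
        if (c->overflow) {
                c->overflow = false;
                return -ENOBUFS;
        }
        if (n > c->len)
                n = c->len;
        memcpy(out, c->buf, n);
        c->len -= n;
        memmove(c->buf, c->buf + n, c->len);
        return (ssize_t)n;
}
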
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2564d8d..22cbe06 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2148,70 +2148,35 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if ((portstatus & USB_PORT_STAT_RESET))
+ goto delay;
+
+- /*
+- * Some buggy devices require a warm reset to be issued even
+- * when the port appears not to be connected.
++ if (hub_port_warm_reset_required(hub, portstatus))
++ return -ENOTCONN;
++
++ /* Device went away? */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION))
++ return -ENOTCONN;
++
++ /* bomb out completely if the connection bounced. A USB 3.0
++ * connection may bounce if multiple warm resets were issued,
++ * but the device may have successfully re-connected. Ignore it.
+ */
+- if (!warm) {
+- /*
+- * Some buggy devices can cause an NEC host controller
+- * to transition to the "Error" state after a hot port
+- * reset. This will show up as the port state in
+- * "Inactive", and the port may also report a
+- * disconnect. Forcing a warm port reset seems to make
+- * the device work.
+- *
+- * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+- */
+- if (hub_port_warm_reset_required(hub, portstatus)) {
+- int ret;
+-
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_CONNECTION);
+- if (portchange & USB_PORT_STAT_C_LINK_STATE)
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_PORT_LINK_STATE);
+- if (portchange & USB_PORT_STAT_C_RESET)
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_RESET);
+- dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
+- port1);
+- ret = hub_port_reset(hub, port1,
+- udev, HUB_BH_RESET_TIME,
+- true);
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_CONNECTION);
+- return ret;
+- }
+- /* Device went away? */
+- if (!(portstatus & USB_PORT_STAT_CONNECTION))
+- return -ENOTCONN;
+-
+- /* bomb out completely if the connection bounced */
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- return -ENOTCONN;
+-
+- if ((portstatus & USB_PORT_STAT_ENABLE)) {
+- if (hub_is_wusb(hub))
+- udev->speed = USB_SPEED_WIRELESS;
+- else if (hub_is_superspeed(hub->hdev))
+- udev->speed = USB_SPEED_SUPER;
+- else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
+- udev->speed = USB_SPEED_HIGH;
+- else if (portstatus & USB_PORT_STAT_LOW_SPEED)
+- udev->speed = USB_SPEED_LOW;
+- else
+- udev->speed = USB_SPEED_FULL;
++ if (!hub_is_superspeed(hub->hdev) &&
++ (portchange & USB_PORT_STAT_C_CONNECTION))
++ return -ENOTCONN;
++
++ if ((portstatus & USB_PORT_STAT_ENABLE)) {
++ if (!udev)
+ return 0;
+- }
+- } else {
+- if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+- hub_port_warm_reset_required(hub,
+- portstatus))
+- return -ENOTCONN;
+
++ if (hub_is_wusb(hub))
++ udev->speed = USB_SPEED_WIRELESS;
++ else if (hub_is_superspeed(hub->hdev))
++ udev->speed = USB_SPEED_SUPER;
++ else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
++ udev->speed = USB_SPEED_HIGH;
++ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
++ udev->speed = USB_SPEED_LOW;
++ else
++ udev->speed = USB_SPEED_FULL;
+ return 0;
+ }
+
+@@ -2229,16 +2194,16 @@ delay:
+ }
+
+ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+- struct usb_device *udev, int *status, bool warm)
++ struct usb_device *udev, int *status)
+ {
+ switch (*status) {
+ case 0:
+- if (!warm) {
+- struct usb_hcd *hcd;
+- /* TRSTRCY = 10 ms; plus some extra */
+- msleep(10 + 40);
++ /* TRSTRCY = 10 ms; plus some extra */
++ msleep(10 + 40);
++ if (udev) {
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
+ update_devnum(udev, 0);
+- hcd = bus_to_hcd(udev->bus);
+ /* The xHC may think the device is already reset,
+ * so ignore the status.
+ */
+@@ -2250,14 +2215,15 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ case -ENODEV:
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+- /* FIXME need disconnect() for NOTATTACHED device */
+ if (hub_is_superspeed(hub->hdev)) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
+ }
+- if (!warm)
++ if (udev)
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+@@ -2270,18 +2236,30 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm)
+ {
+ int i, status;
++ u16 portchange, portstatus;
+
+- if (!warm) {
+- /* Block EHCI CF initialization during the port reset.
+- * Some companion controllers don't like it when they mix.
+- */
+- down_read(&ehci_cf_port_reset_rwsem);
+- } else {
+- if (!hub_is_superspeed(hub->hdev)) {
++ if (!hub_is_superspeed(hub->hdev)) {
++ if (warm) {
+ dev_err(hub->intfdev, "only USB3 hub support "
+ "warm reset\n");
+ return -EINVAL;
+ }
++ /* Block EHCI CF initialization during the port reset.
++ * Some companion controllers don't like it when they mix.
++ */
++ down_read(&ehci_cf_port_reset_rwsem);
++ } else if (!warm) {
++ /*
++ * If the caller hasn't explicitly requested a warm reset,
++ * double check and see if one is needed.
++ */
++ status = hub_port_status(hub, port1,
++ &portstatus, &portchange);
++ if (status < 0)
++ goto done;
++
++ if (hub_port_warm_reset_required(hub, portstatus))
++ warm = true;
+ }
+
+ /* Reset the port */
+@@ -2302,10 +2280,33 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ status);
+ }
+
+- /* return on disconnect or reset */
++ /* Check for disconnect or reset */
+ if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+- hub_port_finish_reset(hub, port1, udev, &status, warm);
+- goto done;
++ hub_port_finish_reset(hub, port1, udev, &status);
++
++ if (!hub_is_superspeed(hub->hdev))
++ goto done;
++
++ /*
++ * If a USB 3.0 device migrates from reset to an error
++ * state, re-issue the warm reset.
++ */
++ if (hub_port_status(hub, port1,
++ &portstatus, &portchange) < 0)
++ goto done;
++
++ if (!hub_port_warm_reset_required(hub, portstatus))
++ goto done;
++
++ /*
++ * If the port is in SS.Inactive or Compliance Mode, the
++ * hot or warm reset failed. Try another warm reset.
++ */
++ if (!warm) {
++ dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
++ port1);
++ warm = true;
++ }
+ }
+
+ dev_dbg (hub->intfdev,
+@@ -2319,7 +2320,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ port1);
+
+ done:
+- if (!warm)
++ if (!hub_is_superspeed(hub->hdev))
+ up_read(&ehci_cf_port_reset_rwsem);
+
+ return status;
+@@ -3735,12 +3736,21 @@ static void hub_events(void)
+ */
+ if (hub_port_warm_reset_required(hub, portstatus)) {
+ int status;
++ struct usb_device *udev =
++ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- status = hub_port_reset(hub, i, NULL,
+- HUB_BH_RESET_TIME, true);
+- if (status < 0)
+- hub_port_disable(hub, i, 1);
++ if (!udev) {
++ status = hub_port_reset(hub, i,
++ NULL, HUB_BH_RESET_TIME,
++ true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ } else {
++ usb_lock_device(udev);
++ status = usb_reset_device(udev);
++ usb_unlock_device(udev);
++ }
+ connect_change = 0;
+ }
+
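
Taken together, the hub.c hunks above restructure port reset into an escalation loop: try a hot reset, re-read the port status, and if the port still sits in SS.Inactive or Compliance Mode (hub_port_warm_reset_required()) retry with a warm reset instead of bailing out. A schematic sketch of that control flow; do_reset() and warm_reset_required() are dummy stand-ins, not the hub driver's functions:

#include <stdbool.h>

/* Dummies modelling a port that recovers after one warm reset. */
static int do_reset(int port, bool warm)
{
        (void)port; (void)warm;
        return 0;
}

static bool warm_reset_required(int port)
{
        static int checks;
        (void)port;
        return checks++ < 1;
}

/* Hot reset first; if the port status afterwards still calls for a
 * warm reset, escalate and retry rather than giving up. */
static int reset_port(int port)
{
        bool warm = warm_reset_required(port);
        int attempt;

        for (attempt = 0; attempt < 5; attempt++) {
                int status = do_reset(port, warm);

                if (status)
                        return status;  /* disconnect or hard error */
                if (!warm_reset_required(port))
                        return 0;       /* the reset took */
                warm = true;            /* escalate to warm reset */
        }
        return -1;                      /* caller disables the port */
}
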
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 381d00d..913a178 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
+ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
++ { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
+ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+@@ -156,6 +157,25 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
++ { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
++ { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */
++ { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */
++ { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */
++ { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */
++ { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */
++ { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */
++ { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */
++ { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
++ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
++ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 24a3ea6..4418538 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -341,6 +341,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
++#define CINTERION_PRODUCT_AH6 0x0055
++#define CINTERION_PRODUCT_PLS8 0x0060
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -579,6 +581,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+@@ -1260,6 +1263,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index 87271e3..153d719 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -69,6 +69,7 @@ static struct usb_device_id id_table[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) },
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 7ab9046..105d900 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ return 0;
+ }
+
+-/* This places the HUAWEI usb dongles in multi-port mode */
+-static int usb_stor_huawei_feature_init(struct us_data *us)
++/* This places the HUAWEI E220 devices in multi-port mode */
++int usb_stor_huawei_e220_init(struct us_data *us)
+ {
+ int result;
+
+@@ -104,75 +104,3 @@ static int usb_stor_huawei_feature_init(struct us_data *us)
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return 0;
+ }
+-
+-/*
+- * It will send a scsi switch command called rewind' to huawei dongle.
+- * When the dongle receives this command at the first time,
+- * it will reboot immediately. After rebooted, it will ignore this command.
+- * So it is unnecessary to read its response.
+- */
+-static int usb_stor_huawei_scsi_init(struct us_data *us)
+-{
+- int result = 0;
+- int act_len = 0;
+- struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+- char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+-
+- bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+- bcbw->Tag = 0;
+- bcbw->DataTransferLength = 0;
+- bcbw->Flags = bcbw->Lun = 0;
+- bcbw->Length = sizeof(rewind_cmd);
+- memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+- memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+-
+- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+- US_BULK_CB_WRAP_LEN, &act_len);
+- US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+- return result;
+-}
+-
+-/*
+- * It tries to find the supported Huawei USB dongles.
+- * In Huawei, they assign the following product IDs
+- * for all of their mobile broadband dongles,
+- * including the new dongles in the future.
+- * So if the product ID is not included in this list,
+- * it means it is not Huawei's mobile broadband dongles.
+- */
+-static int usb_stor_huawei_dongles_pid(struct us_data *us)
+-{
+- struct usb_interface_descriptor *idesc;
+- int idProduct;
+-
+- idesc = &us->pusb_intf->cur_altsetting->desc;
+- idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+- /* The first port is CDROM,
+- * means the dongle in the single port mode,
+- * and a switch command is required to be sent. */
+- if (idesc && idesc->bInterfaceNumber == 0) {
+- if ((idProduct == 0x1001)
+- || (idProduct == 0x1003)
+- || (idProduct == 0x1004)
+- || (idProduct >= 0x1401 && idProduct <= 0x1500)
+- || (idProduct >= 0x1505 && idProduct <= 0x1600)
+- || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+- return 1;
+- }
+- }
+- return 0;
+-}
+-
+-int usb_stor_huawei_init(struct us_data *us)
+-{
+- int result = 0;
+-
+- if (usb_stor_huawei_dongles_pid(us)) {
+- if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+- result = usb_stor_huawei_scsi_init(us);
+- else
+- result = usb_stor_huawei_feature_init(us);
+- }
+- return result;
+-}
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 5376d4f..529327f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+ * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+
+-/* This places the HUAWEI usb dongles in multi-port mode */
+-int usb_stor_huawei_init(struct us_data *us);
++/* This places the HUAWEI E220 devices in multi-port mode */
++int usb_stor_huawei_e220_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 12640ef..fa8a1b2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1515,10 +1515,335 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+ * This brings the HUAWEI data card devices into multi-port mode
+ */
+-UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
++UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index c374978..5149d4e 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -918,7 +918,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
+ tmp64 = (triplet_ret >> 2);
+ rn |= (tmp64 << i);
+
+- if (kthread_should_stop()) {
++ /* ensure we're called from kthread and not by netlink callback */
++ if (!dev->priv && kthread_should_stop()) {
+ dev_dbg(&dev->dev, "Abort w1_search\n");
+ return;
+ }
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index d07c4cd..e947e78 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -114,7 +114,8 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+ #endif
+- pci_disable_device(dev);
++ if (pci_is_enabled(dev))
++ pci_disable_device(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, 0);
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 613edd8..833dddb 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1064,7 +1064,9 @@ void bd_set_size(struct block_device *bdev, loff_t size)
+ {
+ unsigned bsize = bdev_logical_block_size(bdev);
+
+- bdev->bd_inode->i_size = size;
++ mutex_lock(&bdev->bd_inode->i_mutex);
++ i_size_write(bdev->bd_inode, size);
++ mutex_unlock(&bdev->bd_inode->i_mutex);
+ while (bsize < PAGE_CACHE_SIZE) {
+ if (size & bsize)
+ break;
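
The bd_set_size() fix above swaps a bare assignment for i_size_write() under i_mutex: on 32-bit SMP (and preempt) kernels i_size is a 64-bit field guarded by a seqcount, and writers must be serialized or a concurrent i_size_read() can spin or observe a torn value. The idiom, as a short kernel-style sketch:

#include <linux/fs.h>

/*
 * i_size_write() is not atomic on its own; hold i_mutex (as
 * bd_set_size() now does) so concurrent writers cannot interleave
 * the seqcount protecting i_size.
 */
static void set_size_locked(struct inode *inode, loff_t size)
{
        mutex_lock(&inode->i_mutex);
        i_size_write(inode, size);
        mutex_unlock(&inode->i_mutex);
}
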
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f4b839f..9899205 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -543,6 +543,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+ new_device->writeable = 0;
+ new_device->in_fs_metadata = 0;
+ new_device->can_discard = 0;
++ spin_lock_init(&new_device->io_lock);
+ list_replace_rcu(&device->dev_list, &new_device->dev_list);
+
+ call_rcu(&device->rcu, free_device);
+@@ -576,6 +577,12 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+ __btrfs_close_devices(fs_devices);
+ free_fs_devices(fs_devices);
+ }
++ /*
++ * Wait for rcu kworkers under __btrfs_close_devices
++ * to finish all blkdev_puts so device is really
++ * free when umount is done.
++ */
++ rcu_barrier();
+ return ret;
+ }
+
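
The added rcu_barrier() closes the umount race described in the comment: __btrfs_close_devices() frees each device with call_rcu(), and those callbacks do the final blkdev_put(), so without a barrier the close path can return while frees are still queued. The general call_rcu()/rcu_barrier() teardown idiom, sketched with a hypothetical dev_obj type:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct dev_obj {                /* hypothetical device record */
        struct rcu_head rcu;
        /* ... */
};

static void free_dev_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct dev_obj, rcu));
}

static void close_dev(struct dev_obj *d)
{
        call_rcu(&d->rcu, free_dev_rcu);        /* deferred release */
}

static void close_all_and_wait(void)
{
        /* ... close_dev() on every device ... */

        /*
         * Block until all queued RCU callbacks have run, so the
         * resources they release are really gone when we return,
         * which is exactly what the added rcu_barrier() guarantees.
         */
        rcu_barrier();
}
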
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b1451af..b3a2a40 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -561,6 +561,11 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+ dentry = ERR_PTR(-ENOENT);
+ break;
+ }
++ if (!S_ISDIR(dir->i_mode)) {
++ dput(dentry);
++ dentry = ERR_PTR(-ENOTDIR);
++ break;
++ }
+
+ /* skip separators */
+ while (*s == sep)
+diff --git a/fs/compat.c b/fs/compat.c
+index e07a3d3..4bf082d 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -572,6 +572,10 @@ ssize_t compat_rw_copy_check_uvector(int type,
+ }
+ *ret_pointer = iov;
+
++ ret = -EFAULT;
++ if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
++ goto out;
++
+ /*
+ * Single unix specification:
+ * We should -EINVAL if an element length is not >= 0 and fitting an
+@@ -1103,17 +1107,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
+ if (!file->f_op)
+ goto out;
+
+- ret = -EFAULT;
+- if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
+- goto out;
+-
+- tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
++ ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
+ UIO_FASTIOV, iovstack, &iov, 1);
+- if (tot_len == 0) {
+- ret = 0;
++ if (ret <= 0)
+ goto out;
+- }
+
++ tot_len = ret;
+ ret = rw_verify_area(type, file, pos, tot_len);
+ if (ret < 0)
+ goto out;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 922d289..b7f314f 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -374,7 +374,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
+ return bdev;
+
+ fail:
+- ext3_msg(sb, "error: failed to open journal device %s: %ld",
++ ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld",
+ __bdevname(dev, b), PTR_ERR(bdev));
+
+ return NULL;
+@@ -902,7 +902,7 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
+ /*todo: use simple_strtoll with >32bit ext3 */
+ sb_block = simple_strtoul(options, &options, 0);
+ if (*options && *options != ',') {
+- ext3_msg(sb, "error: invalid sb specification: %s",
++ ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
+ (char *) *data);
+ return 1;
+ }
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index a87a656..c25cf15 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
+ int charlen;
+
+ if (utf8) {
+- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
++ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
++ (wchar_t *) outname, FAT_LFN_LEN + 2);
+ if (*outlen < 0)
+ return *outlen;
+ else if (*outlen > FAT_LFN_LEN)
+diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
+index 4f9319a..98f1261 100644
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -366,20 +366,14 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
+ struct inode *old_dir = data->old_dir;
+ struct inode *new_dir = data->new_dir;
+ struct dentry *old_dentry = data->old_dentry;
+- struct dentry *new_dentry = data->new_dentry;
+
+ if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+
+- if (task->tk_status != 0) {
++ if (task->tk_status != 0)
+ nfs_cancel_async_unlink(old_dentry);
+- return;
+- }
+-
+- d_drop(old_dentry);
+- d_drop(new_dentry);
+ }
+
+ /**
+@@ -589,6 +583,18 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
+ error = rpc_wait_for_completion_task(task);
+ if (error == 0)
+ error = task->tk_status;
++ switch (error) {
++ case 0:
++ /* The rename succeeded */
++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
++ d_move(dentry, sdentry);
++ break;
++ case -ERESTARTSYS:
++ /* The result of the rename is unknown. Play it safe by
++ * forcing a new lookup */
++ d_drop(dentry);
++ d_drop(sdentry);
++ }
+ rpc_put_task(task);
+ out_dput:
+ dput(sdentry);
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 44a88a9..0eb059e 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -114,34 +114,57 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
+ }
+ EXPORT_SYMBOL(utf32_to_utf8);
+
+-int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
++static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian)
++{
++ switch (endian) {
++ default:
++ *s = (wchar_t) c;
++ break;
++ case UTF16_LITTLE_ENDIAN:
++ *s = __cpu_to_le16(c);
++ break;
++ case UTF16_BIG_ENDIAN:
++ *s = __cpu_to_be16(c);
++ break;
++ }
++}
++
++int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian,
++ wchar_t *pwcs, int maxlen)
+ {
+ u16 *op;
+ int size;
+ unicode_t u;
+
+ op = pwcs;
+- while (*s && len > 0) {
++ while (len > 0 && maxlen > 0 && *s) {
+ if (*s & 0x80) {
+ size = utf8_to_utf32(s, len, &u);
+ if (size < 0)
+ return -EINVAL;
++ s += size;
++ len -= size;
+
+ if (u >= PLANE_SIZE) {
++ if (maxlen < 2)
++ break;
+ u -= PLANE_SIZE;
+- *op++ = (wchar_t) (SURROGATE_PAIR |
+- ((u >> 10) & SURROGATE_BITS));
+- *op++ = (wchar_t) (SURROGATE_PAIR |
++ put_utf16(op++, SURROGATE_PAIR |
++ ((u >> 10) & SURROGATE_BITS),
++ endian);
++ put_utf16(op++, SURROGATE_PAIR |
+ SURROGATE_LOW |
+- (u & SURROGATE_BITS));
++ (u & SURROGATE_BITS),
++ endian);
++ maxlen -= 2;
+ } else {
+- *op++ = (wchar_t) u;
++ put_utf16(op++, u, endian);
++ maxlen--;
+ }
+- s += size;
+- len -= size;
+ } else {
+- *op++ = *s++;
++ put_utf16(op++, *s++, endian);
+ len--;
++ maxlen--;
+ }
+ }
+ return op - pwcs;
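
The rewritten utf8s_to_utf16s() above adds an output bound (maxlen), explicit endianness via put_utf16(), and surrogate-pair emission for code points beyond the BMP. The surrogate arithmetic: subtract PLANE_SIZE (0x10000), then the high unit is 0xD800 | (u >> 10) and the low unit is 0xDC00 | (u & 0x3FF). A standalone check of that math:

#include <stdint.h>
#include <stdio.h>

/* Encode a code point beyond the BMP (>= U+10000) as a UTF-16
 * surrogate pair, the same arithmetic put_utf16() is fed above. */
static void to_surrogates(uint32_t u, uint16_t *hi, uint16_t *lo)
{
        u -= 0x10000;                   /* PLANE_SIZE */
        *hi = 0xD800 | (u >> 10);       /* SURROGATE_PAIR | high bits */
        *lo = 0xDC00 | (u & 0x3FF);     /* ... | SURROGATE_LOW | low bits */
}

int main(void)
{
        uint16_t hi, lo;

        to_surrogates(0x1F600, &hi, &lo);
        printf("U+1F600 -> 0x%04X 0x%04X\n", hi, lo);   /* 0xD83D 0xDE00 */
        return 0;
}
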
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 05ed5ca..8ca88fc 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -859,6 +859,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
+ {
+ int ret = -ENOENT;
+
++ if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
++ return -EINVAL;
++
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_pipe) {
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 98f34b8..6712ab6 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -72,8 +72,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
+ typedef int (*dm_preresume_fn) (struct dm_target *ti);
+ typedef void (*dm_resume_fn) (struct dm_target *ti);
+
+-typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+- char *result, unsigned int maxlen);
++typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
++ char *result, unsigned maxlen);
+
+ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 1721c41..ce95a4b 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -30,7 +30,12 @@
+ #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
+
+ typedef unsigned long efi_status_t;
+ typedef u8 efi_bool_t;
+@@ -470,6 +475,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
++ efi_query_variable_info_t *query_variable_info;
+ };
+
+ struct efivars {
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index d47beef..5dc635f 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -43,7 +43,7 @@ enum utf16_endian {
+ UTF16_BIG_ENDIAN
+ };
+
+-/* nls.c */
++/* nls_base.c */
+ extern int register_nls(struct nls_table *);
+ extern int unregister_nls(struct nls_table *);
+ extern struct nls_table *load_nls(char *);
+@@ -52,7 +52,8 @@ extern struct nls_table *load_nls_default(void);
+
+ extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+ extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+-extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
++extern int utf8s_to_utf16s(const u8 *s, int len,
++ enum utf16_endian endian, wchar_t *pwcs, int maxlen);
+ extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+ enum utf16_endian endian, u8 *s, int maxlen);
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index bae516e..42e35c3 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -48,7 +48,10 @@
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+ #define PORT_BRCM_TRUMANAGE 25
+-#define PORT_MAX_8250 25 /* max port ID */
++#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */
++#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
++#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
++#define PORT_MAX_8250 28 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d2f55ea..71e1816 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -481,6 +481,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ if (force_default || ka->sa.sa_handler != SIG_IGN)
+ ka->sa.sa_handler = SIG_DFL;
+ ka->sa.sa_flags = 0;
++#ifdef SA_RESTORER
++ ka->sa.sa_restorer = NULL;
++#endif
+ sigemptyset(&ka->sa.sa_mask);
+ ka++;
+ }
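
The two lines added to flush_signal_handlers() above are the 3.2 backport of the CVE-2013-0914 fix: the function runs during exec(), and a stale sa_restorer would otherwise carry a userspace address from the old program into the new one, weakening ASLR; the matching per-architecture __ARCH_HAS_SA_RESTORER defines appear in 1041_linux-3.2.42.patch below. A pared-down userspace analogue; struct ksigaction and KNSIG are simplified stand-ins:

#include <signal.h>
#include <stddef.h>

#define KNSIG 64        /* hypothetical handler-table size */

struct ksigaction {     /* pared-down stand-in for struct k_sigaction */
        void (*handler)(int);
        unsigned long flags;
        void (*restorer)(void); /* exists where __ARCH_HAS_SA_RESTORER */
};

/* Analogue of flush_signal_handlers() at exec(): reset handlers and
 * wipe the saved trampoline so its address cannot leak into the new
 * program. */
static void flush_handlers(struct ksigaction *ka, int force_default)
{
        int i;

        for (i = 0; i < KNSIG; i++, ka++) {
                if (force_default || ka->handler != SIG_IGN)
                        ka->handler = SIG_DFL;
                ka->flags = 0;
                ka->restorer = NULL;
        }
}
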
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index cd31345..762264d 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -386,24 +386,28 @@ config KPROBE_EVENT
+ If you want to use perf tools, this option is strongly recommended.
+
+ config DYNAMIC_FTRACE
+- bool "enable/disable ftrace tracepoints dynamically"
++ bool "enable/disable function tracing dynamically"
+ depends on FUNCTION_TRACER
+ depends on HAVE_DYNAMIC_FTRACE
+ default y
+ help
+- This option will modify all the calls to ftrace dynamically
+- (will patch them out of the binary image and replace them
+- with a No-Op instruction) as they are called. A table is
+- created to dynamically enable them again.
++ This option will modify all the calls to function tracing
++ dynamically (will patch them out of the binary image and
++ replace them with a No-Op instruction) on boot up. During
++ compile time, a table is made of all the locations that ftrace
++ can function trace, and this table is linked into the kernel
++ image. When this is enabled, functions can be individually
++ enabled, and the functions not enabled will not affect
++ performance of the system.
++
++ See the files in /sys/kernel/debug/tracing:
++ available_filter_functions
++ set_ftrace_filter
++ set_ftrace_notrace
+
+ This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+ otherwise has native performance as long as no tracing is active.
+
+- The changes to the code are done by a kernel thread that
+- wakes up once a second and checks to see if any ftrace calls
+- were made. If so, it runs stop_machine (stops all CPUS)
+- and modifies the code to jump over the call to ftrace.
+-
+ config FUNCTION_PROFILER
+ bool "Kernel function profiler"
+ depends on FUNCTION_TRACER
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 9ad7d1e..09d87b7 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -515,19 +515,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
+
+ zone->present_pages += onlined_pages;
+ zone->zone_pgdat->node_present_pages += onlined_pages;
+- if (need_zonelists_rebuild)
+- build_all_zonelists(zone);
+- else
+- zone_pcp_update(zone);
++ if (onlined_pages) {
++ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++ if (need_zonelists_rebuild)
++ build_all_zonelists(zone);
++ else
++ zone_pcp_update(zone);
++ }
+
+ mutex_unlock(&zonelists_mutex);
+
+ init_per_zone_wmark_min();
+
+- if (onlined_pages) {
++ if (onlined_pages)
+ kswapd_run(zone_to_nid(zone));
+- node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+- }
+
+ vm_total_pages = nr_free_pagecache_pages();
+
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index e920aa3..70e814a 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -434,12 +434,6 @@ compat_process_vm_rw(compat_pid_t pid,
+ if (flags != 0)
+ return -EINVAL;
+
+- if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
+- goto out;
+-
+- if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
+- goto out;
+-
+ if (vm_write)
+ rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
+ UIO_FASTIOV, iovstack_l,
+@@ -464,8 +458,6 @@ free_iovecs:
+ kfree(iov_r);
+ if (iov_l != iovstack_l)
+ kfree(iov_l);
+-
+-out:
+ return rc;
+ }
+
+diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
+index ac3520e..0c82ce3 100644
+--- a/net/batman-adv/icmp_socket.c
++++ b/net/batman-adv/icmp_socket.c
+@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
+
+ spin_unlock_bh(&socket_client->lock);
+
+- error = __copy_to_user(buf, &socket_packet->icmp_packet,
+- socket_packet->icmp_len);
++ packet_len = min(count, socket_packet->icmp_len);
++ error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
+
+- packet_len = socket_packet->icmp_len;
+ kfree(socket_packet);
+
+ if (error)
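
The batman-adv fix above clamps the copy to min(count, icmp_len), so a packet larger than the caller's buffer can no longer overrun it, and it switches to copy_to_user(), which unlike __copy_to_user() also validates the destination with access_ok(). The rule in miniature, as a kernel-style sketch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch of a bounded read path: clamp the kernel record length to
 * the caller's count before copying out. */
static ssize_t bounded_read(char __user *buf, size_t count,
                            const void *rec, size_t rec_len)
{
        size_t n = min(count, rec_len);

        if (copy_to_user(buf, rec, n))
                return -EFAULT;
        return n;
}
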
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 19acd00..16fbf8c 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -2354,6 +2354,8 @@ static const struct proto_ops dn_proto_ops = {
+ .sendpage = sock_no_sendpage,
+ };
+
++void dn_register_sysctl_skeleton(void);
++void dn_unregister_sysctl_skeleton(void);
+ void dn_register_sysctl(void);
+ void dn_unregister_sysctl(void);
+
+@@ -2374,6 +2376,7 @@ static int __init decnet_init(void)
+ if (rc != 0)
+ goto out;
+
++ dn_register_sysctl_skeleton();
+ dn_neigh_init();
+ dn_dev_init();
+ dn_route_init();
+@@ -2413,6 +2416,7 @@ static void __exit decnet_exit(void)
+ dn_fib_cleanup();
+
+ proc_net_remove(&init_net, "decnet");
++ dn_unregister_sysctl_skeleton();
+
+ proto_unregister(&dn_proto);
+
+diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
+index 02e75d1..d50a13c 100644
+--- a/net/decnet/sysctl_net_decnet.c
++++ b/net/decnet/sysctl_net_decnet.c
+@@ -55,6 +55,7 @@ static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW };
+ static char node_name[7] = "???";
+
+ static struct ctl_table_header *dn_table_header = NULL;
++static struct ctl_table_header *dn_skeleton_table_header = NULL;
+
+ /*
+ * ctype.h :-)
+@@ -357,6 +358,27 @@ static struct ctl_path dn_path[] = {
+ { }
+ };
+
++static struct ctl_table empty[1];
++
++static struct ctl_table dn_skeleton[] = {
++ {
++ .procname = "conf",
++ .mode = 0555,
++ .child = empty,
++ },
++ { }
++};
++
++void dn_register_sysctl_skeleton(void)
++{
++ dn_skeleton_table_header = register_sysctl_paths(dn_path, dn_skeleton);
++}
++
++void dn_unregister_sysctl_skeleton(void)
++{
++ unregister_sysctl_table(dn_skeleton_table_header);
++}
++
+ void dn_register_sysctl(void)
+ {
+ dn_table_header = register_sysctl_paths(dn_path, dn_table);
+@@ -368,6 +390,12 @@ void dn_unregister_sysctl(void)
+ }
+
+ #else /* CONFIG_SYSCTL */
++void dn_register_sysctl_skeleton(void)
++{
++}
++void dn_unregister_sysctl_skeleton(void)
++{
++}
+ void dn_unregister_sysctl(void)
+ {
+ }
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 6c91208..3c5d329 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -481,13 +481,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
+ * xprt_wait_for_buffer_space - wait for transport output buffer to clear
+ * @task: task to be put to sleep
+ * @action: function pointer to be executed after wait
++ *
++ * Note that we only set the timer for the case of RPC_IS_SOFT(), since
++ * we don't in general want to force a socket disconnection due to
++ * an incomplete RPC call transmission.
+ */
+ void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
+ {
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+- task->tk_timeout = req->rq_timeout;
++ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
+ rpc_sleep_on(&xprt->pending, task, action);
+ }
+ EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
+diff --git a/security/keys/compat.c b/security/keys/compat.c
+index 4c48e13..1b0b7bf 100644
+--- a/security/keys/compat.c
++++ b/security/keys/compat.c
+@@ -40,12 +40,12 @@ long compat_keyctl_instantiate_key_iov(
+ ARRAY_SIZE(iovstack),
+ iovstack, &iov, 1);
+ if (ret < 0)
+- return ret;
++ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+-
++err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 1068cb1..60d0df7 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -54,7 +54,7 @@ int install_user_keyrings(void)
+
+ kenter("%p{%u}", user, user->uid);
+
+- if (user->uid_keyring) {
++ if (user->uid_keyring && user->session_keyring) {
+ kleave(" = 0 [exist]");
+ return 0;
+ }
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 160b1bd..24d44b2 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -290,10 +290,10 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
+ err = snd_timer_open(&t, str, &tid, q->queue);
+ }
+- if (err < 0) {
+- snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
+- return err;
+- }
++ }
++ if (err < 0) {
++ snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
++ return err;
+ }
+ t->callback = snd_seq_timer_interrupt;
+ t->callback_data = q;
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 130cfe6..ac0af03 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -209,7 +209,10 @@ static int slave_put(struct snd_kcontrol *kcontrol,
+ }
+ if (!changed)
+ return 0;
+- return slave_put_val(slave, ucontrol);
++ err = slave_put_val(slave, ucontrol);
++ if (err < 0)
++ return err;
++ return 1;
+ }
+
+ static int slave_tlv_cmd(struct snd_kcontrol *kcontrol,
diff --git a/3.2.54/1041_linux-3.2.42.patch b/3.2.54/1041_linux-3.2.42.patch
new file mode 100644
index 0000000..77a08ed
--- /dev/null
+++ b/3.2.54/1041_linux-3.2.42.patch
@@ -0,0 +1,3602 @@
+diff --git a/Makefile b/Makefile
+index 95e6220..d44f009 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
+index 43ba0fb..559ee24 100644
+--- a/arch/arm/include/asm/signal.h
++++ b/arch/arm/include/asm/signal.h
+@@ -127,6 +127,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
+index 8790dfc..e6952a0 100644
+--- a/arch/avr32/include/asm/signal.h
++++ b/arch/avr32/include/asm/signal.h
+@@ -128,6 +128,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
+index ea6af9a..057fea2 100644
+--- a/arch/cris/include/asm/signal.h
++++ b/arch/cris/include/asm/signal.h
+@@ -122,6 +122,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
+index fd8b66e..8695707 100644
+--- a/arch/h8300/include/asm/signal.h
++++ b/arch/h8300/include/asm/signal.h
+@@ -121,6 +121,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
+index b2eeb0d..802d561 100644
+--- a/arch/m32r/include/asm/signal.h
++++ b/arch/m32r/include/asm/signal.h
+@@ -123,6 +123,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 93fe83e..a20ae63 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -119,6 +119,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
+index 1865d72..eecaa76 100644
+--- a/arch/mn10300/include/asm/signal.h
++++ b/arch/mn10300/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
+index 3eb13be..ec63a0a 100644
+--- a/arch/powerpc/include/asm/signal.h
++++ b/arch/powerpc/include/asm/signal.h
+@@ -109,6 +109,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index edae5bb..b92b756 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -268,7 +268,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+- .mmu_features = MMU_FTR_HPTE_TABLE,
++ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
+index cdf5cb2..c872626 100644
+--- a/arch/s390/include/asm/signal.h
++++ b/arch/s390/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index 1d8648c..8743029 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
+
+ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ {
+- if (unlikely(cpumask_empty(mm_cpumask(mm))))
+- return;
+ /*
+ * If the machine has IDTE we prefer to do a per mm flush
+ * on all cpus instead of doing a local flush if the mm
+diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
+index e49b828..4929431 100644
+--- a/arch/sparc/include/asm/signal.h
++++ b/arch/sparc/include/asm/signal.h
+@@ -191,6 +191,7 @@ struct __old_sigaction {
+ unsigned long sa_flags;
+ void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ typedef struct sigaltstack {
+ void __user *ss_sp;
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 598457c..6cbc795 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
+ extern void do_notify_resume(struct pt_regs *, void *, __u32);
+ # endif /* __KERNEL__ */
+
++#define __ARCH_HAS_SA_RESTORER
++
+ #ifdef __i386__
+ # ifdef __KERNEL__
+ struct old_sigaction {
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 73da6b6..2d4e76b 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -736,3 +736,13 @@ void intel_ds_init(void)
+ }
+ }
+ }
++
++void perf_restore_debug_store(void)
++{
++ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
++
++ if (!x86_pmu.bts && !x86_pmu.pebs)
++ return;
++
++ wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
++}
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index b7c2849..554b7b5 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -169,10 +169,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+ char c;
+ unsigned zero_len;
+
+- for (; len; --len) {
++ for (; len; --len, to++) {
+ if (__get_user_nocheck(c, from++, sizeof(char)))
+ break;
+- if (__put_user_nocheck(c, to++, sizeof(char)))
++ if (__put_user_nocheck(c, to, sizeof(char)))
+ break;
+ }
+
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index f10c0af..43c9f6a 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -11,6 +11,7 @@
+ #include <linux/suspend.h>
+ #include <linux/export.h>
+ #include <linux/smp.h>
++#include <linux/perf_event.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+@@ -225,6 +226,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
+
+ do_fpu_end();
+ mtrr_bp_restore();
++ perf_restore_debug_store();
+ }
+
+ /* Needed by apm.c */
+diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
+index 633ba73..75edf8a 100644
+--- a/arch/xtensa/include/asm/signal.h
++++ b/arch/xtensa/include/asm/signal.h
+@@ -133,6 +133,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index efba163..bf4d6e2 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -53,6 +53,24 @@ config EFI_VARS
+ Subsequent efibootmgr releases may be found at:
+ <http://linux.dell.com/efibootmgr>
+
++config EFI_VARS_PSTORE
++ bool "Register efivars backend for pstore"
++ depends on EFI_VARS && PSTORE
++ default y
++ help
++	  Say Y here to enable the use of efivars as a backend to pstore. This
++ will allow writing console messages, crash dumps, or anything
++ else supported by pstore to EFI variables.
++
++config EFI_VARS_PSTORE_DEFAULT_DISABLE
++ bool "Disable using efivars as a pstore backend by default"
++ depends on EFI_VARS_PSTORE
++ default n
++ help
++ Saying Y here will disable the use of efivars as a storage
++ backend for pstore by default. This setting can be overridden
++ using the efivars module's pstore_disable parameter.
++
+ config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 81346ae..b15c0aa 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -92,6 +92,11 @@ MODULE_VERSION(EFIVARS_VERSION);
+
+ #define DUMP_NAME_LEN 52
+
++static bool efivars_pstore_disable =
++ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
++
++module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
++
+ /*
+ * The maximum size of VariableName + Data = 1024
+ * Therefore, it's reasonable to save that much
+@@ -122,6 +127,8 @@ struct efivar_attribute {
+ ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+ };
+
++static struct efivars __efivars;
++
+ #define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+@@ -146,6 +153,14 @@ efivar_create_sysfs_entry(struct efivars *efivars,
+ efi_char16_t *variable_name,
+ efi_guid_t *vendor_guid);
+
++/*
++ * Prototype for the workqueue function that updates sysfs entries
++ */
++
++static void efivar_update_sysfs_entries(struct work_struct *);
++static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
++static bool efivar_wq_enabled = true;
++
+ /* Return the number of unicode characters in data */
+ static unsigned long
+ utf16_strnlen(efi_char16_t *s, size_t maxlength)
+@@ -659,8 +674,6 @@ static struct kobj_type efivar_ktype = {
+ .default_attrs = def_attrs,
+ };
+
+-static struct pstore_info efi_pstore_info;
+-
+ static inline void
+ efivar_unregister(struct efivar_entry *var)
+ {
+@@ -697,7 +710,7 @@ static int efi_status_to_err(efi_status_t status)
+ return err;
+ }
+
+-#ifdef CONFIG_PSTORE
++#ifdef CONFIG_EFI_VARS_PSTORE
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+@@ -774,19 +787,21 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+
+ spin_lock_irqsave(&efivars->lock, flags);
+
+- /*
+- * Check if there is a space enough to log.
+- * size: a size of logging data
+- * DUMP_NAME_LEN * 2: a maximum size of variable name
+- */
++ if (size) {
++ /*
++ * Check if there is a space enough to log.
++ * size: a size of logging data
++ * DUMP_NAME_LEN * 2: a maximum size of variable name
++ */
+
+- status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
+- size + DUMP_NAME_LEN * 2);
++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
++ size + DUMP_NAME_LEN * 2);
+
+- if (status) {
+- spin_unlock_irqrestore(&efivars->lock, flags);
+- *id = part;
+- return -ENOSPC;
++ if (status) {
++ spin_unlock_irqrestore(&efivars->lock, flags);
++ *id = part;
++ return -ENOSPC;
++ }
+ }
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+@@ -830,11 +845,8 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ if (found)
+ efivar_unregister(found);
+
+- if (size)
+- ret = efivar_create_sysfs_entry(efivars,
+- utf16_strsize(efi_name,
+- DUMP_NAME_LEN * 2),
+- efi_name, &vendor);
++ if (efivar_wq_enabled)
++ schedule_work(&efivar_work);
+
+ *id = part;
+ return ret;
+@@ -847,36 +859,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+
+ return 0;
+ }
+-#else
+-static int efi_pstore_open(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_close(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+- struct timespec *timespec,
+- char **buf, struct pstore_info *psi)
+-{
+- return -1;
+-}
+-
+-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+- unsigned int part, size_t size, struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+- struct pstore_info *psi)
+-{
+- return 0;
+-}
+-#endif
+
+ static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+@@ -888,6 +870,24 @@ static struct pstore_info efi_pstore_info = {
+ .erase = efi_pstore_erase,
+ };
+
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ efivars->efi_pstore_info = efi_pstore_info;
++ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
++ if (efivars->efi_pstore_info.buf) {
++ efivars->efi_pstore_info.bufsize = 1024;
++ efivars->efi_pstore_info.data = efivars;
++ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
++ pstore_register(&efivars->efi_pstore_info);
++ }
++}
++#else
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ return;
++}
++#endif
++
+ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+@@ -1025,6 +1025,103 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ return count;
+ }
+
++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
++{
++ struct efivar_entry *entry, *n;
++ struct efivars *efivars = &__efivars;
++ unsigned long strsize1, strsize2;
++ bool found = false;
++
++ strsize1 = utf16_strsize(variable_name, 1024);
++ list_for_each_entry_safe(entry, n, &efivars->list, list) {
++ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
++ if (strsize1 == strsize2 &&
++ !memcmp(variable_name, &(entry->var.VariableName),
++ strsize2) &&
++ !efi_guidcmp(entry->var.VendorGuid,
++ *vendor)) {
++ found = true;
++ break;
++ }
++ }
++ return found;
++}
++
++/*
++ * Returns the size of variable_name, in bytes, including the
++ * terminating NULL character, or variable_name_size if no NULL
++ * character is found among the first variable_name_size bytes.
++ */
++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
++ unsigned long variable_name_size)
++{
++ unsigned long len;
++ efi_char16_t c;
++
++ /*
++ * The variable name is, by definition, a NULL-terminated
++ * string, so make absolutely sure that variable_name_size is
++ * the value we expect it to be. If not, return the real size.
++ */
++ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
++ c = variable_name[(len / sizeof(c)) - 1];
++ if (!c)
++ break;
++ }
++
++ return min(len, variable_name_size);
++}
++
++static void efivar_update_sysfs_entries(struct work_struct *work)
++{
++ struct efivars *efivars = &__efivars;
++ efi_guid_t vendor;
++ efi_char16_t *variable_name;
++ unsigned long variable_name_size = 1024;
++ efi_status_t status = EFI_NOT_FOUND;
++ bool found;
++
++ /* Add new sysfs entries */
++ while (1) {
++ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
++ if (!variable_name) {
++ pr_err("efivars: Memory allocation failed.\n");
++ return;
++ }
++
++ spin_lock_irq(&efivars->lock);
++ found = false;
++ while (1) {
++ variable_name_size = 1024;
++ status = efivars->ops->get_next_variable(
++ &variable_name_size,
++ variable_name,
++ &vendor);
++ if (status != EFI_SUCCESS) {
++ break;
++ } else {
++ if (!variable_is_present(variable_name,
++ &vendor)) {
++ found = true;
++ break;
++ }
++ }
++ }
++ spin_unlock_irq(&efivars->lock);
++
++ if (!found) {
++ kfree(variable_name);
++ break;
++ } else {
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
++ efivar_create_sysfs_entry(efivars,
++ variable_name_size,
++ variable_name, &vendor);
++ }
++ }
++}
++
+ /*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver
+@@ -1212,6 +1309,35 @@ void unregister_efivars(struct efivars *efivars)
+ }
+ EXPORT_SYMBOL_GPL(unregister_efivars);
+
++/*
++ * Print a warning when duplicate EFI variables are encountered and
++ * disable the sysfs workqueue since the firmware is buggy.
++ */
++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
++ unsigned long len16)
++{
++ size_t i, len8 = len16 / sizeof(efi_char16_t);
++ char *s8;
++
++ /*
++ * Disable the workqueue since the algorithm it uses for
++ * detecting new variables won't work with this buggy
++ * implementation of GetNextVariableName().
++ */
++ efivar_wq_enabled = false;
++
++ s8 = kzalloc(len8, GFP_KERNEL);
++ if (!s8)
++ return;
++
++ for (i = 0; i < len8; i++)
++ s8[i] = s16[i];
++
++ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
++ s8, vendor_guid);
++ kfree(s8);
++}
++
+ int register_efivars(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *parent_kobj)
+@@ -1252,6 +1378,24 @@ int register_efivars(struct efivars *efivars,
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
++
++ /*
++ * Some firmware implementations return the
++ * same variable name on multiple calls to
++ * get_next_variable(). Terminate the loop
++ * immediately as there is no guarantee that
++ * we'll ever see a different variable name,
++ * and may end up looping here forever.
++ */
++ if (variable_is_present(variable_name, &vendor_guid)) {
++ dup_variable_bug(variable_name, &vendor_guid,
++ variable_name_size);
++ status = EFI_NOT_FOUND;
++ break;
++ }
++
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name,
+@@ -1271,15 +1415,8 @@ int register_efivars(struct efivars *efivars,
+ if (error)
+ unregister_efivars(efivars);
+
+- efivars->efi_pstore_info = efi_pstore_info;
+-
+- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+- if (efivars->efi_pstore_info.buf) {
+- efivars->efi_pstore_info.bufsize = 1024;
+- efivars->efi_pstore_info.data = efivars;
+- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+- pstore_register(&efivars->efi_pstore_info);
+- }
++ if (!efivars_pstore_disable)
++ efivar_pstore_register(efivars);
+
+ out:
+ kfree(variable_name);
+@@ -1288,7 +1425,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+
+-static struct efivars __efivars;
+ static struct efivar_operations ops;
+
+ /*
+@@ -1346,6 +1482,8 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
++ cancel_work_sync(&efivar_work);
++
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
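The efivars hunks above stop creating sysfs entries directly from efi_pstore_write() and instead schedule a work item, since the pstore write path may run in a restricted context where sysfs creation, which can sleep, is unsafe. A sketch of the defer-to-workqueue pattern under that assumption — create_entries() and entry_work are illustrative names only:

    /* Defer work that may sleep out of a context that must not sleep.
     * Names here are illustrative, not from the driver. */
    #include <linux/workqueue.h>

    static void create_entries(struct work_struct *work)
    {
            /* process context: may allocate memory, create sysfs nodes */
    }
    static DECLARE_WORK(entry_work, create_entries);

    static void log_from_atomic_context(void)
    {
            schedule_work(&entry_work);     /* safe from atomic context */
    }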
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 9080eb7..7211f67 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -852,7 +852,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
++ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+ /* ignore tiny modes */
+@@ -933,6 +933,7 @@ set_size:
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
++ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
+
+ return mode;
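The one-character drm_edid fix above corrects where the high bits of the vertical sync offset land. In the detailed timing descriptor the 6-bit vsync offset is split: its low four bits occupy the upper nibble of vsync_offset_pulse_width_lo, and its high two bits occupy bits 3:2 of hsync_vsync_offset_pulse_width_hi, so they belong at bit positions 5:4 and must be shifted left by two, not right. A standalone worked example under that layout (not kernel code):

    /* hi = 0x0c: high two bits of the offset are 0b11 (3).
     * lo = 0xa0: low four bits of the offset are 0b1010 (10).
     * True offset = (3 << 4) | 10 = 58. */
    unsigned hi = 0x0c, lo = 0xa0;
    unsigned wrong = (hi & 0xc) >> 2 | lo >> 4;   /*  3 | 10 = 11 */
    unsigned right = (hi & 0xc) << 2 | lo >> 4;   /* 48 | 10 = 58 */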
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 5620192..9b4e5c6 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -122,7 +122,7 @@ static const char *cache_level_str(int type)
+ static void
+ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+ {
+- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
++ seq_printf(m, "%pK: %s%s %8zd %04x %04x %d %d%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 878b989..b1bb734 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -907,15 +907,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+ {
+ int i;
++ int relocs_total = 0;
++ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+- /* First check for malicious input causing overflow */
+- if (exec[i].relocation_count >
+- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
++ /* First check for malicious input causing overflow in
++ * the worst case where we need to allocate the entire
++ * relocation tree as a single array.
++ */
++ if (exec[i].relocation_count > relocs_max - relocs_total)
+ return -EINVAL;
++ relocs_total += exec[i].relocation_count;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
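The execbuffer validation above is an overflow-safe summation idiom: rather than checking each relocation_count against the absolute maximum in isolation, it keeps a running total and rejects any count that exceeds the remaining headroom, so the accumulated sum can never wrap. A generic sketch of the idiom, with placeholder names:

    /* Reject any addend that would push the running total past max;
     * 'total' therefore never overflows. Names are placeholders. */
    #include <stddef.h>

    static int sum_checked(const unsigned *counts, size_t n,
                           unsigned max, unsigned *out)
    {
            unsigned total = 0;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (counts[i] > max - total)
                            return -1;       /* budget exceeded */
                    total += counts[i];
            }
            *out = total;
            return 0;
    }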
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 4591582..17961df 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8059,7 +8059,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
++ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ if (intel_enable_rc6(dev_priv->dev))
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index 289140b..cffb007 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -419,6 +419,25 @@ blind_set:
+ goto end;
+ }
+
++static void intel_setup_cadls(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ int i = 0;
++ u32 disp_id;
++
++ /* Initialize the CADL field by duplicating the DIDL values.
++	 * Technically, this is not always correct, as display outputs may
++	 * exist but not be active. This initialization is necessary for some
++	 * Clevo laptops that check this field before processing the brightness
++	 * and display switching hotkeys. Just like DIDL, CADL is NULL-terminated
++	 * if there are fewer than eight devices. */
++ do {
++ disp_id = ioread32(&opregion->acpi->didl[i]);
++ iowrite32(disp_id, &opregion->acpi->cadl[i]);
++ } while (++i < 8 && disp_id != 0);
++}
++
+ void intel_opregion_init(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -428,8 +447,10 @@ void intel_opregion_init(struct drm_device *dev)
+ return;
+
+ if (opregion->acpi) {
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_didl_outputs(dev);
++ intel_setup_cadls(dev);
++ }
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
+index 17e1a9b..441de38 100644
+--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
++++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
+@@ -139,13 +139,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ sdomain, ddomain, "dma");
+ }
+
+- time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+- RADEON_BENCHMARK_COPY_BLIT, n);
+- if (time < 0)
+- goto out_cleanup;
+- if (time > 0)
+- radeon_benchmark_log_results(n, size, time,
+- sdomain, ddomain, "blit");
++ if (rdev->asic->copy_blit) {
++ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
++ RADEON_BENCHMARK_COPY_BLIT, n);
++ if (time < 0)
++ goto out_cleanup;
++ if (time > 0)
++ radeon_benchmark_log_results(n, size, time,
++ sdomain, ddomain, "blit");
++ }
+
+ out_cleanup:
+ if (sobj) {
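The radeon_benchmark change above guards an optional entry in the ASIC ops table before exercising it, since copy_blit may be absent on some hardware. A minimal sketch of the optional-callback guard, with illustrative names:

    /* Guard optional ops-table callbacks; names are illustrative only. */
    struct copy_ops {
            int (*copy_blit)(void *ctx);    /* may be NULL */
            int (*copy_dma)(void *ctx);     /* may be NULL */
    };

    static int run_copy(const struct copy_ops *ops, void *ctx)
    {
            if (ops->copy_blit)             /* only exercise what exists */
                    return ops->copy_blit(ctx);
            if (ops->copy_dma)
                    return ops->copy_dma(ctx);
            return -1;                      /* no copy method available */
    }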
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 3d7885a..3056ea4 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -341,7 +341,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ u32 val;
+ int err = 0;
+
+- clk_enable(i2c_dev->clk);
++ err = clk_enable(i2c_dev->clk);
++ if (err < 0) {
++ dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
++ return err;
++ }
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+@@ -536,7 +540,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+- clk_enable(i2c_dev->clk);
++ ret = clk_enable(i2c_dev->clk);
++ if (ret < 0) {
++ dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
++ return ret;
++ }
++
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index da4d299..2c9dd2c 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2212,7 +2212,7 @@ static struct target_type pool_target = {
+ .name = "thin-pool",
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ DM_TARGET_IMMUTABLE,
+- .version = {1, 0, 1},
++ .version = {1, 0, 2},
+ .module = THIS_MODULE,
+ .ctr = pool_ctr,
+ .dtr = pool_dtr,
+@@ -2428,7 +2428,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+
+ static struct target_type thin_target = {
+ .name = "thin",
+- .version = {1, 0, 1},
++ .version = {1, 0, 2},
+ .module = THIS_MODULE,
+ .ctr = thin_ctr,
+ .dtr = thin_dtr,
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index e6cdfde..1de0f5f 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -139,15 +139,8 @@ struct child {
+ struct btree_node *n;
+ };
+
+-static struct dm_btree_value_type le64_type = {
+- .context = NULL,
+- .size = sizeof(__le64),
+- .inc = NULL,
+- .dec = NULL,
+- .equal = NULL
+-};
+-
+-static int init_child(struct dm_btree_info *info, struct btree_node *parent,
++static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
++ struct btree_node *parent,
+ unsigned index, struct child *result)
+ {
+ int r, inc;
+@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+- inc_children(info->tm, result->n, &le64_type);
++ inc_children(info->tm, result->n, vt);
+
+ *((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(result->block));
+@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ }
+
+ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+- unsigned left_index)
++ struct dm_btree_value_type *vt, unsigned left_index)
+ {
+ int r;
+ struct btree_node *parent;
+@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+
+ parent = dm_block_data(shadow_current(s));
+
+- r = init_child(info, parent, left_index, &left);
++ r = init_child(info, vt, parent, left_index, &left);
+ if (r)
+ return r;
+
+- r = init_child(info, parent, left_index + 1, &right);
++ r = init_child(info, vt, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ }
+
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+- unsigned left_index)
++ struct dm_btree_value_type *vt, unsigned left_index)
+ {
+ int r;
+ struct btree_node *parent = dm_block_data(shadow_current(s));
+@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ /*
+ * FIXME: fill out an array?
+ */
+- r = init_child(info, parent, left_index, &left);
++ r = init_child(info, vt, parent, left_index, &left);
+ if (r)
+ return r;
+
+- r = init_child(info, parent, left_index + 1, &center);
++ r = init_child(info, vt, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+- r = init_child(info, parent, left_index + 2, &right);
++ r = init_child(info, vt, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+ exit_child(info, &center);
+@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
+ }
+
+ static int rebalance_children(struct shadow_spine *s,
+- struct dm_btree_info *info, uint64_t key)
++ struct dm_btree_info *info,
++ struct dm_btree_value_type *vt, uint64_t key)
+ {
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
+ has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
+
+ if (!has_left_sibling)
+- r = rebalance2(s, info, i);
++ r = rebalance2(s, info, vt, i);
+
+ else if (!has_right_sibling)
+- r = rebalance2(s, info, i - 1);
++ r = rebalance2(s, info, vt, i - 1);
+
+ else
+- r = rebalance3(s, info, i - 1);
++ r = rebalance3(s, info, vt, i - 1);
+
+ return r;
+ }
+@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+- r = rebalance_children(s, info, key);
++ r = rebalance_children(s, info, vt, key);
+ if (r)
+ break;
+
+@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ return r;
+ }
+
++static struct dm_btree_value_type le64_type = {
++ .context = NULL,
++ .size = sizeof(__le64),
++ .inc = NULL,
++ .dec = NULL,
++ .equal = NULL
++};
++
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+ {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 202ae34..63e3c47 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1715,6 +1715,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+
+ bond_compute_features(bond);
+
++ bond_update_speed_duplex(new_slave);
++
+ read_lock(&bond->lock);
+
+ new_slave->last_arp_rx = jiffies;
+@@ -1758,8 +1760,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ new_slave->link = BOND_LINK_DOWN;
+ }
+
+- bond_update_speed_duplex(new_slave);
+-
+ if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ /* if there is a primary slave, remember it */
+ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
+@@ -2437,8 +2437,6 @@ static void bond_miimon_commit(struct bonding *bond)
+ bond_set_backup_slave(slave);
+ }
+
+- bond_update_speed_duplex(slave);
+-
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ bond->dev->name, slave->dev->name,
+ slave->speed, slave->duplex ? "full" : "half");
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index a6611f1..5d5a05f 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -650,25 +650,30 @@ static void efx_fini_channels(struct efx_nic *efx)
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
++ struct pci_dev *dev = efx->pci_dev;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+- rc = efx_nic_flush_queues(efx);
+- if (rc && EFX_WORKAROUND_7803(efx)) {
+- /* Schedule a reset to recover from the flush failure. The
+- * descriptor caches reference memory we're about to free,
+- * but falcon_reconfigure_mac_wrapper() won't reconnect
+- * the MACs because of the pending reset. */
+- netif_err(efx, drv, efx->net_dev,
+- "Resetting to recover from flush failure\n");
+- efx_schedule_reset(efx, RESET_TYPE_ALL);
+- } else if (rc) {
+- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+- } else {
+- netif_dbg(efx, drv, efx->net_dev,
+- "successfully flushed all queues\n");
++ /* Only perform flush if dma is enabled */
++ if (dev->is_busmaster) {
++ rc = efx_nic_flush_queues(efx);
++
++ if (rc && EFX_WORKAROUND_7803(efx)) {
++ /* Schedule a reset to recover from the flush failure. The
++ * descriptor caches reference memory we're about to free,
++ * but falcon_reconfigure_mac_wrapper() won't reconnect
++ * the MACs because of the pending reset. */
++ netif_err(efx, drv, efx->net_dev,
++ "Resetting to recover from flush failure\n");
++ efx_schedule_reset(efx, RESET_TYPE_ALL);
++ } else if (rc) {
++ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
++ } else {
++ netif_dbg(efx, drv, efx->net_dev,
++ "successfully flushed all queues\n");
++ }
+ }
+
+ efx_for_each_channel(channel, efx) {
+@@ -714,6 +719,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ unsigned i;
+ int rc;
+
++ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+@@ -757,6 +763,7 @@ out:
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return rc;
+
+ rollback:
+@@ -1525,8 +1532,12 @@ static void efx_stop_all(struct efx_nic *efx)
+ /* Flush efx_mac_work(), refill_workqueue, monitor_work */
+ efx_flush_all(efx);
+
+- /* Stop the kernel transmit interface late, so the watchdog
+- * timer isn't ticking over the flush */
++ /* Stop the kernel transmit interface. This is only valid if
++ * the device is stopped or detached; otherwise the watchdog
++ * may fire immediately.
++ */
++ WARN_ON(netif_running(efx->net_dev) &&
++ netif_device_present(efx->net_dev));
+ if (efx_dev_registered(efx)) {
+ netif_tx_stop_all_queues(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
+@@ -1827,10 +1838,11 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+- efx_stop_all(efx);
+-
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
++ efx_device_detach_sync(efx);
++ efx_stop_all(efx);
++
+ efx_fini_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+@@ -1843,6 +1855,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+ efx_init_channels(efx);
+
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return rc;
+ }
+
+@@ -2132,7 +2145,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+@@ -2580,7 +2593,7 @@ static int efx_pm_freeze(struct device *dev)
+
+ efx->state = STATE_FINI;
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
+index 1355245..9668d29 100644
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -149,4 +149,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
+ extern void efx_link_set_advertising(struct efx_nic *efx, u32);
+ extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
++static inline void efx_device_detach_sync(struct efx_nic *efx)
++{
++ struct net_device *dev = efx->net_dev;
++
++ /* Lock/freeze all TX queues so that we can be sure the
++ * TX scheduler is stopped when we're done and before
++ * netif_device_present() becomes false.
++ */
++ netif_tx_lock_bh(dev);
++ netif_device_detach(dev);
++ netif_tx_unlock_bh(dev);
++}
++
+ #endif /* EFX_EFX_H */
+diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
+index 97b606b..26cd6c0 100644
+--- a/drivers/net/ethernet/sfc/falcon.c
++++ b/drivers/net/ethernet/sfc/falcon.c
+@@ -1762,6 +1762,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+@@ -1804,6 +1805,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
+index 81a4253..c1000ce 100644
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -30,7 +30,7 @@
+ #define REBOOT_FLAG_PORT0 0x3f8
+ #define REBOOT_FLAG_PORT1 0x3fc
+
+-#define MCDI_RPC_TIMEOUT 10 /*seconds */
++#define MCDI_RPC_TIMEOUT (10 * HZ)
+
+ #define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+ static int efx_mcdi_poll(struct efx_nic *efx)
+ {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+- unsigned int time, finish;
++ unsigned long time, finish;
+ unsigned int respseq, respcmd, error;
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int rc, spins;
+@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ * and poll once a jiffy (approximately)
+ */
+ spins = TICK_USEC;
+- finish = get_seconds() + MCDI_RPC_TIMEOUT;
++ finish = jiffies + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ schedule_timeout_uninterruptible(1);
+ }
+
+- time = get_seconds();
++ time = jiffies;
+
+ rmb();
+ efx_readd(efx, &reg, pdu);
+@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ break;
+
+- if (time >= finish)
++ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+@@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
+ if (wait_event_timeout(
+ mcdi->wq,
+ atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+- msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
++ MCDI_RPC_TIMEOUT) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
+@@ -666,9 +666,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list)
+ {
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+- size_t outlen;
++ size_t outlen, offset, i;
+ int port_num = efx_port_num(efx);
+- int offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+@@ -688,10 +687,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+ if (mac_address)
+ memcpy(mac_address, outbuf + offset, ETH_ALEN);
+- if (fw_subtype_list)
+- memcpy(fw_subtype_list,
+- outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
++ if (fw_subtype_list) {
++ offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
++ for (i = 0;
++ i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 2;
++ i++) {
++ fw_subtype_list[i] =
++ le16_to_cpup((__le16 *)(outbuf + offset));
++ offset += 2;
++ }
++ }
+
+ return 0;
+
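The MCDI timeout rework above moves from get_seconds() to jiffies with time_after(), which stays correct across jiffies wraparound because the comparison is done via signed subtraction. A sketch of the wraparound-safe polling idiom; the poll loop and 10-second budget are illustrative, not the driver's exact code:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    static int poll_until_done(int (*done)(void))
    {
            unsigned long finish = jiffies + 10 * HZ;   /* 10 s budget */

            while (!done()) {
                    if (time_after(jiffies, finish))    /* wrap-safe */
                            return -ETIMEDOUT;
                    usleep_range(100, 200);             /* may sleep */
            }
            return 0;
    }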
+diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
+index aced2a7..b61eea0 100644
+--- a/drivers/net/ethernet/sfc/mcdi.h
++++ b/drivers/net/ethernet/sfc/mcdi.h
+@@ -126,5 +126,6 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
+ extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+ extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+ extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
++extern int efx_mcdi_set_mac(struct efx_nic *efx);
+
+ #endif /* EFX_MCDI_H */
+diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
+index 50c2077..da269d7 100644
+--- a/drivers/net/ethernet/sfc/mcdi_mac.c
++++ b/drivers/net/ethernet/sfc/mcdi_mac.c
+@@ -13,7 +13,7 @@
+ #include "mcdi.h"
+ #include "mcdi_pcol.h"
+
+-static int efx_mcdi_set_mac(struct efx_nic *efx)
++int efx_mcdi_set_mac(struct efx_nic *efx)
+ {
+ u32 reject, fcntl;
+ u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
+@@ -45,6 +45,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
++ if (efx->fc_disable)
++ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
+index b8e251a..8bcb8fd 100644
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -213,6 +213,7 @@ struct efx_tx_queue {
+ * If both this and page are %NULL, the buffer slot is currently free.
+ * @page: The associated page buffer, if any.
+ * If both this and skb are %NULL, the buffer slot is currently free.
++ * @page_offset: Offset within page. Valid iff @is_page is true.
+ * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
+ */
+@@ -222,7 +223,8 @@ struct efx_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
+- unsigned int len;
++ u16 page_offset;
++ u16 len;
+ bool is_page;
+ };
+
+@@ -689,6 +691,9 @@ struct efx_filter_state;
+ * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
+ * @multicast_hash: Multicast hash table
+ * @wanted_fc: Wanted flow control flags
++ * @fc_disable: When non-zero, flow control is disabled. Typically used to
++ * ensure that network back pressure doesn't delay dma queue flushes.
++ * Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+@@ -782,6 +787,7 @@ struct efx_nic {
+ bool promiscuous;
+ union efx_multicast_hash multicast_hash;
+ u8 wanted_fc;
++ unsigned fc_disable;
+
+ atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+@@ -835,6 +841,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
++ * @finish_flush: Clean up after flushing the DMA queues
+ * @update_stats: Update statistics not provided by event handling
+ * @start_stats: Start the regular fetching of statistics
+ * @stop_stats: Stop the regular fetching of statistics
+@@ -880,6 +887,7 @@ struct efx_nic_type {
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ void (*prepare_flush)(struct efx_nic *efx);
++ void (*finish_flush)(struct efx_nic *efx);
+ void (*update_stats)(struct efx_nic *efx);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
+index 3edfbaf..2e9ca10 100644
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -371,7 +371,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
++ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
++ && tx_queue->write_count - write_count == 1;
+ }
+
+ /* For each entry inserted into the software descriptor ring, create a
+@@ -1261,13 +1262,27 @@ int efx_nic_flush_queues(struct efx_nic *efx)
+ }
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+- tx_queue->flushed != FLUSH_DONE)
+- ++tx_pending;
++ tx_queue->flushed != FLUSH_DONE) {
++ efx_oword_t txd_ptr_tbl;
++
++ efx_reado_table(efx, &txd_ptr_tbl,
++ FR_BZ_TX_DESC_PTR_TBL,
++ tx_queue->queue);
++ if (EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_FLUSH) ||
++ EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_EN))
++ ++tx_pending;
++ else
++ tx_queue->flushed = FLUSH_DONE;
++ }
+ }
+ }
+
+- if (rx_pending == 0 && tx_pending == 0)
++ if (rx_pending == 0 && tx_pending == 0) {
++ efx->type->finish_flush(efx);
+ return 0;
++ }
+
+ msleep(EFX_FLUSH_INTERVAL);
+ efx_poll_flush_events(efx);
+@@ -1293,6 +1308,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
+ }
+ }
+
++ efx->type->finish_flush(efx);
+ return -ETIMEDOUT;
+ }
+
+diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
+index 66ece48..58302a2 100644
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -210,6 +210,8 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+ /* Global Resources */
+ extern int efx_nic_flush_queues(struct efx_nic *efx);
++extern void siena_prepare_flush(struct efx_nic *efx);
++extern void siena_finish_flush(struct efx_nic *efx);
+ extern void falcon_start_nic_stats(struct efx_nic *efx);
+ extern void falcon_stop_nic_stats(struct efx_nic *efx);
+ extern void falcon_setup_xaui(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 5ef4cc0..9ce8665 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -95,11 +95,7 @@ static unsigned int rx_refill_limit = 95;
+ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
+ {
+- /* Offset is always within one page, so we don't need to consider
+- * the page order.
+- */
+- return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+- efx->type->rx_buffer_hash_size);
++ return buf->page_offset + efx->type->rx_buffer_hash_size;
+ }
+ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+ {
+@@ -194,6 +190,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
++ unsigned int page_offset;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+@@ -220,12 +217,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
++ page_offset = sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
++ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
+ ++rx_queue->added_count;
+@@ -237,6 +236,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
++ page_offset += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
+ }
+@@ -246,7 +246,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ }
+
+ static void efx_unmap_rx_buffer(struct efx_nic *efx,
+- struct efx_rx_buffer *rx_buf)
++ struct efx_rx_buffer *rx_buf,
++ unsigned int used_len)
+ {
+ if (rx_buf->is_page && rx_buf->u.page) {
+ struct efx_rx_page_state *state;
+@@ -257,6 +258,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ state->dma_addr,
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
++ } else if (used_len) {
++ dma_sync_single_for_cpu(&efx->pci_dev->dev,
++ rx_buf->dma_addr, used_len,
++ DMA_FROM_DEVICE);
+ }
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
+@@ -279,7 +284,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
+ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+ {
+- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
++ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
+ efx_free_rx_buffer(rx_queue->efx, rx_buf);
+ }
+
+@@ -550,10 +555,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ goto out;
+ }
+
+- /* Release card resources - assumes all RX buffers consumed in-order
+- * per RX queue
++ /* Release and/or sync DMA mapping - assumes all RX buffers
++ * consumed in-order per RX queue
+ */
+- efx_unmap_rx_buffer(efx, rx_buf);
++ efx_unmap_rx_buffer(efx, rx_buf, len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
+index 822f6c2..4907885 100644
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -698,7 +698,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
+index cc2549c..c58b973 100644
+--- a/drivers/net/ethernet/sfc/siena.c
++++ b/drivers/net/ethernet/sfc/siena.c
+@@ -137,6 +137,18 @@ static void siena_remove_port(struct efx_nic *efx)
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+ }
+
++void siena_prepare_flush(struct efx_nic *efx)
++{
++ if (efx->fc_disable++ == 0)
++ efx_mcdi_set_mac(efx);
++}
++
++void siena_finish_flush(struct efx_nic *efx)
++{
++ if (--efx->fc_disable == 0)
++ efx_mcdi_set_mac(efx);
++}
++
+ static const struct efx_nic_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+@@ -624,7 +636,8 @@ const struct efx_nic_type siena_a0_nic_type = {
+ .reset = siena_reset_hw,
+ .probe_port = siena_probe_port,
+ .remove_port = siena_remove_port,
+- .prepare_flush = efx_port_dummy_op_void,
++ .prepare_flush = siena_prepare_flush,
++ .finish_flush = siena_finish_flush,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = siena_start_nic_stats,
+ .stop_stats = siena_stop_nic_stats,
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 97f342e..544ac06 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -585,6 +585,7 @@ void macvlan_common_setup(struct net_device *dev)
+ ether_setup(dev);
+
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
++ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->netdev_ops = &macvlan_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->header_ops = &macvlan_hard_header_ops,
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index 01b104e..bff6908 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -630,6 +630,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ goto done;
+
+ spin_lock_irqsave(&target_list_lock, flags);
++restart:
+ list_for_each_entry(nt, &target_list, list) {
+ netconsole_target_get(nt);
+ if (nt->np.dev == dev) {
+@@ -642,20 +643,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ case NETDEV_UNREGISTER:
+ /*
+ * rtnl_lock already held
++ * we might sleep in __netpoll_cleanup()
+ */
+- if (nt->np.dev) {
+- spin_unlock_irqrestore(
+- &target_list_lock,
+- flags);
+- __netpoll_cleanup(&nt->np);
+- spin_lock_irqsave(&target_list_lock,
+- flags);
+- dev_put(nt->np.dev);
+- nt->np.dev = NULL;
+- }
++ spin_unlock_irqrestore(&target_list_lock, flags);
++ __netpoll_cleanup(&nt->np);
++ spin_lock_irqsave(&target_list_lock, flags);
++ dev_put(nt->np.dev);
++ nt->np.dev = NULL;
+ nt->enabled = 0;
+ stopped = true;
+- break;
++ netconsole_target_put(nt);
++ goto restart;
+ }
+ }
+ netconsole_target_put(nt);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index a12c9bf..f4c5de6 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -417,6 +417,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ * for indefinite time. */
+ skb_orphan(skb);
+
++ nf_reset(skb);
++
+ /* Enqueue packet */
+ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+
+diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
+index 62b4c29..d48dfb7 100644
+--- a/drivers/net/wireless/mwifiex/join.c
++++ b/drivers/net/wireless/mwifiex/join.c
+@@ -1062,10 +1062,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
+ adhoc_join->bss_descriptor.bssid,
+ adhoc_join->bss_descriptor.ssid);
+
+- for (i = 0; bss_desc->supported_rates[i] &&
+- i < MWIFIEX_SUPPORTED_RATES;
+- i++)
+- ;
++ for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
++ bss_desc->supported_rates[i]; i++)
++ ;
+ rates_size = i;
+
+ /* Copy Data Rates from the Rates recorded in scan response */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index 814c05d..d3920da 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -1557,74 +1557,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
+
+ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+ {
+- /* dummy routine needed for callback from rtl_op_configure_filter() */
+-}
+-
+-/*========================================================================== */
+-
+-static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+- enum nl80211_iftype type)
+-{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+- struct rtl_phy *rtlphy = &(rtlpriv->phy);
+- u8 filterout_non_associated_bssid = false;
++ u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+
+- switch (type) {
+- case NL80211_IFTYPE_ADHOC:
+- case NL80211_IFTYPE_STATION:
+- filterout_non_associated_bssid = true;
+- break;
+- case NL80211_IFTYPE_UNSPECIFIED:
+- case NL80211_IFTYPE_AP:
+- default:
+- break;
+- }
+- if (filterout_non_associated_bssid) {
++ if (rtlpriv->psc.rfpwr_state != ERFON)
++ return;
++
++ if (check_bssid) {
++ u8 tmp;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+- switch (rtlphy->current_io_type) {
+- case IO_CMD_RESUME_DM_BY_SCAN:
+- reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+- rtlpriv->cfg->ops->set_hw_reg(hw,
+- HW_VAR_RCR, (u8 *)(&reg_rcr));
+- /* enable update TSF */
+- _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+- break;
+- case IO_CMD_PAUSE_DM_BY_SCAN:
+- reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+- rtlpriv->cfg->ops->set_hw_reg(hw,
+- HW_VAR_RCR, (u8 *)(&reg_rcr));
+- /* disable update TSF */
+- _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+- break;
+- }
++ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
++ tmp = BIT(4);
+ } else {
+- reg_rcr |= (RCR_CBSSID);
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
++ reg_rcr |= RCR_CBSSID;
++ tmp = BIT(4) | BIT(5);
+ }
+- } else if (filterout_non_associated_bssid == false) {
++ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
++ (u8 *) (&reg_rcr));
++ _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
++ } else {
++ u8 tmp;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+- reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
++ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
++ tmp = BIT(4);
+ } else {
+- reg_rcr &= (~RCR_CBSSID);
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
++ reg_rcr &= ~RCR_CBSSID;
++ tmp = BIT(4) | BIT(5);
+ }
++ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
++ rtlpriv->cfg->ops->set_hw_reg(hw,
++ HW_VAR_RCR, (u8 *) (&reg_rcr));
++ _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
+ }
+ }
+
++/*========================================================================== */
++
+ int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+ {
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++
+ if (_rtl92cu_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+- _rtl92cu_set_check_bssid(hw, type);
++
++ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
++ if (type != NL80211_IFTYPE_AP)
++ rtl92cu_set_check_bssid(hw, true);
++ } else {
++ rtl92cu_set_check_bssid(hw, false);
++ }
++
+ return 0;
+ }
+
+@@ -2238,8 +2221,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+- REG_ARFR0)));
+ }
+
+ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index e18604b..d19b879 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -49,7 +49,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
+ tty->packet = 0;
+ if (!tty->link)
+ return;
+- tty->link->packet = 0;
+ set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+ wake_up_interruptible(&tty->link->read_wait);
+ wake_up_interruptible(&tty->link->write_wait);
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index ad0f8f5..2dc4d9b 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
+ #define UART_NR 4
+
+ static struct uart_sunsu_port sunsu_ports[UART_NR];
++static int nr_inst; /* Number of already registered ports */
+
+ #ifdef CONFIG_SERIO
+
+@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
+ printk("Console: ttyS%d (SU)\n",
+ (sunsu_reg.minor - 64) + co->index);
+
+- /*
+- * Check whether an invalid uart number has been specified, and
+- * if so, search for the first available port that does have
+- * console support.
+- */
+- if (co->index >= UART_NR)
+- co->index = 0;
++ if (co->index > nr_inst)
++ return -ENODEV;
+ port = &sunsu_ports[co->index].port;
+
+ /*
+@@ -1408,7 +1404,6 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
+
+ static int __devinit su_probe(struct platform_device *op)
+ {
+- static int inst;
+ struct device_node *dp = op->dev.of_node;
+ struct uart_sunsu_port *up;
+ struct resource *rp;
+@@ -1418,16 +1413,16 @@ static int __devinit su_probe(struct platform_device *op)
+
+ type = su_get_type(dp);
+ if (type == SU_PORT_PORT) {
+- if (inst >= UART_NR)
++ if (nr_inst >= UART_NR)
+ return -EINVAL;
+- up = &sunsu_ports[inst];
++ up = &sunsu_ports[nr_inst];
+ } else {
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+ }
+
+- up->port.line = inst;
++ up->port.line = nr_inst;
+
+ spin_lock_init(&up->port.lock);
+
+@@ -1461,6 +1456,8 @@ static int __devinit su_probe(struct platform_device *op)
+ }
+ dev_set_drvdata(&op->dev, up);
+
++ nr_inst++;
++
+ return 0;
+ }
+
+@@ -1488,7 +1485,7 @@ static int __devinit su_probe(struct platform_device *op)
+
+ dev_set_drvdata(&op->dev, up);
+
+- inst++;
++ nr_inst++;
+
+ return 0;
+
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 61d08dd..76be3ba 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ struct hc_driver *driver;
+ struct usb_hcd *hcd;
+ int retval;
++ int hcd_irq = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ return -ENODEV;
+ dev->current_state = PCI_D0;
+
+- /* The xHCI driver supports MSI and MSI-X,
+- * so don't fail if the BIOS doesn't provide a legacy IRQ.
++ /*
++ * The xHCI driver has its own irq management;
++ * make sure irq setup is not touched for xhci in generic hcd code.
+ */
+- if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
+- dev_err(&dev->dev,
+- "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+- pci_name(dev));
+- retval = -ENODEV;
+- goto disable_pci;
++ if ((driver->flags & HCD_MASK) != HCD_USB3) {
++ if (!dev->irq) {
++ dev_err(&dev->dev,
++ "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
++ pci_name(dev));
++ retval = -ENODEV;
++ goto disable_pci;
++ }
++ hcd_irq = dev->irq;
+ }
+
+ hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
+@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ pci_set_master(dev);
+
+- retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
++ retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
+ if (retval != 0)
+ goto unmap_registers;
+ set_hs_companion(dev, hcd);
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 901924a..d433fdf 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -213,7 +213,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
+ udc->driver->disconnect(udc->gadget);
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+- usb_gadget_udc_stop(udc->gadget, udc->driver);
++ usb_gadget_udc_stop(udc->gadget, NULL);
+ } else {
+ usb_gadget_stop(udc->gadget, udc->driver);
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 53c8be1..2c0350f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -342,7 +342,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ * generate interrupts. Don't even try to enable MSI.
+ */
+ if (xhci->quirks & XHCI_BROKEN_MSI)
+- return 0;
++ goto legacy_irq;
+
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+@@ -363,6 +363,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ return -EINVAL;
+ }
+
++ legacy_irq:
+ /* fall back to legacy interrupt*/
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc368c2..c519a31 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -206,8 +206,8 @@ struct xhci_op_regs {
+ /* bits 12:31 are reserved (and should be preserved on writes). */
+
+ /* IMAN - Interrupt Management Register */
+-#define IMAN_IP (1 << 1)
+-#define IMAN_IE (1 << 0)
++#define IMAN_IE (1 << 1)
++#define IMAN_IP (1 << 0)
+
+ /* USBSTS - USB status - status bitmasks */
+ /* HC not running - set to 1 when run/stop bit is cleared. */
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 1a49ca9..e664bac 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -973,10 +973,7 @@ static void garmin_close(struct usb_serial_port *port)
+ if (!serial)
+ return;
+
+- mutex_lock(&port->serial->disc_mutex);
+-
+- if (!port->serial->disconnected)
+- garmin_clear(garmin_data_p);
++ garmin_clear(garmin_data_p);
+
+ /* shutdown our urbs */
+ usb_kill_urb(port->read_urb);
+@@ -985,8 +982,6 @@ static void garmin_close(struct usb_serial_port *port)
+ /* keep reset state so we know that we must start a new session */
+ if (garmin_data_p->state != STATE_RESET)
+ garmin_data_p->state = STATE_DISCONNECTED;
+-
+- mutex_unlock(&port->serial->disc_mutex);
+ }
+
+
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 3de751d..1f145bf 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2796,6 +2796,7 @@ static struct usb_serial_driver edgeport_2port_device = {
+ .set_termios = edge_set_termios,
+ .tiocmget = edge_tiocmget,
+ .tiocmset = edge_tiocmset,
++ .get_icount = edge_get_icount,
+ .write = edge_write,
+ .write_room = edge_write_room,
+ .chars_in_buffer = edge_chars_in_buffer,
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index dc1ce62..2482d5e 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -168,6 +168,7 @@ static void destroy_serial(struct kref *kref)
+ }
+ }
+
++ usb_put_intf(serial->interface);
+ usb_put_dev(serial->dev);
+ kfree(serial);
+ }
+@@ -624,7 +625,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
+ }
+ serial->dev = usb_get_dev(dev);
+ serial->type = driver;
+- serial->interface = interface;
++ serial->interface = usb_get_intf(interface);
+ kref_init(&serial->kref);
+ mutex_init(&serial->disc_mutex);
+ serial->minor = SERIAL_TTY_NO_MINOR;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index fa8a1b2..7b8d564 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
+
++/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
++UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
++ "Samsung",
++ "YP-Z3",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+ * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
+ * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index b76071e..5c58128 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -234,7 +234,8 @@ static void handle_tx(struct vhost_net *net)
+ msg.msg_controllen = 0;
+ ubufs = NULL;
+ } else {
+- struct ubuf_info *ubuf = &vq->ubuf_info[head];
++ struct ubuf_info *ubuf;
++ ubuf = vq->ubuf_info + vq->upend_idx;
+
+ vq->heads[vq->upend_idx].len = len;
+ ubuf->callback = vhost_zerocopy_callback;
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+index cfd1ce3..1d36db1 100644
+--- a/fs/cifs/asn1.c
++++ b/fs/cifs/asn1.c
+@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ }
+ }
+
+- /* mechlistMIC */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- /* Check if we have reached the end of the blob, but with
+- no mechListMic (e.g. NTLMSSP instead of KRB5) */
+- if (ctx.error == ASN1_ERR_DEC_EMPTY)
+- goto decode_negtoken_exit;
+- cFYI(1, "Error decoding last part negTokenInit exit3");
+- return 0;
+- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+- /* tag = 3 indicating mechListMIC */
+- cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+-
+- /* sequence */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit5");
+- return 0;
+- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+- || (tag != ASN1_SEQ)) {
+- cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- }
+-
+- /* sequence of */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit 7");
+- return 0;
+- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+- cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+-
+- /* general string */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit9");
+- return 0;
+- } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
+- || (tag != ASN1_GENSTR)) {
+- cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+- cFYI(1, "Need to call asn1_octets_decode() function for %s",
+- ctx.pointer); /* is this UTF-8 or ASCII? */
+-decode_negtoken_exit:
++ /*
++ * We currently ignore anything at the end of the SPNEGO blob after
++ * the mechTypes have been parsed, since none of that info is
++ * used at the moment.
++ */
+ return 1;
+ }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b3a2a40..25bb97f 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -90,6 +90,30 @@ extern mempool_t *cifs_sm_req_poolp;
+ extern mempool_t *cifs_req_poolp;
+ extern mempool_t *cifs_mid_poolp;
+
++/*
++ * Bumps refcount for cifs super block.
++ * Note that it should only be called if a reference to the VFS super block is
++ * already held, e.g. in open-type syscalls context. Otherwise it can race with
++ * atomic_dec_and_test in deactivate_locked_super.
++ */
++void
++cifs_sb_active(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_inc_return(&server->active) == 1)
++ atomic_inc(&sb->s_active);
++}
++
++void
++cifs_sb_deactive(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_dec_and_test(&server->active))
++ deactivate_super(sb);
++}
++
+ static int
+ cifs_read_super(struct super_block *sb)
+ {
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 30ff560..c91ea81 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
+ extern const struct address_space_operations cifs_addr_ops;
+ extern const struct address_space_operations cifs_addr_ops_smallbuf;
+
++/* Functions related to super block operations */
++extern void cifs_sb_active(struct super_block *sb);
++extern void cifs_sb_deactive(struct super_block *sb);
++
+ /* Functions related to inodes */
+ extern const struct inode_operations cifs_dir_inode_ops;
+ extern struct inode *cifs_root_iget(struct super_block *);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 51574d4..c55808e 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -265,6 +265,8 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
+ mutex_init(&pCifsFile->fh_mutex);
+ INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+
++ cifs_sb_active(inode->i_sb);
++
+ spin_lock(&cifs_file_list_lock);
+ list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
+ /* if readable file instance put first in list*/
+@@ -293,7 +295,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ struct inode *inode = cifs_file->dentry->d_inode;
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct super_block *sb = inode->i_sb;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsLockInfo *li, *tmp;
+
+ spin_lock(&cifs_file_list_lock);
+@@ -345,6 +348,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
++ cifs_sb_deactive(sb);
+ kfree(cifs_file);
+ }
+
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 484ffee..2845a1f 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -571,7 +571,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ brelse(bitmap_bh);
+ printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
+ ", computed = %llu, %llu\n",
+- EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
++ EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+ desc_count, bitmap_count);
+ return bitmap_count;
+ #else
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 8cb184c..60b6ca5 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -309,9 +309,9 @@ struct ext4_group_desc
+ */
+
+ struct flex_groups {
+- atomic_t free_inodes;
+- atomic_t free_clusters;
+- atomic_t used_dirs;
++ atomic64_t free_clusters;
++ atomic_t free_inodes;
++ atomic_t used_dirs;
+ };
+
+ #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index b48e0dc..ce0bc25 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2960,6 +2960,7 @@ static int ext4_split_extent(handle_t *handle,
+ int err = 0;
+ int uninitialized;
+ int split_flag1, flags1;
++ int allocated = map->m_len;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -2979,6 +2980,8 @@ static int ext4_split_extent(handle_t *handle,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (err)
+ goto out;
++ } else {
++ allocated = ee_len - (map->m_lblk - ee_block);
+ }
+
+ ext4_ext_drop_refs(path);
+@@ -3001,7 +3004,7 @@ static int ext4_split_extent(handle_t *handle,
+
+ ext4_ext_show_leaf(inode, path);
+ out:
+- return err ? err : map->m_len;
++ return err ? err : allocated;
+ }
+
+ #define EXT4_EXT_ZERO_LEN 7
+@@ -3663,6 +3666,7 @@ out:
+ allocated - map->m_len);
+ allocated = map->m_len;
+ }
++ map->m_len = allocated;
+
+ /*
+ * If we have done fallocate with the offset that is already
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 6266799..6d1f577 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -294,8 +294,8 @@ error_return:
+ }
+
+ struct orlov_stats {
++ __u64 free_clusters;
+ __u32 free_inodes;
+- __u32 free_clusters;
+ __u32 used_dirs;
+ };
+
+@@ -312,7 +312,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+
+ if (flex_size > 1) {
+ stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+- stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
++ stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+ stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ return;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4b2bb75..3270ffd 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -142,7 +142,8 @@ void ext4_evict_inode(struct inode *inode)
+ * don't use page cache.
+ */
+ if (ext4_should_journal_data(inode) &&
+- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
++ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
++ inode->i_ino != EXT4_JOURNAL_INO) {
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 553ff71..7b18563 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2866,8 +2866,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi,
+ ac->ac_b_ex.fe_group);
+- atomic_sub(ac->ac_b_ex.fe_len,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_sub(ac->ac_b_ex.fe_len,
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -3485,7 +3485,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ win = offs;
+
+ ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
+- EXT4_B2C(sbi, win);
++ EXT4_NUM_B2C(sbi, win);
+ BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ }
+@@ -4634,7 +4634,7 @@ do_more:
+ EXT4_BLOCKS_PER_GROUP(sb);
+ count -= overflow;
+ }
+- count_clusters = EXT4_B2C(sbi, count);
++ count_clusters = EXT4_NUM_B2C(sbi, count);
+ bitmap_bh = ext4_read_block_bitmap(sb, block_group);
+ if (!bitmap_bh) {
+ err = -EIO;
+@@ -4724,8 +4724,8 @@ do_more:
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+- atomic_add(count_clusters,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(count_clusters,
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ ext4_mb_unload_buddy(&e4b);
+@@ -4865,12 +4865,12 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+ ext4_unlock_group(sb, block_group);
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+- EXT4_B2C(sbi, blocks_freed));
++ EXT4_NUM_B2C(sbi, blocks_freed));
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+- atomic_add(EXT4_B2C(sbi, blocks_freed),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 33129c0..6e67b97 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -938,7 +938,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+
+ /* Update the free space counts */
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+- EXT4_B2C(sbi, input->free_blocks_count));
++ EXT4_NUM_B2C(sbi, input->free_blocks_count));
+ percpu_counter_add(&sbi->s_freeinodes_counter,
+ EXT4_INODES_PER_GROUP(sb));
+
+@@ -946,8 +946,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+ sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group;
+ flex_group = ext4_flex_group(sbi, input->group);
+- atomic_add(EXT4_B2C(sbi, input->free_blocks_count),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(EXT4_NUM_B2C(sbi, input->free_blocks_count),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic_add(EXT4_INODES_PER_GROUP(sb),
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 24ac7a2..cc386b2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2047,8 +2047,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ flex_group = ext4_flex_group(sbi, i);
+ atomic_add(ext4_free_inodes_count(sb, gdp),
+ &sbi->s_flex_groups[flex_group].free_inodes);
+- atomic_add(ext4_free_group_clusters(sb, gdp),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(ext4_free_group_clusters(sb, gdp),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic_add(ext4_used_dirs_count(sb, gdp),
+ &sbi->s_flex_groups[flex_group].used_dirs);
+ }
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 516eb21..fd88add 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -135,6 +135,7 @@ isofs_export_encode_fh(struct dentry *dentry,
+ len = 3;
+ fh32[0] = ei->i_iget5_block;
+ fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
++ fh16[3] = 0; /* avoid leaking uninitialized data */
+ fh32[2] = inode->i_generation;
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index d7dd774..6ac5bb1 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1016,9 +1016,12 @@ out:
+ void jbd2_journal_set_triggers(struct buffer_head *bh,
+ struct jbd2_buffer_trigger_type *type)
+ {
+- struct journal_head *jh = bh2jh(bh);
++ struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
+
++ if (WARN_ON(!jh))
++ return;
+ jh->b_triggers = type;
++ jbd2_journal_put_journal_head(jh);
+ }
+
+ void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
+@@ -1070,17 +1073,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ {
+ transaction_t *transaction = handle->h_transaction;
+ journal_t *journal = transaction->t_journal;
+- struct journal_head *jh = bh2jh(bh);
++ struct journal_head *jh;
+ int ret = 0;
+
+- jbd_debug(5, "journal_head %p\n", jh);
+- JBUFFER_TRACE(jh, "entry");
+ if (is_handle_aborted(handle))
+ goto out;
+- if (!buffer_jbd(bh)) {
++ jh = jbd2_journal_grab_journal_head(bh);
++ if (!jh) {
+ ret = -EUCLEAN;
+ goto out;
+ }
++ jbd_debug(5, "journal_head %p\n", jh);
++ JBUFFER_TRACE(jh, "entry");
+
+ jbd_lock_bh_state(bh);
+
+@@ -1171,6 +1175,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ spin_unlock(&journal->j_list_lock);
+ out_unlock_bh:
+ jbd_unlock_bh_state(bh);
++ jbd2_journal_put_journal_head(jh);
+ out:
+ JBUFFER_TRACE(jh, "exit");
+ WARN_ON(ret); /* All errors are bugs, so dump the stack */
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 7737c54..00f08b3 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -427,12 +427,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
+
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+- struct inode * inode;
++ struct inode *inode = new_inode_pseudo(sb);
+
+- inode = iget_locked(sb, de->low_ino);
+- if (!inode)
+- return NULL;
+- if (inode->i_state & I_NEW) {
++ if (inode) {
++ inode->i_ino = de->low_ino;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ PROC_I(inode)->fd = 0;
+ PROC_I(inode)->pde = de;
+@@ -461,9 +459,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ inode->i_fop = de->proc_fops;
+ }
+ }
+- unlock_new_inode(inode);
+- } else
+- pde_put(de);
++ }
+ return inode;
+ }
+
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 4639e13..71c97fb 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1293,6 +1293,7 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
+ *lenp = 3;
+ fid->udf.block = location.logicalBlockNum;
+ fid->udf.partref = location.partitionReferenceNum;
++ fid->udf.parent_partref = 0;
+ fid->udf.generation = inode->i_generation;
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
+index 555c0ae..743f7a5 100644
+--- a/include/asm-generic/signal.h
++++ b/include/asm-generic/signal.h
+@@ -99,6 +99,10 @@ typedef unsigned long old_sigset_t;
+
+ #include <asm-generic/signal-defs.h>
+
++#ifdef SA_RESTORER
++#define __ARCH_HAS_SA_RESTORER
++#endif
++
+ struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index ce95a4b..8469f3f 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -484,7 +484,8 @@ struct efivars {
+ * 1) ->list - adds, removals, reads, writes
+ * 2) ops.[gs]et_variable() calls.
+ * It must not be held when creating sysfs entries or calling kmalloc.
+- * ops.get_next_variable() is only called from register_efivars(),
++ * ops.get_next_variable() is only called from register_efivars()
++ * or efivar_update_sysfs_entries(),
+ * which is protected by the BKL, so that path is safe.
+ */
+ spinlock_t lock;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index b669be6..9b9b2aa 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1186,6 +1186,12 @@ static inline void perf_event_disable(struct perf_event *event) { }
+ static inline void perf_event_task_tick(void) { }
+ #endif
+
++#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
++extern void perf_restore_debug_store(void);
++#else
++static inline void perf_restore_debug_store(void) { }
++#endif
++
+ #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+
+ /*
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 53dc7e7..da65890 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -455,7 +455,7 @@ struct sk_buff {
+ union {
+ __u32 mark;
+ __u32 dropcount;
+- __u32 avail_size;
++ __u32 reserved_tailroom;
+ };
+
+ __u16 vlan_tci;
+@@ -1332,7 +1332,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
+ */
+ static inline int skb_availroom(const struct sk_buff *skb)
+ {
+- return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
++ if (skb_is_nonlinear(skb))
++ return 0;
++
++ return skb->end - skb->tail - skb->reserved_tailroom;
+ }
+
+ /**
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 16ff29a..b289bd2 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -33,6 +33,13 @@ struct inet_frag_queue {
+
+ #define INETFRAGS_HASHSZ 64
+
++/* averaged:
++ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
++ * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
++ * struct frag_queue))
++ */
++#define INETFRAGS_MAXDEPTH 128
++
+ struct inet_frags {
+ struct hlist_head hash[INETFRAGS_HASHSZ];
+ rwlock_t lock;
+@@ -64,6 +71,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key, unsigned int hash)
+ __releases(&f->lock);
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix);
+
+ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+ {
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 10422ef..2124004 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -129,18 +129,16 @@ struct fib_result_nl {
+ };
+
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+-
+-#define FIB_TABLE_HASHSZ 2
+-
+ #else /* CONFIG_IP_ROUTE_MULTIPATH */
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
++#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
++#ifdef CONFIG_IP_MULTIPLE_TABLES
+ #define FIB_TABLE_HASHSZ 256
+-
+-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
++#else
++#define FIB_TABLE_HASHSZ 2
++#endif
+
+ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 71e1816..ea76d30 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -481,7 +481,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ if (force_default || ka->sa.sa_handler != SIG_IGN)
+ ka->sa.sa_handler = SIG_DFL;
+ ka->sa.sa_flags = 0;
+-#ifdef SA_RESTORER
++#ifdef __ARCH_HAS_SA_RESTORER
+ ka->sa.sa_restorer = NULL;
+ #endif
+ sigemptyset(&ka->sa.sa_mask);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6c880e8..0943d2a 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2725,8 +2725,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ continue;
+ }
+
+- hlist_del(&entry->node);
+- call_rcu(&entry->rcu, ftrace_free_entry_rcu);
++ hlist_del_rcu(&entry->node);
++ call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
+ }
+ }
+ __disable_ftrace_function_probe();
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5638104..17edb14 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -652,7 +652,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ void
+ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ {
+- struct ring_buffer *buf = tr->buffer;
++ struct ring_buffer *buf;
+
+ if (trace_stop_count)
+ return;
+@@ -664,6 +664,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ }
+ arch_spin_lock(&ftrace_max_lock);
+
++ buf = tr->buffer;
+ tr->buffer = max_tr.buffer;
+ max_tr.buffer = buf;
+
+@@ -2635,11 +2636,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ return -EINVAL;
+ }
+
+-static void set_tracer_flags(unsigned int mask, int enabled)
++/* Some tracers require overwrite to stay enabled */
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
++{
++ if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
++ return -1;
++
++ return 0;
++}
++
++int set_tracer_flag(unsigned int mask, int enabled)
+ {
+ /* do nothing if flag is already set */
+ if (!!(trace_flags & mask) == !!enabled)
+- return;
++ return 0;
++
++ /* Give the tracer a chance to approve the change */
++ if (current_trace->flag_changed)
++ if (current_trace->flag_changed(current_trace, mask, !!enabled))
++ return -EINVAL;
+
+ if (enabled)
+ trace_flags |= mask;
+@@ -2649,8 +2664,14 @@ static void set_tracer_flags(unsigned int mask, int enabled)
+ if (mask == TRACE_ITER_RECORD_CMD)
+ trace_event_enable_cmd_record(enabled);
+
+- if (mask == TRACE_ITER_OVERWRITE)
++ if (mask == TRACE_ITER_OVERWRITE) {
+ ring_buffer_change_overwrite(global_trace.buffer, enabled);
++#ifdef CONFIG_TRACER_MAX_TRACE
++ ring_buffer_change_overwrite(max_tr.buffer, enabled);
++#endif
++ }
++
++ return 0;
+ }
+
+ static ssize_t
+@@ -2660,7 +2681,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ char buf[64];
+ char *cmp;
+ int neg = 0;
+- int ret;
++ int ret = 0;
+ int i;
+
+ if (cnt >= sizeof(buf))
+@@ -2677,21 +2698,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ cmp += 2;
+ }
+
++ mutex_lock(&trace_types_lock);
++
+ for (i = 0; trace_options[i]; i++) {
+ if (strcmp(cmp, trace_options[i]) == 0) {
+- set_tracer_flags(1 << i, !neg);
++ ret = set_tracer_flag(1 << i, !neg);
+ break;
+ }
+ }
+
+ /* If no option could be set, test the specific tracer options */
+- if (!trace_options[i]) {
+- mutex_lock(&trace_types_lock);
++ if (!trace_options[i])
+ ret = set_tracer_option(current_trace, cmp, neg);
+- mutex_unlock(&trace_types_lock);
+- if (ret)
+- return ret;
+- }
++
++ mutex_unlock(&trace_types_lock);
++
++ if (ret)
++ return ret;
+
+ *ppos += cnt;
+
+@@ -3015,6 +3038,9 @@ static int tracing_set_tracer(const char *buf)
+ goto out;
+
+ trace_branch_disable();
++
++ current_trace->enabled = false;
++
+ if (current_trace && current_trace->reset)
+ current_trace->reset(tr);
+ if (current_trace && current_trace->use_max_tr) {
+@@ -3044,6 +3070,7 @@ static int tracing_set_tracer(const char *buf)
+ goto out;
+ }
+
++ current_trace->enabled = true;
+ trace_branch_enable(tr);
+ out:
+ mutex_unlock(&trace_types_lock);
+@@ -4378,7 +4405,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+- set_tracer_flags(1 << index, val);
++
++ mutex_lock(&trace_types_lock);
++ ret = set_tracer_flag(1 << index, val);
++ mutex_unlock(&trace_types_lock);
++
++ if (ret < 0)
++ return ret;
+
+ *ppos += cnt;
+
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 092e1f8..c3c3f6b 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -271,10 +271,14 @@ struct tracer {
+ enum print_line_t (*print_line)(struct trace_iterator *iter);
+ /* If you handled the flag setting, return 0 */
+ int (*set_flag)(u32 old_flags, u32 bit, int set);
++ /* Return 0 if OK with change, else return non-zero */
++ int (*flag_changed)(struct tracer *tracer,
++ u32 mask, int set);
+ struct tracer *next;
+ struct tracer_flags *flags;
+ int print_max;
+ int use_max_tr;
++ bool enabled;
+ };
+
+
+@@ -815,6 +819,9 @@ extern struct list_head ftrace_events;
+ extern const char *__start___trace_bprintk_fmt[];
+ extern const char *__stop___trace_bprintk_fmt[];
+
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
++int set_tracer_flag(unsigned int mask, int enabled);
++
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
+ extern struct ftrace_event_call \
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 20dad0d..1626e1a 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -32,7 +32,7 @@ enum {
+
+ static int trace_type __read_mostly;
+
+-static int save_lat_flag;
++static int save_flags;
+
+ static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+@@ -546,8 +546,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
+
+ static void __irqsoff_tracer_init(struct trace_array *tr)
+ {
+- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+- trace_flags |= TRACE_ITER_LATENCY_FMT;
++ save_flags = trace_flags;
++
++ /* non overwrite screws up the latency tracers */
++ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+
+ tracing_max_latency = 0;
+ irqsoff_trace = tr;
+@@ -561,10 +564,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
+
+ static void irqsoff_tracer_reset(struct trace_array *tr)
+ {
++ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ stop_irqsoff_tracer(tr, is_graph());
+
+- if (!save_lat_flag)
+- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+
+ static void irqsoff_tracer_start(struct trace_array *tr)
+@@ -597,6 +603,7 @@ static struct tracer irqsoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_irqsoff,
+ #endif
+@@ -630,6 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptoff,
+ #endif
+@@ -665,6 +673,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptirqsoff,
+ #endif
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index e4a70c0..6857e0c 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+
+-static int save_lat_flag;
++static int save_flags;
+
+ #define TRACE_DISPLAY_GRAPH 1
+
+@@ -528,8 +528,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
+
+ static int __wakeup_tracer_init(struct trace_array *tr)
+ {
+- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+- trace_flags |= TRACE_ITER_LATENCY_FMT;
++ save_flags = trace_flags;
++
++ /* non overwrite screws up the latency tracers */
++ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+
+ tracing_max_latency = 0;
+ wakeup_trace = tr;
+@@ -551,12 +554,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
+
+ static void wakeup_tracer_reset(struct trace_array *tr)
+ {
++ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ stop_wakeup_tracer(tr);
+ /* make sure we put back any tasks we are tracing */
+ wakeup_reset(tr);
+
+- if (!save_lat_flag)
+- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+
+ static void wakeup_tracer_start(struct trace_array *tr)
+@@ -582,6 +588,7 @@ static struct tracer wakeup_tracer __read_mostly =
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+ #endif
+@@ -603,6 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+ #endif
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d6c0fdf..4c7d42a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2092,8 +2092,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
+ /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
+ unsigned long hugetlb_total_pages(void)
+ {
+- struct hstate *h = &default_hstate;
+- return h->nr_huge_pages * pages_per_huge_page(h);
++ struct hstate *h;
++ unsigned long nr_total_pages = 0;
++
++ for_each_hstate(h)
++ nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
++ return nr_total_pages;
+ }
+
+ static int hugetlb_acct_memory(struct hstate *h, long delta)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2aac4ec..b23bbbf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3299,6 +3299,7 @@ ncls:
+ }
+ switch (rx_handler(&skb)) {
+ case RX_HANDLER_CONSUMED:
++ ret = NET_RX_SUCCESS;
+ goto out;
+ case RX_HANDLER_ANOTHER:
+ goto another_round;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5229c7f..3b5e680 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -973,6 +973,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ * report anything.
+ */
+ ivi.spoofchk = -1;
++ memset(ivi.mac, 0, sizeof(ivi.mac));
+ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
+ break;
+ vf_mac.vf =
+@@ -2041,7 +2042,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
+
+ while (RTA_OK(attr, attrlen)) {
+- unsigned flavor = attr->rta_type;
++ unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
+ if (flavor) {
+ if (flavor > rta_max[sz_idx])
+ return -EINVAL;
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index d860530..2f9517d 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -336,6 +336,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_GPERM_HWADDR;
+
++ memset(perm_addr, 0, sizeof(perm_addr));
+ netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
+
+ ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
+@@ -1238,6 +1239,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+@@ -1245,6 +1247,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+@@ -1277,6 +1280,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+ /* get peer info if available */
+ if (ops->ieee_peer_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_peer_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+@@ -1284,6 +1288,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_peer_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+@@ -1463,6 +1468,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+ /* peer info if available */
+ if (ops->cee_peer_getpg) {
+ struct cee_pg pg;
++ memset(&pg, 0, sizeof(pg));
+ err = ops->cee_peer_getpg(netdev, &pg);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+@@ -1470,6 +1476,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->cee_peer_getpfc) {
+ struct cee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->cee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
+index 5d8cf80..8b0866f 100644
+--- a/net/ieee802154/6lowpan.h
++++ b/net/ieee802154/6lowpan.h
+@@ -87,7 +87,7 @@
+ (memcmp(addr1, addr2, length >> 3) == 0)
+
+ /* local link, i.e. FE80::/10 */
+-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
++#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
+
+ /*
+ * check whether we can compress the IID to 16 bits,
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 5ff2a51..210b710 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -21,6 +21,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+
++#include <net/sock.h>
+ #include <net/inet_frag.h>
+
+ static void inet_frag_secret_rebuild(unsigned long dummy)
+@@ -271,6 +272,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ {
+ struct inet_frag_queue *q;
+ struct hlist_node *n;
++ int depth = 0;
+
+ hlist_for_each_entry(q, n, &f->hash[hash], list) {
+ if (q->net == nf && f->match(q, key)) {
+@@ -278,9 +280,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ read_unlock(&f->lock);
+ return q;
+ }
++ depth++;
+ }
+ read_unlock(&f->lock);
+
+- return inet_frag_create(nf, f, key);
++ if (depth <= INETFRAGS_MAXDEPTH)
++ return inet_frag_create(nf, f, key);
++ else
++ return ERR_PTR(-ENOBUFS);
+ }
+ EXPORT_SYMBOL(inet_frag_find);
++
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix)
++{
++ static const char msg[] = "inet_frag_find: Fragment hash bucket"
++ " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
++ ". Dropping fragment.\n";
++
++ if (PTR_ERR(q) == -ENOBUFS)
++ LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
++}
++EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index a4e7131..b2cfe83 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -20,6 +20,8 @@
+ * Patrick McHardy : LRU queue of frag heads for evictor.
+ */
+
++#define pr_fmt(fmt) "IPv4: " fmt
++
+ #include <linux/compiler.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -293,14 +295,12 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+ hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+
+ q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
+- if (q == NULL)
+- goto out_nomem;
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+
+ return container_of(q, struct ipq, q);
+-
+-out_nomem:
+- LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
+- return NULL;
+ }
+
+ /* Is the fragment too far ahead to be part of ipq? */
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 42dd1a9..40eb4fc 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -358,7 +358,6 @@ int ip_options_compile(struct net *net,
+ }
+ switch (optptr[3]&0xF) {
+ case IPOPT_TS_TSONLY:
+- opt->ts = optptr - iph;
+ if (skb)
+ timeptr = &optptr[optptr[2]-1];
+ opt->ts_needtime = 1;
+@@ -369,7 +368,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (rt) {
+ memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ timeptr = &optptr[optptr[2]+3];
+@@ -383,7 +381,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ {
+ __be32 addr;
+ memcpy(&addr, &optptr[optptr[2]-1], 4);
+@@ -416,12 +413,12 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 3;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (skb) {
+ optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
+ opt->is_changed = 1;
+ }
+ }
++ opt->ts = optptr - iph;
+ break;
+ case IPOPT_RA:
+ if (optlen < 4) {
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 52edbb8..fe381c2 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -704,7 +704,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+- skb->avail_size = size;
++ skb->reserved_tailroom = skb->end - skb->tail - size;
+ return skb;
+ }
+ __kfree_skb(skb);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e865ed1..1b1f7af 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5494,6 +5494,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ if (tcp_checksum_complete_user(sk, skb))
+ goto csum_error;
+
++ if ((int)skb->truesize > sk->sk_forward_alloc)
++ goto step5;
++
+ /* Predicted packet is in window by definition.
+ * seq == rcv_nxt and rcv_wup <= rcv_nxt.
+ * Hence, check seq<=rcv_wup reduces to:
+@@ -5505,9 +5508,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+- if ((int)skb->truesize > sk->sk_forward_alloc)
+- goto step5;
+-
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+
+ /* Bulk data transfer: receiver */
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 921cbac..9bb7400 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1096,7 +1096,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
+ eat = min_t(int, len, skb_headlen(skb));
+ if (eat) {
+ __skb_pull(skb, eat);
+- skb->avail_size -= eat;
+ len -= eat;
+ if (!len)
+ return;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index a46c64e..f8d24dd 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -265,7 +265,8 @@ int ip6_mc_input(struct sk_buff *skb)
+ * IPv6 multicast router mode is now supported ;)
+ */
+ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+- !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
++ !(ipv6_addr_type(&hdr->daddr) &
++ (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
+ likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
+ /*
+ * Okay, we try to forward - split and duplicate
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 38f00b0..52e2f65 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -14,6 +14,8 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
++#define pr_fmt(fmt) "IPv6-nf: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -176,13 +178,12 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+
+ q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+ local_bh_enable();
+- if (q == NULL)
+- goto oom;
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+
+ return container_of(q, struct nf_ct_frag6_queue, q);
+-
+-oom:
+- return NULL;
+ }
+
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index dfb164e..2b0a4ca 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -26,6 +26,9 @@
+ * YOSHIFUJI,H. @USAGI Always remove fragment header to
+ * calculate ICV correctly.
+ */
++
++#define pr_fmt(fmt) "IPv6: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -240,9 +243,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
+ hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+
+ q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
+- if (q == NULL)
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+-
++ }
+ return container_of(q, struct frag_queue, q);
+ }
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 791c1fa..18ea73c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1920,7 +1920,8 @@ void rt6_purge_dflt_routers(struct net *net)
+ restart:
+ read_lock_bh(&table->tb6_lock);
+ for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
+- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
++ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
++ (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ ip6_del_rt(rt);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index b1bd16f..6f60175 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -360,6 +360,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(ps->tunnel_sock);
++ sock_put(sk);
+
+ return error;
+
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index d463f5a..23267b3 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ struct netlbl_unlhsh_walk_arg cb_arg;
+ u32 skip_bkt = cb->args[0];
+ u32 skip_chain = cb->args[1];
+- u32 skip_addr4 = cb->args[2];
+- u32 skip_addr6 = cb->args[3];
+ u32 iter_bkt;
+ u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
+ struct netlbl_unlhsh_iface *iface;
+@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ continue;
+ netlbl_af4list_foreach_rcu(addr4,
+ &iface->addr4_list) {
+- if (iter_addr4++ < skip_addr4)
++ if (iter_addr4++ < cb->args[2])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(
+ NLBL_UNLABEL_C_STATICLIST,
+@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ netlbl_af6list_foreach_rcu(addr6,
+ &iface->addr6_list) {
+- if (iter_addr6++ < skip_addr6)
++ if (iter_addr6++ < cb->args[3])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(
+ NLBL_UNLABEL_C_STATICLIST,
+@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+
+ unlabel_staticlist_return:
+ rcu_read_unlock();
+- cb->args[0] = skip_bkt;
+- cb->args[1] = skip_chain;
+- cb->args[2] = skip_addr4;
+- cb->args[3] = skip_addr6;
++ cb->args[0] = iter_bkt;
++ cb->args[1] = iter_chain;
++ cb->args[2] = iter_addr4;
++ cb->args[3] = iter_addr6;
+ return skb->len;
+ }
+
+@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ {
+ struct netlbl_unlhsh_walk_arg cb_arg;
+ struct netlbl_unlhsh_iface *iface;
+- u32 skip_addr4 = cb->args[0];
+- u32 skip_addr6 = cb->args[1];
+- u32 iter_addr4 = 0;
++ u32 iter_addr4 = 0, iter_addr6 = 0;
+ struct netlbl_af4list *addr4;
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+- u32 iter_addr6 = 0;
+ struct netlbl_af6list *addr6;
+ #endif
+
+@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ goto unlabel_staticlistdef_return;
+
+ netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
+- if (iter_addr4++ < skip_addr4)
++ if (iter_addr4++ < cb->args[0])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
+ iface,
+@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ }
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
+- if (iter_addr6++ < skip_addr6)
++ if (iter_addr6++ < cb->args[1])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
+ iface,
+@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+
+ unlabel_staticlistdef_return:
+ rcu_read_unlock();
+- cb->args[0] = skip_addr4;
+- cb->args[1] = skip_addr6;
++ cb->args[0] = iter_addr4;
++ cb->args[1] = iter_addr6;
+ return skb->len;
+ }
+
+diff --git a/net/rds/message.c b/net/rds/message.c
+index f0a4658..aff589c 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -197,6 +197,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
+ {
+ struct rds_message *rm;
+
++ if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
++ return NULL;
++
+ rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
+ if (!rm)
+ goto out;
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index acd2edb..3c04692 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1050,7 +1050,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
+ transports) {
+
+ if (transport == active)
+- break;
++ continue;
+ list_for_each_entry(chunk, &transport->transmitted,
+ transmitted_list) {
+ if (key == chunk->subh.data_hdr->tsn) {
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 891f5db..cb1c430 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2044,7 +2044,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+ }
+
+	/* Delete the temporary new association. */
+- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
++ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+	/* Restore association pointer to provide SCTP command interpreter
+diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
+index 48665ec..8ab2951 100644
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+
+ if (old_ctx) {
+ new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index b0187e7..7747d26 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2771,7 +2771,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
+ if (val & AC_DIG1_PROFESSIONAL)
+ sbits |= IEC958_AES0_PROFESSIONAL;
+ if (sbits & IEC958_AES0_PROFESSIONAL) {
+- if (sbits & AC_DIG1_EMPHASIS)
++ if (val & AC_DIG1_EMPHASIS)
+ sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
+ } else {
+ if (val & AC_DIG1_EMPHASIS)
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index c9269ce..984b5b1 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1236,7 +1236,7 @@ static int patch_cxt5045(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ return 0;
+ }
+@@ -2027,7 +2027,7 @@ static int patch_cxt5051(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ conexant_init_jacks(codec);
+ if (spec->auto_mic & AUTO_MIC_PORTB)
+@@ -3225,7 +3225,7 @@ static int patch_cxt5066(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ return 0;
+ }
+@@ -4556,7 +4556,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ spec->capture_stream = &cx_auto_pcm_analog_capture;
+ codec->patch_ops = cx_auto_patch_ops;
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+ return 0;
+ }
+
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9121dee..f4540bf 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -711,8 +711,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
+ case UAC2_CLOCK_SELECTOR: {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve the channel info */
+- if (check_input_term(state, d->baSourceID[0], term) < 0)
+- return -ENODEV;
++ err = check_input_term(state, d->baSourceID[0], term);
++ if (err < 0)
++ return err;
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->id = id;
+ term->name = uac_selector_unit_iSelector(d);
+@@ -1263,8 +1264,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
+ return err;
+
+ /* determine the input source type and name */
+- if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
+- return -EINVAL;
++ err = check_input_term(state, hdr->bSourceID, &iterm);
++ if (err < 0)
++ return err;
+
+ master_bits = snd_usb_combine_bytes(bmaControls, csize);
+ /* master configuration quirks */
+@@ -2018,7 +2020,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_output_terminal_descriptor *desc = p;
+@@ -2030,12 +2032,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+
+ /* for UAC2, use the same approach to also add the clock selectors */
+ err = parse_audio_unit(&state, desc->bCSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+ }
+ }
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 0961d88..5e19410 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -393,13 +393,19 @@ int main(void)
+ len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+ addr_p, &addr_l);
+
+- if (len < 0 || addr.nl_pid) {
++ if (len < 0) {
+ syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+ addr.nl_pid, errno, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
++ if (addr.nl_pid) {
++ syslog(LOG_WARNING, "Received packet from untrusted pid:%u",
++ addr.nl_pid);
++ continue;
++ }
++
+ incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
+ incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
+
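+
The hv_kvp_daemon hunk above separates the two failure cases: a negative recvfrom() return is still fatal, while a packet whose netlink sender pid is non-zero is now logged and skipped instead of killing the daemon. A small userspace sketch of the trust check; sender_is_kernel() is an illustrative name:

#include <sys/socket.h>
#include <linux/netlink.h>
#include <syslog.h>

/* On a netlink socket, nl_pid == 0 identifies the kernel; any non-zero
 * pid is another userspace process and must not be trusted. */
static int sender_is_kernel(const struct sockaddr_nl *addr)
{
        if (addr->nl_pid != 0) {
                syslog(LOG_WARNING, "dropping packet from untrusted pid %u",
                       addr->nl_pid);
                return 0;
        }
        return 1;
}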
+diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
+index bf54c48..6c164dc 100644
+--- a/tools/perf/util/trace-event-parse.c
++++ b/tools/perf/util/trace-event-parse.c
+@@ -1582,8 +1582,6 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+- while (type == EVENT_OP)
+- type = process_op(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
diff --git a/3.2.54/1042_linux-3.2.43.patch b/3.2.54/1042_linux-3.2.43.patch
new file mode 100644
index 0000000..a3f878b
--- /dev/null
+++ b/3.2.54/1042_linux-3.2.43.patch
@@ -0,0 +1,2442 @@
+diff --git a/Makefile b/Makefile
+index d44f009..59130db 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index 5f85d8b..a09b6c3 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -914,7 +914,7 @@ void __cpuinit setup_cpu(int boot)
+ #ifdef CONFIG_BLK_DEV_INITRD
+
+ static int __initdata set_initramfs_file;
+-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
++static char __initdata initramfs_file[128] = "initramfs";
+
+ static int __init setup_initramfs_file(char *str)
+ {
+@@ -928,9 +928,9 @@ static int __init setup_initramfs_file(char *str)
+ early_param("initramfs_file", setup_initramfs_file);
+
+ /*
+- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+- * If there is one, we allocate some memory for it and it will be
+- * unpacked to the initramfs after any built-in initramfs_data.
++ * We look for a file called "initramfs" in the hvfs. If there is one, we
++ * allocate some memory for it and it will be unpacked to the initramfs.
++ * If it's compressed, the initrd code will uncompress it first.
+ */
+ static void __init load_hv_initrd(void)
+ {
+@@ -940,10 +940,16 @@ static void __init load_hv_initrd(void)
+
+ fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
+ if (fd == HV_ENOENT) {
+- if (set_initramfs_file)
++ if (set_initramfs_file) {
+ pr_warning("No such hvfs initramfs file '%s'\n",
+ initramfs_file);
+- return;
++ return;
++ } else {
++ /* Try old backwards-compatible name. */
++ fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
++ if (fd == HV_ENOENT)
++ return;
++ }
+ }
+ BUG_ON(fd < 0);
+ stat = hv_fs_fstat(fd);
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index 887f68f..db30542 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -30,8 +30,9 @@ new_skb(ulong len)
+ {
+ struct sk_buff *skb;
+
+- skb = alloc_skb(len, GFP_ATOMIC);
++ skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
+ if (skb) {
++ skb_reserve(skb, MAX_HEADER);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->protocol = __constant_htons(ETH_P_AOE);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 8c6787a..a365562 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -907,6 +907,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
++
++ /* Grab the block_device to prevent its destruction after we
++ * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
++ */
++ bdgrab(bdev);
+ return 0;
+
+ out_clr:
+@@ -1003,8 +1008,10 @@ static int loop_clr_fd(struct loop_device *lo)
+ memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+- if (bdev)
++ if (bdev) {
++ bdput(bdev);
+ invalidate_bdev(bdev);
++ }
+ set_capacity(lo->lo_disk, 0);
+ loop_sysfs_exit(lo);
+ if (bdev) {
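+
The loop hunk above pins the block_device with bdgrab() for as long as the loop device stays bound and releases it with bdput() in loop_clr_fd(), closing a use-after-free window. A generic userspace sketch of that get/put discipline; struct obj is illustrative:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int refs; };        /* set to 1 when the object is created */

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refs, 1);  /* hold a reference while the pointer is stored */
        return o;
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1)
                free(o);                /* last reference dropped: safe to destroy */
}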
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 15ec4db..85fdd4b 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -758,13 +758,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ }
+ }
+
+- /*
+- * We set it one so that the last submit_bio does not have to call
+- * atomic_inc.
+- */
+ atomic_set(&pending_req->pendcnt, nbio);
+-
+- /* Get a reference count for the disk queue and start sending I/O */
+ blk_start_plug(&plug);
+
+ for (i = 0; i < nbio; i++)
+@@ -792,6 +786,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ fail_put_bio:
+ for (i = 0; i < nbio; i++)
+ bio_put(biolist[i]);
++ atomic_set(&pending_req->pendcnt, 1);
+ __end_block_io_op(pending_req, -EINVAL);
+ msleep(1); /* back off a bit */
+ return -EIO;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 574ce73..853fdf8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -72,14 +72,23 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR3012 with sflash firmware*/
++ { USB_DEVICE(0x0CF3, 0x0036) },
+ { USB_DEVICE(0x0CF3, 0x3004) },
++ { USB_DEVICE(0x0CF3, 0x3008) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
++ { USB_DEVICE(0x0CF3, 0x817a) },
+ { USB_DEVICE(0x13d3, 0x3375) },
++ { USB_DEVICE(0x04CA, 0x3004) },
+ { USB_DEVICE(0x04CA, 0x3005) },
++ { USB_DEVICE(0x04CA, 0x3006) },
++ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
++ { USB_DEVICE(0x13d3, 0x3393) },
++ { USB_DEVICE(0x0489, 0xe04e) },
++ { USB_DEVICE(0x0489, 0xe056) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -99,14 +108,23 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ static struct usb_device_id ath3k_blist_tbl[] = {
+
+ /* Atheros AR3012 with sflash firmware*/
++ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c5e44a3..6b784b7 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -139,14 +139,23 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros 3012 with sflash firmware */
++ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 7795d1e..d5ae736 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -131,7 +131,8 @@ struct ports_device {
+ spinlock_t ports_lock;
+
+ /* To protect the vq operations for the control channel */
+- spinlock_t cvq_lock;
++ spinlock_t c_ivq_lock;
++ spinlock_t c_ovq_lock;
+
+ /* The current config space is stored here */
+ struct virtio_console_config config;
+@@ -457,11 +458,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ vq = portdev->c_ovq;
+
+ sg_init_one(sg, &cpkt, sizeof(cpkt));
++
++ spin_lock(&portdev->c_ovq_lock);
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
+ cpu_relax();
+ }
++ spin_unlock(&portdev->c_ovq_lock);
+ return 0;
+ }
+
+@@ -1466,23 +1470,23 @@ static void control_work_handler(struct work_struct *work)
+ portdev = container_of(work, struct ports_device, control_work);
+ vq = portdev->c_ivq;
+
+- spin_lock(&portdev->cvq_lock);
++ spin_lock(&portdev->c_ivq_lock);
+ while ((buf = virtqueue_get_buf(vq, &len))) {
+- spin_unlock(&portdev->cvq_lock);
++ spin_unlock(&portdev->c_ivq_lock);
+
+ buf->len = len;
+ buf->offset = 0;
+
+ handle_control_message(portdev, buf);
+
+- spin_lock(&portdev->cvq_lock);
++ spin_lock(&portdev->c_ivq_lock);
+ if (add_inbuf(portdev->c_ivq, buf) < 0) {
+ dev_warn(&portdev->vdev->dev,
+ "Error adding buffer to queue\n");
+ free_buf(buf);
+ }
+ }
+- spin_unlock(&portdev->cvq_lock);
++ spin_unlock(&portdev->c_ivq_lock);
+ }
+
+ static void out_intr(struct virtqueue *vq)
+@@ -1721,10 +1725,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
+ if (multiport) {
+ unsigned int nr_added_bufs;
+
+- spin_lock_init(&portdev->cvq_lock);
++ spin_lock_init(&portdev->c_ivq_lock);
++ spin_lock_init(&portdev->c_ovq_lock);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+- nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
++ nr_added_bufs = fill_queue(portdev->c_ivq,
++ &portdev->c_ivq_lock);
+ if (!nr_added_bufs) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
+index cdae207..d4cd56a 100644
+--- a/drivers/eisa/pci_eisa.c
++++ b/drivers/eisa/pci_eisa.c
+@@ -19,8 +19,7 @@
+ /* There is only *one* pci_eisa device per machine, right ? */
+ static struct eisa_root_device pci_eisa_root;
+
+-static int __init pci_eisa_init(struct pci_dev *pdev,
+- const struct pci_device_id *ent)
++static int __init pci_eisa_init(struct pci_dev *pdev)
+ {
+ int rc;
+
+@@ -45,22 +44,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
+ return 0;
+ }
+
+-static struct pci_device_id pci_eisa_pci_tbl[] = {
+- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+- PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
+- { 0, }
+-};
++/*
++ * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init().
++ * Otherwise PNP resources would get enabled early and could prevent
++ * EISA from being initialized.
++ * We also need to make sure pci_eisa_init_early() is called after
++ * x86/pci_subsys_init().
++ * Hence it is registered via subsys_initcall_sync.
++ */
++static int __init pci_eisa_init_early(void)
++{
++ struct pci_dev *dev = NULL;
++ int ret;
+
+-static struct pci_driver __refdata pci_eisa_driver = {
+- .name = "pci_eisa",
+- .id_table = pci_eisa_pci_tbl,
+- .probe = pci_eisa_init,
+-};
++ for_each_pci_dev(dev)
++ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
++ ret = pci_eisa_init(dev);
++ if (ret)
++ return ret;
++ }
+
+-static int __init pci_eisa_init_module (void)
+-{
+- return pci_register_driver (&pci_eisa_driver);
++ return 0;
+ }
+-
+-device_initcall(pci_eisa_init_module);
+-MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
++subsys_initcall_sync(pci_eisa_init_early);
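+
The pci_eisa rework above depends on initcall ordering rather than the PCI driver model: initcalls run level by level, and the *_sync variant of a level runs after all plain entries of that level, so subsys_initcall_sync lands after x86's pci_subsys_init() but before the fs_initcall-level PNP setup. A kernel-style sketch for built-in code; my_bridge_scan is an illustrative name:

static int __init my_bridge_scan(void)
{
        /* every PCI bus is already enumerated by the time this runs */
        return 0;
}
subsys_initcall_sync(my_bridge_scan);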
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e665bdf..08075f2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -539,6 +539,9 @@
+ #define USB_VENDOR_ID_MONTEREY 0x0566
+ #define USB_DEVICE_ID_GENIUS_KB29E 0x3004
+
++#define USB_VENDOR_ID_MSI 0x1770
++#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
++
+ #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
+ #define USB_DEVICE_ID_N_S_HARMONY 0xc359
+
+@@ -621,6 +624,9 @@
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+ #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
+
++#define USB_VENDOR_ID_REALTEK 0x0bda
++#define USB_DEVICE_ID_REALTEK_READER 0x0152
++
+ #define USB_VENDOR_ID_ROCCAT 0x1e7d
+ #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+ #define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 3899989..259b9f4 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -47,9 +47,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ rdesc[559] = 0x45;
+ }
+ /* the same as above (s/usage/physical/) */
+- if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
+- !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
+- &rdesc[94], 4)) {
++ if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
++ rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
++ rdesc[97] == 0xff) {
+ rdesc[94] = 0x35;
+ rdesc[96] = 0x45;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e26eddf..96a1e0f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -71,11 +71,13 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 014504d..3767853 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -755,9 +755,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
+ if (++priv->tx_outstanding == ipoib_sendq_size) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+- ipoib_warn(priv, "request notify on send CQ failed\n");
+ netif_stop_queue(dev);
++ rc = ib_req_notify_cq(priv->send_cq,
++ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
++ if (rc < 0)
++ ipoib_warn(priv, "request notify on send CQ failed\n");
++ else if (rc)
++ ipoib_send_comp_handler(priv->send_cq, dev);
+ }
+ }
+ }
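+
The ipoib hunk above re-arms the CQ with IB_CQ_REPORT_MISSED_EVENTS after stopping the queue: a completion that fired while the CQ was unarmed would otherwise never wake the queue again. A sketch of the re-arm pattern using the same verbs call; my_comp_handler stands in for the driver's completion handler:

        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        if (rc < 0)
                pr_warn("request notify on send CQ failed\n");
        else if (rc)
                my_comp_handler(cq, dev);       /* positive: an event was missed, drain it now */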
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 0f074e0..07cb1a6 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1874,16 +1874,16 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ /* allocate a protection domain if a device is added */
+ dma_domain = find_protection_domain(devid);
+- if (dma_domain)
+- goto out;
+- dma_domain = dma_ops_domain_alloc();
+- if (!dma_domain)
+- goto out;
+- dma_domain->target_dev = devid;
+-
+- spin_lock_irqsave(&iommu_pd_list_lock, flags);
+- list_add_tail(&dma_domain->list, &iommu_pd_list);
+- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++ if (!dma_domain) {
++ dma_domain = dma_ops_domain_alloc();
++ if (!dma_domain)
++ goto out;
++ dma_domain->target_dev = devid;
++
++ spin_lock_irqsave(&iommu_pd_list_lock, flags);
++ list_add_tail(&dma_domain->list, &iommu_pd_list);
++ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++ }
+
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 63e3c47..fc07f90 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1934,12 +1934,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ return -EINVAL;
+ }
+
++ write_unlock_bh(&bond->lock);
+ /* unregister rx_handler early so bond_handle_frame wouldn't be called
+ * for this slave anymore.
+ */
+ netdev_rx_handler_unregister(slave_dev);
+- write_unlock_bh(&bond->lock);
+- synchronize_net();
+ write_lock_bh(&bond->lock);
+
+ if (!bond->params.fail_over_mac) {
+@@ -3422,6 +3421,28 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+
+ /*-------------------------- Device entry points ----------------------------*/
+
++static void bond_work_init_all(struct bonding *bond)
++{
++ INIT_DELAYED_WORK(&bond->mcast_work,
++ bond_resend_igmp_join_requests_delayed);
++ INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
++ INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++ INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
++ else
++ INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
++ INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
++}
++
++static void bond_work_cancel_all(struct bonding *bond)
++{
++ cancel_delayed_work_sync(&bond->mii_work);
++ cancel_delayed_work_sync(&bond->arp_work);
++ cancel_delayed_work_sync(&bond->alb_work);
++ cancel_delayed_work_sync(&bond->ad_work);
++ cancel_delayed_work_sync(&bond->mcast_work);
++}
++
+ static int bond_open(struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+@@ -3444,41 +3465,27 @@ static int bond_open(struct net_device *bond_dev)
+ }
+ read_unlock(&bond->lock);
+
+- INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
++ bond_work_init_all(bond);
+
+ if (bond_is_lb(bond)) {
+ /* bond_alb_initialize must be called before the timer
+ * is started.
+ */
+- if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
+- /* something went wrong - fail the open operation */
++ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+ return -ENOMEM;
+- }
+-
+- INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ }
+
+- if (bond->params.miimon) { /* link check interval, in milliseconds. */
+- INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++ if (bond->params.miimon) /* link check interval, in milliseconds. */
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+- }
+
+ if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_activebackup_arp_mon);
+- else
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_loadbalance_arp_mon);
+-
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+- INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+ queue_delayed_work(bond->wq, &bond->ad_work, 0);
+ /* register to receive LACPDUs */
+ bond->recv_probe = bond_3ad_lacpdu_recv;
+@@ -3493,34 +3500,10 @@ static int bond_close(struct net_device *bond_dev)
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ write_lock_bh(&bond->lock);
+-
+ bond->send_peer_notif = 0;
+-
+ write_unlock_bh(&bond->lock);
+
+- if (bond->params.miimon) { /* link check interval, in milliseconds. */
+- cancel_delayed_work_sync(&bond->mii_work);
+- }
+-
+- if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+- cancel_delayed_work_sync(&bond->arp_work);
+- }
+-
+- switch (bond->params.mode) {
+- case BOND_MODE_8023AD:
+- cancel_delayed_work_sync(&bond->ad_work);
+- break;
+- case BOND_MODE_TLB:
+- case BOND_MODE_ALB:
+- cancel_delayed_work_sync(&bond->alb_work);
+- break;
+- default:
+- break;
+- }
+-
+- if (delayed_work_pending(&bond->mcast_work))
+- cancel_delayed_work_sync(&bond->mcast_work);
+-
++ bond_work_cancel_all(bond);
+ if (bond_is_lb(bond)) {
+ /* Must be called only after all
+ * slaves have been released
+@@ -4364,26 +4347,6 @@ static void bond_setup(struct net_device *bond_dev)
+ bond_dev->features |= bond_dev->hw_features;
+ }
+
+-static void bond_work_cancel_all(struct bonding *bond)
+-{
+- if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
+- cancel_delayed_work_sync(&bond->mii_work);
+-
+- if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
+- cancel_delayed_work_sync(&bond->arp_work);
+-
+- if (bond->params.mode == BOND_MODE_ALB &&
+- delayed_work_pending(&bond->alb_work))
+- cancel_delayed_work_sync(&bond->alb_work);
+-
+- if (bond->params.mode == BOND_MODE_8023AD &&
+- delayed_work_pending(&bond->ad_work))
+- cancel_delayed_work_sync(&bond->ad_work);
+-
+- if (delayed_work_pending(&bond->mcast_work))
+- cancel_delayed_work_sync(&bond->mcast_work);
+-}
+-
+ /*
+ * Destroy a bonding device.
+ * Must be under rtnl_lock when this function is called.
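+
The bonding rework above replaces the mode-dependent INIT_DELAYED_WORK/cancel pairs with unconditional init-all and cancel-all helpers; cancel_delayed_work_sync() on work that was initialized but never queued is a harmless no-op, so no mode or pending checks are needed. A kernel-style sketch of the pattern; struct and handler names are illustrative:

static void work_init_all(struct my_dev *d)
{
        INIT_DELAYED_WORK(&d->mii_work, mii_fn);
        INIT_DELAYED_WORK(&d->arp_work, arp_fn);
}

static void work_cancel_all(struct my_dev *d)
{
        /* safe even for a work item that was never queued */
        cancel_delayed_work_sync(&d->mii_work);
        cancel_delayed_work_sync(&d->arp_work);
}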
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index a03fde9..8ed48c2 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -184,6 +184,11 @@ int bond_create_slave_symlinks(struct net_device *master,
+ sprintf(linkname, "slave_%s", slave->name);
+ ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
+ linkname);
++
++ /* free the master link created earlier in case of error */
++ if (ret)
++ sysfs_remove_link(&(slave->dev.kobj), "master");
++
+ return ret;
+
+ }
+@@ -514,6 +519,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no arp_interval value specified.\n",
+ bond->dev->name);
+@@ -521,7 +528,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ goto out;
+ }
+ if (new_value < 0) {
+- pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
++ pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ bond->dev->name, new_value, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+@@ -536,18 +543,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, new_value);
+ bond->params.arp_interval = new_value;
+- if (bond->params.miimon) {
+- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+- bond->dev->name, bond->dev->name);
+- bond->params.miimon = 0;
+- if (delayed_work_pending(&bond->mii_work)) {
+- cancel_delayed_work(&bond->mii_work);
+- flush_workqueue(bond->wq);
++ if (new_value) {
++ if (bond->params.miimon) {
++ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
++ bond->dev->name, bond->dev->name);
++ bond->params.miimon = 0;
+ }
+- }
+- if (!bond->params.arp_targets[0]) {
+- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+- bond->dev->name);
++ if (!bond->params.arp_targets[0])
++ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
++ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+@@ -555,19 +559,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ * timer will get fired off when the open function
+ * is called.
+ */
+- if (!delayed_work_pending(&bond->arp_work)) {
+- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_activebackup_arp_mon);
+- else
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_loadbalance_arp_mon);
+-
++ if (!new_value) {
++ cancel_delayed_work_sync(&bond->arp_work);
++ } else {
++ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+-
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+@@ -707,7 +707,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ }
+ if (new_value < 0) {
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ } else {
+@@ -762,8 +762,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ goto out;
+ }
+ if (new_value < 0) {
+- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ } else {
+@@ -963,6 +963,8 @@ static ssize_t bonding_store_miimon(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no miimon value specified.\n",
+ bond->dev->name);
+@@ -971,50 +973,43 @@ static ssize_t bonding_store_miimon(struct device *d,
+ }
+ if (new_value < 0) {
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+- } else {
+- pr_info("%s: Setting MII monitoring interval to %d.\n",
+- bond->dev->name, new_value);
+- bond->params.miimon = new_value;
+- if (bond->params.updelay)
+- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+- bond->dev->name,
+- bond->params.updelay * bond->params.miimon);
+- if (bond->params.downdelay)
+- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+- bond->dev->name,
+- bond->params.downdelay * bond->params.miimon);
+- if (bond->params.arp_interval) {
+- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+- bond->dev->name);
+- bond->params.arp_interval = 0;
+- if (bond->params.arp_validate) {
+- bond->params.arp_validate =
+- BOND_ARP_VALIDATE_NONE;
+- }
+- if (delayed_work_pending(&bond->arp_work)) {
+- cancel_delayed_work(&bond->arp_work);
+- flush_workqueue(bond->wq);
+- }
+- }
+-
+- if (bond->dev->flags & IFF_UP) {
+- /* If the interface is up, we may need to fire off
+- * the MII timer. If the interface is down, the
+- * timer will get fired off when the open function
+- * is called.
+- */
+- if (!delayed_work_pending(&bond->mii_work)) {
+- INIT_DELAYED_WORK(&bond->mii_work,
+- bond_mii_monitor);
+- queue_delayed_work(bond->wq,
+- &bond->mii_work, 0);
+- }
++ }
++ pr_info("%s: Setting MII monitoring interval to %d.\n",
++ bond->dev->name, new_value);
++ bond->params.miimon = new_value;
++ if (bond->params.updelay)
++ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
++ bond->dev->name,
++ bond->params.updelay * bond->params.miimon);
++ if (bond->params.downdelay)
++ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
++ bond->dev->name,
++ bond->params.downdelay * bond->params.miimon);
++ if (new_value && bond->params.arp_interval) {
++ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
++ bond->dev->name);
++ bond->params.arp_interval = 0;
++ if (bond->params.arp_validate)
++ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
++ }
++ if (bond->dev->flags & IFF_UP) {
++ /* If the interface is up, we may need to fire off
++ * the MII timer. If the interface is down, the
++ * timer will get fired off when the open function
++ * is called.
++ */
++ if (!new_value) {
++ cancel_delayed_work_sync(&bond->mii_work);
++ } else {
++ cancel_delayed_work_sync(&bond->arp_work);
++ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
+diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
+index c7f3d4e..c31c2d6 100644
+--- a/drivers/net/can/sja1000/plx_pci.c
++++ b/drivers/net/can/sja1000/plx_pci.c
+@@ -309,7 +309,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+- (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
++ (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+@@ -321,7 +321,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+- priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
++ priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 192b0d1..6a1acfe 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -91,7 +91,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+- priv->read_reg(priv, REG_SR);
++ priv->read_reg(priv, SJA1000_REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+ }
+
+@@ -497,7 +497,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+ n++;
+- status = priv->read_reg(priv, REG_SR);
++ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
+@@ -516,7 +516,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ /* receive interrupt */
+ while (status & SR_RBS) {
+ sja1000_rx(dev);
+- status = priv->read_reg(priv, REG_SR);
++ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
+diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
+index 23fff06..2a79543 100644
+--- a/drivers/net/can/sja1000/sja1000.h
++++ b/drivers/net/can/sja1000/sja1000.h
+@@ -56,7 +56,7 @@
+ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
+ #define REG_MOD 0x00
+ #define REG_CMR 0x01
+-#define REG_SR 0x02
++#define SJA1000_REG_SR 0x02
+ #define REG_IR 0x03
+ #define REG_IER 0x04
+ #define REG_ALC 0x0B
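+
The sja1000 hunks above exist only because the bare name REG_SR was too generic and collided with an identical macro elsewhere in the build; prefixing driver-local register names is the usual cure:

#define SJA1000_REG_SR  0x02    /* was REG_SR, which clashed with another header */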
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index 829b5ad..edfdf6b 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -438,7 +438,6 @@ struct atl1e_adapter {
+ struct atl1e_hw hw;
+ struct atl1e_hw_stats hw_stats;
+
+- bool have_msi;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 95483bc..c69dc29 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1867,37 +1867,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+-
+- if (adapter->have_msi)
+- pci_disable_msi(adapter->pdev);
+ }
+
+ static int atl1e_request_irq(struct atl1e_adapter *adapter)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+- int flags = 0;
+ int err = 0;
+
+- adapter->have_msi = true;
+- err = pci_enable_msi(adapter->pdev);
+- if (err) {
+- netdev_dbg(adapter->netdev,
+- "Unable to allocate MSI interrupt Error: %d\n", err);
+- adapter->have_msi = false;
+- } else
+- netdev->irq = pdev->irq;
+-
+-
+- if (!adapter->have_msi)
+- flags |= IRQF_SHARED;
+- err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
+- netdev->name, netdev);
++ err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED,
++ netdev->name, netdev);
+ if (err) {
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
+- if (adapter->have_msi)
+- pci_disable_msi(adapter->pdev);
+ return err;
+ }
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c86fa50..c6b9903 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -13433,8 +13433,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
+ if (j + len > block_end)
+ goto partno;
+
+- memcpy(tp->fw_ver, &vpd_data[j], len);
+- strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
++ if (len >= sizeof(tp->fw_ver))
++ len = sizeof(tp->fw_ver) - 1;
++ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
++ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
++ &vpd_data[j]);
+ }
+
+ partno:
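+
The tg3 hunk above replaces an unbounded memcpy() plus strncat() with a clamped snprintf(), which both truncates the VPD string safely and guarantees NUL termination. A userspace sketch of the bounded copy; set_fw_ver is an illustrative name and dstsz is assumed non-zero:

#include <stdio.h>
#include <string.h>

static void set_fw_ver(char *dst, size_t dstsz, const char *src, size_t len)
{
        if (len >= dstsz)               /* clamp before formatting */
                len = dstsz - 1;
        memset(dst, 0, dstsz);
        snprintf(dst, dstsz, "%.*s bc ", (int)len, src);
}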
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 2a22f52..2f2e98b 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
+ tmp = readl(reg);
+ }
+
++/*
++ * Sleep, either by using msleep() or if we are suspending, then
++ * use mdelay() to sleep.
++ */
++static void dm9000_msleep(board_info_t *db, unsigned int ms)
++{
++ if (db->in_suspend)
++ mdelay(ms);
++ else
++ msleep(ms);
++}
++
++/* Read a word from phyxcer */
++static int
++dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
++{
++ board_info_t *db = netdev_priv(dev);
++ unsigned long flags;
++ unsigned int reg_save;
++ int ret;
++
++ mutex_lock(&db->addr_lock);
++
++ spin_lock_irqsave(&db->lock, flags);
++
++ /* Save previous register address */
++ reg_save = readb(db->io_addr);
++
++ /* Fill the phyxcer register into REG_0C */
++ iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++ /* Issue phyxcer read command */
++ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
++
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ dm9000_msleep(db, 1); /* Wait read complete */
++
++ spin_lock_irqsave(&db->lock, flags);
++ reg_save = readb(db->io_addr);
++
++ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
++
++ /* The read data keeps on REG_0D & REG_0E */
++ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
++
++ /* restore the previous address */
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ mutex_unlock(&db->addr_lock);
++
++ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
++ return ret;
++}
++
++/* Write a word to phyxcer */
++static void
++dm9000_phy_write(struct net_device *dev,
++ int phyaddr_unused, int reg, int value)
++{
++ board_info_t *db = netdev_priv(dev);
++ unsigned long flags;
++ unsigned long reg_save;
++
++ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
++ mutex_lock(&db->addr_lock);
++
++ spin_lock_irqsave(&db->lock, flags);
++
++ /* Save previous register address */
++ reg_save = readb(db->io_addr);
++
++ /* Fill the phyxcer register into REG_0C */
++ iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++ /* Fill the written data into REG_0D & REG_0E */
++ iow(db, DM9000_EPDRL, value);
++ iow(db, DM9000_EPDRH, value >> 8);
++
++ /* Issue phyxcer write command */
++ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
++
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ dm9000_msleep(db, 1); /* Wait write complete */
++
++ spin_lock_irqsave(&db->lock, flags);
++ reg_save = readb(db->io_addr);
++
++ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
++
++ /* restore the previous address */
++ writeb(reg_save, db->io_addr);
++
++ spin_unlock_irqrestore(&db->lock, flags);
++ mutex_unlock(&db->addr_lock);
++}
++
+ /* dm9000_set_io
+ *
+ * select the specified set of io routines to use with the
+@@ -793,6 +894,9 @@ dm9000_init_dm9000(struct net_device *dev)
+
+ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+
++ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
++ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
++
+ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
+
+ /* if wol is needed, then always set NCR_WAKEEN otherwise we end
+@@ -1199,109 +1303,6 @@ dm9000_open(struct net_device *dev)
+ return 0;
+ }
+
+-/*
+- * Sleep, either by using msleep() or if we are suspending, then
+- * use mdelay() to sleep.
+- */
+-static void dm9000_msleep(board_info_t *db, unsigned int ms)
+-{
+- if (db->in_suspend)
+- mdelay(ms);
+- else
+- msleep(ms);
+-}
+-
+-/*
+- * Read a word from phyxcer
+- */
+-static int
+-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+-{
+- board_info_t *db = netdev_priv(dev);
+- unsigned long flags;
+- unsigned int reg_save;
+- int ret;
+-
+- mutex_lock(&db->addr_lock);
+-
+- spin_lock_irqsave(&db->lock,flags);
+-
+- /* Save previous register address */
+- reg_save = readb(db->io_addr);
+-
+- /* Fill the phyxcer register into REG_0C */
+- iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+- iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
+-
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock,flags);
+-
+- dm9000_msleep(db, 1); /* Wait read complete */
+-
+- spin_lock_irqsave(&db->lock,flags);
+- reg_save = readb(db->io_addr);
+-
+- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+-
+- /* The read data keeps on REG_0D & REG_0E */
+- ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+-
+- /* restore the previous address */
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock,flags);
+-
+- mutex_unlock(&db->addr_lock);
+-
+- dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+- return ret;
+-}
+-
+-/*
+- * Write a word to phyxcer
+- */
+-static void
+-dm9000_phy_write(struct net_device *dev,
+- int phyaddr_unused, int reg, int value)
+-{
+- board_info_t *db = netdev_priv(dev);
+- unsigned long flags;
+- unsigned long reg_save;
+-
+- dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+- mutex_lock(&db->addr_lock);
+-
+- spin_lock_irqsave(&db->lock,flags);
+-
+- /* Save previous register address */
+- reg_save = readb(db->io_addr);
+-
+- /* Fill the phyxcer register into REG_0C */
+- iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+- /* Fill the written data into REG_0D & REG_0E */
+- iow(db, DM9000_EPDRL, value);
+- iow(db, DM9000_EPDRH, value >> 8);
+-
+- iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
+-
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock, flags);
+-
+- dm9000_msleep(db, 1); /* Wait write complete */
+-
+- spin_lock_irqsave(&db->lock,flags);
+- reg_save = readb(db->io_addr);
+-
+- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+-
+- /* restore the previous address */
+- writeb(reg_save, db->io_addr);
+-
+- spin_unlock_irqrestore(&db->lock, flags);
+- mutex_unlock(&db->addr_lock);
+-}
+-
+ static void
+ dm9000_shutdown(struct net_device *dev)
+ {
+@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
+ db->flags |= DM9000_PLATF_SIMPLE_PHY;
+ #endif
+
+- dm9000_reset(db);
++ /* Fix a bug in dm9000_probe: take over dm9000_reset(db) here,
++ * since the 'NCR_MAC_LBK' bit is needed to keep the DM9000 FIFO
++ * stable during the probe stage.
++ */
++
++ iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+
+ /* try multiple times, DM9000 sometimes gets the read wrong */
+ for (i = 0; i < 8; i++) {
+diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
+index 55688bd..9ce058a 100644
+--- a/drivers/net/ethernet/davicom/dm9000.h
++++ b/drivers/net/ethernet/davicom/dm9000.h
+@@ -69,7 +69,9 @@
+ #define NCR_WAKEEN (1<<6)
+ #define NCR_FCOL (1<<4)
+ #define NCR_FDX (1<<3)
+-#define NCR_LBK (3<<1)
++
++#define NCR_RESERVED (3<<1)
++#define NCR_MAC_LBK (1<<1)
+ #define NCR_RST (1<<0)
+
+ #define NSR_SPEED (1<<7)
+@@ -167,5 +169,12 @@
+ #define ISR_LNKCHNG (1<<5)
+ #define ISR_UNDERRUN (1<<4)
+
++/* Davicom MII registers.
++ */
++
++#define MII_DM_DSPCR 0x1b /* DSP Control Register */
++
++#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
++
+ #endif /* _DM9000X_H_ */
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index cc96a5a..41396fa 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8003,12 +8003,15 @@ static int __init ixgbe_init_module(void)
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_copyright);
+
++ ret = pci_register_driver(&ixgbe_driver);
++ if (ret)
++ return ret;
++
+ #ifdef CONFIG_IXGBE_DCA
+ dca_register_notify(&dca_notifier);
+ #endif
+
+- ret = pci_register_driver(&ixgbe_driver);
+- return ret;
++ return 0;
+ }
+
+ module_init(ixgbe_init_module);
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 69fc888..94f9a8f 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -1066,7 +1066,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+
+- tp = space - 2048/8;
++ tp = space - 8192/8;
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ } else {
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index 3c896ce..a0f229e 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2069,7 +2069,7 @@ enum {
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+-#define GMAC_DEF_MSK GM_IS_TX_FF_UR
++#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+ };
+
+ /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index f56743a..115e374 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -490,7 +490,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ for (; rxfc != 0; rxfc--) {
+ rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+ rxstat = rxh & 0xffff;
+- rxlen = rxh >> 16;
++ rxlen = (rxh >> 16) & 0xfff;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 43c7b25..495d65c 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -1545,9 +1545,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+- skb->ip_summed = CHECKSUM_NONE;
+- else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 22f2788..fd8115e 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1048,7 +1048,7 @@ static void emac_tx_handler(void *token, int len, int status)
+ struct net_device *ndev = skb->dev;
+
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_start_queue(ndev);
++ netif_wake_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 7bd219b..f3d17f8 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -720,8 +720,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
+ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ struct usbnet *dev = netdev_priv(netdev);
++ int ret;
++
++ if (new_mtu > MAX_SINGLE_PACKET_SIZE)
++ return -EINVAL;
+
+- int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
++ ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ check_warn_return(ret, "Failed to set mac rx frame length");
+
+ return usbnet_change_mtu(netdev, new_mtu);
+@@ -965,7 +969,7 @@ static int smsc75xx_reset(struct usbnet *dev)
+
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+
+- ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
++ ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ check_warn_return(ret, "Failed to set max rx frame length");
+
+ ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+@@ -1109,8 +1113,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
+ dev->net->stats.rx_frame_errors++;
+ } else {
+- /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+- if (unlikely(size > (ETH_FRAME_LEN + 12))) {
++ /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
++ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x", rx_cmd_a);
+ return 0;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index ae750f9..3965356 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -946,6 +946,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+
++ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (rtt) {
+@@ -1087,6 +1088,9 @@ skip_tx_iqcal:
+ ar9003_hw_rtt_disable(ah);
+ }
+
++ /* Revert chainmask to runtime parameters */
++ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
++
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 5e45604..12975ad 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1482,8 +1482,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_dma_ops *ops;
+ struct b43_dmaring *ring;
+ struct b43_dmadesc_meta *meta;
++ static const struct b43_txstatus fake; /* filled with 0 */
++ const struct b43_txstatus *txstat;
+ int slot, firstused;
+ bool frame_succeed;
++ int skip;
++ static u8 err_out1, err_out2;
+
+ ring = parse_cookie(dev, status->cookie, &slot);
+ if (unlikely(!ring))
+@@ -1496,13 +1500,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
++
++ skip = 0;
+ if (unlikely(slot != firstused)) {
+ /* This possibly is a firmware bug and will result in
+- * malfunction, memory leaks and/or stall of DMA functionality. */
+- b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+- "Expected %d, but got %d\n",
+- ring->index, firstused, slot);
+- return;
++ * malfunction, memory leaks and/or stall of DMA functionality.
++ */
++ if (slot == next_slot(ring, next_slot(ring, firstused))) {
++ /* If a single header/data pair was missed, skip over
++ * the first two slots in an attempt to recover.
++ */
++ slot = firstused;
++ skip = 2;
++ if (!err_out1) {
++ /* Report the error once. */
++ b43dbg(dev->wl,
++ "Skip on DMA ring %d slot %d.\n",
++ ring->index, slot);
++ err_out1 = 1;
++ }
++ } else {
++ /* More than a single header/data pair were missed.
++ * Report this error once.
++ */
++ if (!err_out2)
++ b43dbg(dev->wl,
++ "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
++ ring->index, firstused, slot);
++ err_out2 = 1;
++ return;
++ }
+ }
+
+ ops = ring->ops;
+@@ -1517,11 +1544,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ slot, firstused, ring->index);
+ break;
+ }
++
+ if (meta->skb) {
+ struct b43_private_tx_info *priv_info =
+- b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
++ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+- unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
++ unmap_descbuffer(ring, meta->dmaaddr,
++ meta->skb->len, 1);
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
+ } else {
+@@ -1533,8 +1562,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!meta->skb)) {
+- /* This is a scatter-gather fragment of a frame, so
+- * the skb pointer must not be NULL. */
++ /* This is a scatter-gather fragment of a frame,
++ * so the skb pointer must not be NULL.
++ */
+ b43dbg(dev->wl, "TX status unexpected NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+@@ -1545,9 +1575,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+
+ /*
+ * Call back to inform the ieee80211 subsystem about
+- * the status of the transmission.
++ * the status of the transmission. When skipping over
++ * a missed TX status report, use a status structure
++ * filled with zeros to indicate that the frame was not
++ * sent (frame_count 0) and not acknowledged
+ */
+- frame_succeed = b43_fill_txstatus_report(dev, info, status);
++ if (unlikely(skip))
++ txstat = &fake;
++ else
++ txstat = status;
++
++ frame_succeed = b43_fill_txstatus_report(dev, info,
++ txstat);
+ #ifdef CONFIG_B43_DEBUG
+ if (frame_succeed)
+ ring->nr_succeed_tx_packets++;
+@@ -1575,12 +1614,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ /* Everything unmapped and free'd. So it's not used anymore. */
+ ring->used_slots--;
+
+- if (meta->is_last_fragment) {
++ if (meta->is_last_fragment && !skip) {
+ /* This is the last scatter-gather
+ * fragment of the frame. We are done. */
+ break;
+ }
+ slot = next_slot(ring, slot);
++ if (skip > 0)
++ --skip;
+ }
+ if (ring->stopped) {
+ B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index f099b30..3de9875 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1146,7 +1146,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ rt2x00dev->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
++#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
++#endif
+ BIT(NL80211_IFTYPE_WDS);
+
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index c04ee92..e5fe956 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -812,6 +812,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+ if (unlikely(!_urb)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate urb. Drop skb!\n"));
++ kfree_skb(skb);
+ return;
+ }
+ urb_list = &rtlusb->tx_pending[ep_num];
+diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
+index 4c63f77..2e2b04f 100644
+--- a/drivers/spi/spi-mpc512x-psc.c
++++ b/drivers/spi/spi-mpc512x-psc.c
+@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
+
+ for (i = count; i > 0; i--) {
+ data = tx_buf ? *tx_buf++ : 0;
+- if (len == EOFBYTE)
++ if (len == EOFBYTE && t->cs_change)
+ setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ len--;
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index c72128f..42cad5c 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -1882,7 +1882,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ case TRIG_NONE:
+ /* continuous acquisition */
+ devpriv->ai_continous = 1;
+- devpriv->ai_sample_count = 0;
++ devpriv->ai_sample_count = 1;
+ break;
+ }
+
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 90c8e3a..99fcb8c 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -159,7 +159,7 @@ struct atmel_uart_port {
+ };
+
+ static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
+-static unsigned long atmel_ports_in_use;
++static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
+
+ #ifdef SUPPORT_SYSRQ
+ static struct console atmel_console;
+@@ -1784,15 +1784,14 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
+ if (ret < 0)
+ /* port id not found in platform data nor device-tree aliases:
+ * auto-enumerate it */
+- ret = find_first_zero_bit(&atmel_ports_in_use,
+- sizeof(atmel_ports_in_use));
++ ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
+
+- if (ret > ATMEL_MAX_UART) {
++ if (ret >= ATMEL_MAX_UART) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+- if (test_and_set_bit(ret, &atmel_ports_in_use)) {
++ if (test_and_set_bit(ret, atmel_ports_in_use)) {
+ /* port already in use */
+ ret = -EBUSY;
+ goto err;
+@@ -1866,7 +1865,7 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
+
+ /* "port" is allocated statically, so we shouldn't free it */
+
+- clear_bit(port->line, &atmel_ports_in_use);
++ clear_bit(port->line, atmel_ports_in_use);
+
+ clk_put(atmel_port->clk);
+
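The atmel_serial hunks above fix three related bugs in the port-ID allocator: the bare unsigned long becomes a real DECLARE_BITMAP(), the search limit passed to find_first_zero_bit() becomes the bit count ATMEL_MAX_UART (the old code passed sizeof(), a byte count), and the exhaustion check becomes >= instead of >. A minimal userspace sketch of the same allocate-by-bitmap pattern, with illustrative names rather than the kernel API:

#include <limits.h>

#define MAX_PORTS 8
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* like DECLARE_BITMAP(ports_in_use, MAX_PORTS) */
static unsigned long ports_in_use[(MAX_PORTS + BITS_PER_LONG - 1) / BITS_PER_LONG];

int alloc_port_id(void)
{
        int id;

        /* find_first_zero_bit(): the limit is a count of BITS */
        for (id = 0; id < MAX_PORTS; id++)
                if (!(ports_in_use[id / BITS_PER_LONG] &
                      (1UL << (id % BITS_PER_LONG))))
                        break;

        if (id >= MAX_PORTS)    /* >=, not >: MAX_PORTS itself is out of range */
                return -1;

        /* non-atomic here; the driver uses test_and_set_bit() to close the race */
        ports_in_use[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
        return id;
}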
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 7a367ff..fd89c42 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
+ static struct vcs_poll_data *
+ vcs_poll_data_get(struct file *file)
+ {
+- struct vcs_poll_data *poll = file->private_data;
++ struct vcs_poll_data *poll = file->private_data, *kill = NULL;
+
+ if (poll)
+ return poll;
+@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
+ file->private_data = poll;
+ } else {
+ /* someone else raced ahead of us */
+- vcs_poll_data_free(poll);
++ kill = poll;
+ poll = file->private_data;
+ }
+ spin_unlock(&file->f_lock);
++ if (kill)
++ vcs_poll_data_free(kill);
+
+ return poll;
+ }
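The vc_screen change above is a classic "free outside the lock" fix: the loser of the allocation race must not call vcs_poll_data_free() while still holding file->f_lock, because the free path takes other locks. A small pthread sketch of the same pattern (illustrative, not the kernel code):

#include <pthread.h>
#include <stdlib.h>

struct obj { int data; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *shared;

struct obj *get_obj(void)
{
        struct obj *o = malloc(sizeof(*o)), *kill = NULL;

        if (!o)
                return NULL;

        pthread_mutex_lock(&lock);
        if (!shared) {
                shared = o;             /* we won the race */
        } else {
                kill = o;               /* someone else raced ahead of us */
                o = shared;
        }
        pthread_mutex_unlock(&lock);

        free(kill);                     /* tear down the loser only after unlocking */
        return o;
}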
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 5c1f9e7..37b2a89 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1964,8 +1964,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+- td->urb->transfer_buffer_length
+- - TRB_LEN(le32_to_cpu(event->transfer_len));
++ td->urb->transfer_buffer_length -
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ else
+ td->urb->actual_length = 0;
+
+@@ -1997,7 +1997,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /* Maybe the event was for the data stage? */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
+@@ -2033,7 +2033,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ frame->status = 0;
+ break;
+ }
+@@ -2078,7 +2078,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ }
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ frame->actual_length = len;
+@@ -2136,7 +2136,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb ||
+- TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+@@ -2164,18 +2164,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+- TRB_LEN(le32_to_cpu(event->transfer_len)));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+- TRB_LEN(le32_to_cpu(event->transfer_len)));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ td->urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+@@ -2217,7 +2217,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ if (trb_comp_code != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -2283,7 +2283,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * transfer type
+ */
+ case COMP_SUCCESS:
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+ break;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ trb_comp_code = COMP_SHORT_TX;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index c519a31..8b4cce45 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -968,6 +968,10 @@ struct xhci_transfer_event {
+ __le32 flags;
+ };
+
++/* Transfer event TRB length bit mask */
++/* bits 0:23 */
++#define EVENT_TRB_LEN(p) ((p) & 0xffffff)
++
+ /** Transfer Event bit fields **/
+ #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+
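The xhci change is one macro with wide fallout: a transfer-event TRB packs the residual byte count into bits 0:23 of transfer_len and the completion code into bits 24:31, while the general-purpose TRB_LEN() macro masks only the 17-bit length field of an ordinary transfer TRB, silently truncating event lengths above 128 KiB. A self-contained illustration of the decode (TRB_LEN and GET_COMP_CODE as defined in xhci.h, EVENT_TRB_LEN as added by this patch):

#include <stdint.h>
#include <stdio.h>

#define TRB_LEN(p)         ((p) & 0x1ffff)      /* transfer TRB: 17-bit length */
#define EVENT_TRB_LEN(p)   ((p) & 0xffffff)     /* event TRB: bits 0:23 */
#define GET_COMP_CODE(p)   (((p) >> 24) & 0xff) /* event TRB: bits 24:31 */

int main(void)
{
        uint32_t transfer_len = (1u << 24) | 200000; /* code 1, 200000 bytes left */

        printf("wrong: %u  right: %u  code: %u\n",
               TRB_LEN(transfer_len), EVENT_TRB_LEN(transfer_len),
               GET_COMP_CODE(transfer_len));
        return 0;
}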
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 71c4696..878ff05 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -648,6 +648,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
++ { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 97e0a6b..809c03a 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,13 @@
+ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+ /*
++ * Mitsubishi Electric Corp. (http://www.meau.com)
++ * Submitted by Konstantin Holoborodko
++ */
++#define MITSUBISHI_VID 0x06D3
++#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
++
++/*
+ * Definitions for B&B Electronics products.
+ */
+ #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 833dddb..53ab273 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -587,6 +587,7 @@ struct block_device *bdgrab(struct block_device *bdev)
+ ihold(bdev->bd_inode);
+ return bdev;
+ }
++EXPORT_SYMBOL(bdgrab);
+
+ long nr_blockdev_pages(void)
+ {
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f5fbe57..8d4d53d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3996,7 +3996,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ spin_lock(&block_rsv->lock);
+ spin_lock(&sinfo->lock);
+
+- block_rsv->size = num_bytes;
++ block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+
+ num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
+ sinfo->bytes_reserved + sinfo->bytes_readonly +
+diff --git a/fs/dcache.c b/fs/dcache.c
+index bb7f4cc..e923bf4 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2445,7 +2445,6 @@ static int prepend_path(const struct path *path,
+ bool slash = false;
+ int error = 0;
+
+- br_read_lock(vfsmount_lock);
+ while (dentry != root->dentry || vfsmnt != root->mnt) {
+ struct dentry * parent;
+
+@@ -2475,8 +2474,6 @@ static int prepend_path(const struct path *path,
+ if (!error && !slash)
+ error = prepend(buffer, buflen, "/", 1);
+
+-out:
+- br_read_unlock(vfsmount_lock);
+ return error;
+
+ global_root:
+@@ -2493,7 +2490,7 @@ global_root:
+ error = prepend(buffer, buflen, "/", 1);
+ if (!error)
+ error = vfsmnt->mnt_ns ? 1 : 2;
+- goto out;
++ return error;
+ }
+
+ /**
+@@ -2520,9 +2517,11 @@ char *__d_path(const struct path *path,
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, root, &res, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error < 0)
+ return ERR_PTR(error);
+@@ -2539,9 +2538,11 @@ char *d_absolute_path(const struct path *path,
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error > 1)
+ error = -EINVAL;
+@@ -2605,11 +2606,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
+ return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
+
+ get_fs_root(current->fs, &root);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = path_with_deleted(path, &root, &res, &buflen);
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+ if (error < 0)
+ res = ERR_PTR(error);
+- write_sequnlock(&rename_lock);
+ path_put(&root);
+ return res;
+ }
+@@ -2764,6 +2767,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ get_fs_root_and_pwd(current->fs, &root, &pwd);
+
+ error = -ENOENT;
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ if (!d_unlinked(pwd.dentry)) {
+ unsigned long len;
+@@ -2773,6 +2777,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ prepend(&cwd, &buflen, "\0", 1);
+ error = prepend_path(&pwd, &root, &cwd, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error < 0)
+ goto out;
+@@ -2793,6 +2798,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ }
+ } else {
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+ }
+
+ out:
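The dcache change hoists br_read_lock(vfsmount_lock) out of prepend_path() and into every caller, always taken before write_seqlock(&rename_lock) and released after it, giving one fixed ordering across __d_path(), d_absolute_path(), d_path() and getcwd(). A toy sketch of the resulting discipline, with pthread stand-ins for the kernel primitives:

#include <pthread.h>

static pthread_rwlock_t mount_lock = PTHREAD_RWLOCK_INITIALIZER; /* vfsmount_lock */
static pthread_mutex_t rename_lock = PTHREAD_MUTEX_INITIALIZER;  /* rename_lock */

static void prepend_path_locked(void)
{
        /* walks dentries and mounts; every caller holds both locks */
}

void d_path_like(void)
{
        pthread_rwlock_rdlock(&mount_lock);     /* always outermost */
        pthread_mutex_lock(&rename_lock);
        prepend_path_locked();
        pthread_mutex_unlock(&rename_lock);
        pthread_rwlock_unlock(&mount_lock);     /* released last */
}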
+diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
+index d055c75..7326e6e 100644
+--- a/fs/nfs/blocklayout/blocklayoutdm.c
++++ b/fs/nfs/blocklayout/blocklayoutdm.c
+@@ -52,7 +52,8 @@ static void dev_remove(dev_t dev)
+ dprintk("Entering %s\n", __func__);
+
+ memset(&msg, 0, sizeof(msg));
+- msg.data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
++ msg.len = sizeof(bl_msg) + bl_msg.totallen;
++ msg.data = kzalloc(msg.len, GFP_NOFS);
+ if (!msg.data)
+ goto out;
+
+@@ -63,7 +64,6 @@ static void dev_remove(dev_t dev)
+ memcpy(msg.data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg.data;
+ memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
+- msg.len = sizeof(bl_msg) + bl_msg.totallen;
+
+ add_wait_queue(&bl_wq, &wq);
+ if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6d7c53d..5639efd 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3578,7 +3578,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+- int ret = -ENOMEM, npages, i, acl_len = 0;
++ int ret = -ENOMEM, npages, i;
++ size_t acl_len = 0;
+
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+@@ -6108,22 +6109,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+ static void nfs4_layoutcommit_release(void *calldata)
+ {
+ struct nfs4_layoutcommit_data *data = calldata;
+- struct pnfs_layout_segment *lseg, *tmp;
+- unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
+
+ pnfs_cleanup_layoutcommit(data);
+- /* Matched by references in pnfs_set_layoutcommit */
+- list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
+- list_del_init(&lseg->pls_lc_list);
+- if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
+- &lseg->pls_flags))
+- put_lseg(lseg);
+- }
+-
+- clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+- smp_mb__after_clear_bit();
+- wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+-
+ put_rpccred(data->cred);
+ kfree(data);
+ }
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 3ad6595..d12514a 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1356,11 +1356,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode == IOMODE_RW &&
+- test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
++ test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ list_add(&lseg->pls_lc_list, listp);
+ }
+ }
+
++static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
++{
++ struct pnfs_layout_segment *lseg, *tmp;
++ unsigned long *bitlock = &NFS_I(inode)->flags;
++
++ /* Matched by references in pnfs_set_layoutcommit */
++ list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
++ list_del_init(&lseg->pls_lc_list);
++ put_lseg(lseg);
++ }
++
++ clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
++ smp_mb__after_clear_bit();
++ wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
++}
++
+ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
+ {
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+@@ -1409,6 +1425,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
+
+ if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
+ nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
++ pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
+ }
+
+ /*
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 800c215..24afa96 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -280,7 +280,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ iattr->ia_valid |= ATTR_SIZE;
+ }
+ if (bmval[0] & FATTR4_WORD0_ACL) {
+- int nace;
++ u32 nace;
+ struct nfs4_ace *ace;
+
+ READ_BUF(4); len += 4;
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 6bc346c..04eecc4 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
+ if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
+ return -ENOSPC;
+
+- if (name[0] == '.' && (name[1] == '\0' ||
+- (name[1] == '.' && name[2] == '\0')))
++ if (name[0] == '.' && (namelen < 2 ||
++ (namelen == 2 && name[1] == '.')))
+ return 0;
+
+ dentry = lookup_one_len(name, dbuf->xadir, namelen);
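The reiserfs hunk fixes a subtle readdir contract: the (name, namelen) pair handed to a filldir callback is not guaranteed to be NUL-terminated, so "." and ".." must be recognized by length, never by scanning for '\0'. The fixed test, as a standalone helper:

#include <stdbool.h>

bool is_dot_or_dotdot(const char *name, int namelen)
{
        return name[0] == '.' &&
               (namelen == 1 || (namelen == 2 && name[1] == '.'));
}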
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index fabbb81..3899e24 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -985,6 +985,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
++ else
++ return 0;
+ }
+ if (filp->f_pos == 1) {
+ if (parent_sd->s_parent)
+@@ -993,6 +995,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
++ else
++ return 0;
+ }
+ mutex_lock(&sysfs_mutex);
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+@@ -1023,10 +1027,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ return 0;
+ }
+
++static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ loff_t ret;
++
++ mutex_lock(&inode->i_mutex);
++ ret = generic_file_llseek(file, offset, whence);
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
+
+ const struct file_operations sysfs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = sysfs_readdir,
+ .release = sysfs_dir_release,
+- .llseek = generic_file_llseek,
++ .llseek = sysfs_dir_llseek,
+ };
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index ae0e76b..2f467e5 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1583,6 +1583,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ c->remounting_rw = 1;
+ c->ro_mount = 0;
+
++ if (c->space_fixup) {
++ err = ubifs_fixup_free_space(c);
++ if (err)
++ return err;
++ }
++
+ err = check_free_space(c);
+ if (err)
+ goto out;
+@@ -1699,12 +1705,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ err = dbg_check_space_info(c);
+ }
+
+- if (c->space_fixup) {
+- err = ubifs_fixup_free_space(c);
+- if (err)
+- goto out;
+- }
+-
+ mutex_unlock(&c->umount_mutex);
+ return err;
+
+diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
+index 5142a82..604f5fc 100644
+--- a/fs/udf/udf_sb.h
++++ b/fs/udf/udf_sb.h
+@@ -82,7 +82,7 @@ struct udf_virtual_data {
+ struct udf_bitmap {
+ __u32 s_extLength;
+ __u32 s_extPosition;
+- __u16 s_nr_groups;
++ int s_nr_groups;
+ struct buffer_head **s_block_bitmap;
+ };
+
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 47b4a27..5ef859a 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -108,7 +108,7 @@ struct thermal_zone_device {
+ /* Adding event notification support elements */
+ #define THERMAL_GENL_FAMILY_NAME "thermal_event"
+ #define THERMAL_GENL_VERSION 0x01
+-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group"
++#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+
+ enum events {
+ THERMAL_AUX0,
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index fd4a7b1..cd068b2 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,7 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
+ */
+ int tick_check_broadcast_device(struct clock_event_device *dev)
+ {
+- if ((tick_broadcast_device.evtdev &&
++ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
++ (tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+ (dev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 0;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index eae90af..dff37a6 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1573,7 +1573,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+- vma = mm->mmap_cache;
++ vma = ACCESS_ONCE(mm->mmap_cache);
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ struct rb_node * rb_node;
+
+diff --git a/mm/nommu.c b/mm/nommu.c
+index f59e170..f0cd7ab 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -807,7 +807,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ struct vm_area_struct *vma;
+
+ /* check the cache first */
+- vma = mm->mmap_cache;
++ vma = ACCESS_ONCE(mm->mmap_cache);
+ if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+ return vma;
+
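Both mmap.c and nommu.c get the same fix: mm->mmap_cache is read locklessly and may be updated concurrently, so it must be loaded exactly once into a local before being validated and returned — otherwise the compiler may legally re-read the pointer between the check and the use. A userspace sketch with a C11 relaxed load standing in for ACCESS_ONCE():

#include <stdatomic.h>
#include <stddef.h>

struct vma { unsigned long vm_start, vm_end; };

static _Atomic(struct vma *) mmap_cache;        /* updated by other threads */

struct vma *find_vma_cached(unsigned long addr)
{
        /* one load; every later use sees the same snapshot */
        struct vma *vma = atomic_load_explicit(&mmap_cache, memory_order_relaxed);

        if (vma && vma->vm_start <= addr && addr < vma->vm_end)
                return vma;
        return NULL;    /* caller falls back to the rb-tree walk */
}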
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 5471628..963f285 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -110,13 +110,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ grp = rtnl_dereference(real_dev->vlgrp);
+ BUG_ON(!grp);
+
+- /* Take it out of our own structures, but be sure to interlock with
+- * HW accelerating devices or SW vlan input packet processing if
+- * VLAN is not 0 (leave it there for 802.1p).
+- */
+- if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
+- ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+-
+ grp->nr_vlans--;
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+@@ -139,6 +132,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ call_rcu(&grp->rcu, vlan_rcu_free);
+ }
+
++ /* Take it out of our own structures, but be sure to interlock with
++ * HW accelerating devices or SW vlan input packet processing if
++ * VLAN is not 0 (leave it there for 802.1p).
++ */
++ if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
++ ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
++
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(real_dev);
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index b23bbbf..720aea0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3193,6 +3193,7 @@ int netdev_rx_handler_register(struct net_device *dev,
+ if (dev->rx_handler)
+ return -EBUSY;
+
++ /* Note: rx_handler_data must be set before rx_handler */
+ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+@@ -3213,6 +3214,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
+
+ ASSERT_RTNL();
+ RCU_INIT_POINTER(dev->rx_handler, NULL);
++ /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
++ * section has a guarantee to see a non NULL rx_handler_data
++ * as well.
++ */
++ synchronize_net();
+ RCU_INIT_POINTER(dev->rx_handler_data, NULL);
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
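The netdev_rx_handler hunks encode a publish/retire protocol: on register, rx_handler_data is stored before rx_handler; on unregister, rx_handler is cleared first and synchronize_net() waits out all RCU readers before rx_handler_data is cleared, so a reader that sees a non-NULL handler is guaranteed a valid data pointer. A userspace analogue using C11 acquire/release in place of RCU (names illustrative):

#include <stdatomic.h>
#include <stddef.h>

typedef int (*handler_fn)(void *);

static _Atomic(void *) handler_data;
static _Atomic(handler_fn) handler;

static void wait_for_readers(void)
{
        /* stands in for synchronize_net(): returns only once every
         * reader that could have seen the old pointers is done */
}

void register_handler(handler_fn h, void *data)
{
        /* data is published before the handler, mirroring the new comment:
         * rx_handler_data must be set before rx_handler */
        atomic_store_explicit(&handler_data, data, memory_order_release);
        atomic_store_explicit(&handler, h, memory_order_release);
}

void unregister_handler(void)
{
        atomic_store_explicit(&handler, NULL, memory_order_release);
        wait_for_readers();
        atomic_store_explicit(&handler_data, NULL, memory_order_release);
}

int dispatch(void)
{
        handler_fn h = atomic_load_explicit(&handler, memory_order_acquire);

        if (!h)
                return 0;
        /* seeing a non-NULL handler guarantees non-NULL data */
        return h(atomic_load_explicit(&handler_data, memory_order_acquire));
}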
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 1b1f7af..3124e17 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2265,11 +2265,8 @@ void tcp_enter_loss(struct sock *sk, int how)
+ if (tcp_is_reno(tp))
+ tcp_reset_reno_sack(tp);
+
+- if (!how) {
+- /* Push undo marker, if it was plain RTO and nothing
+- * was retransmitted. */
+- tp->undo_marker = tp->snd_una;
+- } else {
++ tp->undo_marker = tp->snd_una;
++ if (how) {
+ tp->sacked_out = 0;
+ tp->fackets_out = 0;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9bb7400..5c1807c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1587,8 +1587,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ goto send_now;
+ }
+
+- /* Ok, it looks like it is advisable to defer. */
+- tp->tso_deferred = 1 | (jiffies << 1);
++ /* Ok, it looks like it is advisable to defer.
++ * Do not rearm the timer if already set to not break TCP ACK clocking.
++ */
++ if (!tp->tso_deferred)
++ tp->tso_deferred = 1 | (jiffies << 1);
+
+ return 1;
+
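The tcp_output fix hinges on how tso_deferred is encoded: bit 0 flags "a deferral is pending" and the remaining bits hold jiffies << 1 from the first deferral. Re-arming unconditionally kept pushing the stored timestamp forward, which could stall the ACK clock; the fix only arms when the field is still zero. The packing, spelled out:

static unsigned long tso_deferred;

void tso_defer(unsigned long jiffies_now)
{
        /* |1 keeps the word non-zero even when jiffies_now << 1 == 0 */
        if (!tso_deferred)
                tso_deferred = 1UL | (jiffies_now << 1);
}

unsigned long tso_deferred_since(void)
{
        return tso_deferred >> 1;       /* valid only while bit 0 is set */
}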
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index b27baed..8589c2d 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4658,26 +4658,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
+
+ static int __net_init addrconf_init_net(struct net *net)
+ {
+- int err;
++ int err = -ENOMEM;
+ struct ipv6_devconf *all, *dflt;
+
+- err = -ENOMEM;
+- all = &ipv6_devconf;
+- dflt = &ipv6_devconf_dflt;
++ all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
++ if (all == NULL)
++ goto err_alloc_all;
+
+- if (!net_eq(net, &init_net)) {
+- all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
+- if (all == NULL)
+- goto err_alloc_all;
++ dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
++ if (dflt == NULL)
++ goto err_alloc_dflt;
+
+- dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+- if (dflt == NULL)
+- goto err_alloc_dflt;
+- } else {
+- /* these will be inherited by all namespaces */
+- dflt->autoconf = ipv6_defaults.autoconf;
+- dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+- }
++ /* these will be inherited by all namespaces */
++ dflt->autoconf = ipv6_defaults.autoconf;
++ dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+
+ net->ipv6.devconf_all = all;
+ net->ipv6.devconf_dflt = dflt;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index f8d24dd..6a4f4f3 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -111,6 +111,27 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
+ ipv6_addr_loopback(&hdr->daddr))
+ goto err;
+
++ /* RFC4291 Errata ID: 3480
++ * Interface-Local scope spans only a single interface on a
++ * node and is useful only for loopback transmission of
++ * multicast. Packets with interface-local scope received
++ * from another node must be discarded.
++ */
++ if (!(skb->pkt_type == PACKET_LOOPBACK ||
++ dev->flags & IFF_LOOPBACK) &&
++ ipv6_addr_is_multicast(&hdr->daddr) &&
++ IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
++ goto err;
++
++ /* RFC4291 2.7
++ * Nodes must not originate a packet to a multicast address whose scope
++ * field contains the reserved value 0; if such a packet is received, it
++ * must be silently dropped.
++ */
++ if (ipv6_addr_is_multicast(&hdr->daddr) &&
++ IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
++ goto err;
++
+ /*
+ * RFC4291 2.7
+ * Multicast addresses must not be used as source addresses in IPv6
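Both new ip6_input checks key off the IPv6 multicast scope field, the low nibble of the second address byte: scope 0 is reserved (always drop), and scope 1 is interface-local (drop unless the packet looped back locally). The decode and policy in plain C:

#include <netinet/in.h>
#include <stdbool.h>

static unsigned int ipv6_mc_scope(const struct in6_addr *a)
{
        return a->s6_addr[1] & 0x0f;    /* IPV6_ADDR_MC_SCOPE() */
}

bool drop_multicast_dst(const struct in6_addr *daddr, bool looped_back)
{
        if (daddr->s6_addr[0] != 0xff)
                return false;                   /* not multicast at all */
        switch (ipv6_mc_scope(daddr)) {
        case 0:
                return true;                    /* RFC4291 2.7: reserved scope */
        case 1:
                return !looped_back;            /* errata 3480: interface-local */
        default:
                return false;
        }
}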
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index c24f25a..f4b49c5 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2584,8 +2584,10 @@ bed:
+ NULL, NULL, NULL);
+
+ /* Check if the we got some results */
+- if (!self->cachedaddr)
+- return -EAGAIN; /* Didn't find any devices */
++ if (!self->cachedaddr) {
++ err = -EAGAIN; /* Didn't find any devices */
++ goto out;
++ }
+ daddr = self->cachedaddr;
+ /* Cleanup */
+ self->cachedaddr = 0;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 482fa57..874f8ff 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -134,6 +134,7 @@ int genl_register_mc_group(struct genl_family *family,
+ int err = 0;
+
+ BUG_ON(grp->name[0] == '\0');
++ BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
+
+ genl_lock();
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 18c5a50..dc6af27 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -139,6 +139,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
+ task->tk_waitqueue = queue;
+ queue->qlen++;
++ /* barrier matches the read in rpc_wake_up_task_queue_locked() */
++ smp_wmb();
+ rpc_set_queued(task);
+
+ dprintk("RPC: %5u added to queue %p \"%s\"\n",
+@@ -389,8 +391,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
+ */
+ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+ {
+- if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+- __rpc_do_wake_up_task(queue, task);
++ if (RPC_IS_QUEUED(task)) {
++ smp_rmb();
++ if (task->tk_waitqueue == queue)
++ __rpc_do_wake_up_task(queue, task);
++ }
+ }
+
+ /*
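The sched.c pairing is the textbook store/load barrier idiom: the enqueue side writes tk_waitqueue, issues smp_wmb(), then sets the QUEUED flag; the wakeup side tests the flag, issues smp_rmb(), then trusts tk_waitqueue. In C11 terms this is a release store paired with an acquire load:

#include <stdatomic.h>
#include <stdbool.h>

static void *tk_waitqueue;              /* plain field, guarded by the flag */
static atomic_bool task_queued;

void rpc_enqueue(void *queue)
{
        tk_waitqueue = queue;
        /* release pairs with the acquire below (smp_wmb()/smp_rmb()) */
        atomic_store_explicit(&task_queued, true, memory_order_release);
}

bool rpc_try_wake(void *queue)
{
        if (!atomic_load_explicit(&task_queued, memory_order_acquire))
                return false;
        return tk_waitqueue == queue;   /* guaranteed to see the store above */
}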
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 317bfe3..18978b6 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -371,7 +371,7 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
+
+-static int unix_release_sock(struct sock *sk, int embrion)
++static void unix_release_sock(struct sock *sk, int embrion)
+ {
+ struct unix_sock *u = unix_sk(sk);
+ struct dentry *dentry;
+@@ -444,8 +444,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
+
+ if (unix_tot_inflight)
+ unix_gc(); /* Garbage collect fds */
+-
+- return 0;
+ }
+
+ static void init_peercred(struct sock *sk)
+@@ -682,9 +680,10 @@ static int unix_release(struct socket *sock)
+ if (!sk)
+ return 0;
+
++ unix_release_sock(sk, 0);
+ sock->sk = NULL;
+
+- return unix_release_sock(sk, 0);
++ return 0;
+ }
+
+ static int unix_autobind(struct socket *sock)
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 0b3f5d7..b70eaa2 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1067,12 +1067,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
+ ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+ if (ret < 0)
+- return ret;
++ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+-
++err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
+index 971eaf0..4969c98 100644
+--- a/sound/soc/imx/imx-ssi.c
++++ b/sound/soc/imx/imx-ssi.c
+@@ -573,6 +573,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
+
+ if (imx_ssi->ac97_reset)
+ imx_ssi->ac97_reset(ac97);
++ /* First read sometimes fails, do a dummy read */
++ imx_ssi_ac97_read(ac97, 0);
+ }
+
+ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
+@@ -581,6 +583,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
+
+ if (imx_ssi->ac97_warm_reset)
+ imx_ssi->ac97_warm_reset(ac97);
++
++ /* First read sometimes fails, do a dummy read */
++ imx_ssi_ac97_read(ac97, 0);
+ }
+
+ struct snd_ac97_bus_ops soc_ac97_ops = {
+diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
+index db74005..1a757c3 100644
+--- a/sound/soc/sh/dma-sh7760.c
++++ b/sound/soc/sh/dma-sh7760.c
+@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ return 0;
+ }
+
+-static struct snd_soc_platform sh7760_soc_platform = {
+- .pcm_ops = &camelot_pcm_ops,
++static struct snd_soc_platform_driver sh7760_soc_platform = {
++ .ops = &camelot_pcm_ops,
+ .pcm_new = camelot_pcm_new,
+ .pcm_free = camelot_pcm_free,
+ };
diff --git a/3.2.54/1043_linux-3.2.44.patch b/3.2.54/1043_linux-3.2.44.patch
new file mode 100644
index 0000000..3d5e6ff
--- /dev/null
+++ b/3.2.54/1043_linux-3.2.44.patch
@@ -0,0 +1,2808 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index ddbf18e..897f223 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -948,6 +948,20 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN
+ capability is set.
+
++ i915.invert_brightness=
++ [DRM] Invert the sense of the variable that is used to
++ set the brightness of the panel backlight. Normally a
++ brightness value of 0 indicates backlight switched off,
++ and the maximum of the brightness value sets the backlight
++ to maximum brightness. If this parameter is set to 0
++ (default) and the machine requires it, or this parameter
++ is set to 1, a brightness value of 0 sets the backlight
++ to maximum brightness, and the maximum of the brightness
++ value switches the backlight off.
++ -1 -- never invert brightness
++ 0 -- machine default
++ 1 -- force brightness inversion
++
+ icn= [HW,ISDN]
+ Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
+
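For reference, once this patch is applied the new knob can be exercised three ways; these are the standard module-parameter conventions rather than anything this patch adds, and the runtime path works because the parameter is declared with mode 0600 further down in this same patch:

i915.invert_brightness=1                                  (kernel command line, built-in driver)
options i915 invert_brightness=1                          (/etc/modprobe.d/i915.conf, modular driver)
echo 1 > /sys/module/i915/parameters/invert_brightness    (at runtime)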
+diff --git a/Makefile b/Makefile
+index 59130db..566750c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
+index 4112200..c52a8ef 100644
+--- a/arch/alpha/kernel/sys_nautilus.c
++++ b/arch/alpha/kernel/sys_nautilus.c
+@@ -189,6 +189,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
+ extern void free_reserved_mem(void *, void *);
+ extern void pcibios_claim_one_bus(struct pci_bus *);
+
++static struct resource irongate_io = {
++ .name = "Irongate PCI IO",
++ .flags = IORESOURCE_IO,
++};
+ static struct resource irongate_mem = {
+ .name = "Irongate PCI MEM",
+ .flags = IORESOURCE_MEM,
+@@ -210,6 +214,7 @@ nautilus_init_pci(void)
+
+ irongate = pci_get_bus_and_slot(0, 0);
+ bus->self = irongate;
++ bus->resource[0] = &irongate_io;
+ bus->resource[1] = &irongate_mem;
+
+ pci_bus_size_bridges(bus);
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index ecebb89..a559ee7 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -326,7 +326,10 @@ validate_event(struct pmu_hw_events *hw_events,
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
+- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
++ return 1;
++
++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
+index e0b0e7a..09f8851 100644
+--- a/arch/arm/mm/cache-feroceon-l2.c
++++ b/arch/arm/mm/cache-feroceon-l2.c
+@@ -342,6 +342,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
++ outer_cache.inv_all = l2_inv_all;
+
+ enable_l2();
+
+diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
+index 88fb3d9..927a639 100644
+--- a/arch/arm/mm/proc-arm920.S
++++ b/arch/arm/mm/proc-arm920.S
+@@ -380,7 +380,7 @@ ENTRY(cpu_arm920_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl cpu_arm920_suspend_size
+ .equ cpu_arm920_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm920_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
+index 9f8fd91..090f18f 100644
+--- a/arch/arm/mm/proc-arm926.S
++++ b/arch/arm/mm/proc-arm926.S
+@@ -395,7 +395,7 @@ ENTRY(cpu_arm926_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl cpu_arm926_suspend_size
+ .equ cpu_arm926_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm926_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
+index 7d91545..6594aef 100644
+--- a/arch/arm/mm/proc-sa1100.S
++++ b/arch/arm/mm/proc-sa1100.S
+@@ -169,7 +169,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
+
+ .globl cpu_sa1100_suspend_size
+ .equ cpu_sa1100_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_sa1100_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c3, c0, 0 @ domain ID
+diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
+index d061d2f..8168d99 100644
+--- a/arch/arm/mm/proc-v6.S
++++ b/arch/arm/mm/proc-v6.S
+@@ -129,7 +129,7 @@ ENTRY(cpu_v6_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
+ .globl cpu_v6_suspend_size
+ .equ cpu_v6_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_v6_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
+diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
+index abf0507..5c4969d 100644
+--- a/arch/arm/mm/proc-xsc3.S
++++ b/arch/arm/mm/proc-xsc3.S
+@@ -407,7 +407,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
+
+ .globl cpu_xsc3_suspend_size
+ .equ cpu_xsc3_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xsc3_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
+index 3277904..b09d036 100644
+--- a/arch/arm/mm/proc-xscale.S
++++ b/arch/arm/mm/proc-xscale.S
+@@ -521,7 +521,7 @@ ENTRY(cpu_xscale_set_pte_ext)
+
+ .globl cpu_xscale_suspend_size
+ .equ cpu_xscale_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xscale_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index dc36ea6..eb19b6c 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
+ (0x1UL << 4), &dummy1, &dummy2);
+ if (lpar_rc == H_SUCCESS)
+ return i;
+- BUG_ON(lpar_rc != H_NOT_FOUND);
++
++ /*
++ * The test for adjunct partition is performed before the
++ * ANDCOND test. H_RESOURCE may be returned, so we need to
++ * check for that as well.
++ */
++ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
+
+ slot_offset++;
+ slot_offset &= 0x7;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index b4973f4..cfb5a40 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -393,8 +393,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+
+ struct {
+ u64 msr_val;
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index a7d2db9..91e758b 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -740,7 +740,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+ }
+
+-void arch_flush_lazy_mmu_mode(void);
++static inline void arch_flush_lazy_mmu_mode(void)
++{
++ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
++}
+
+ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 8e8b9a4..faf2c04 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -91,6 +91,7 @@ struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
++ void (*flush)(void);
+ };
+
+ struct pv_time_ops {
+@@ -680,6 +681,7 @@ void paravirt_end_context_switch(struct task_struct *next);
+
+ void paravirt_enter_lazy_mmu(void);
+ void paravirt_leave_lazy_mmu(void);
++void paravirt_flush_lazy_mmu(void);
+
+ void _paravirt_nop(void);
+ u32 _paravirt_ident_32(u32);
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index d90272e..84c938f 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -261,6 +261,18 @@ void paravirt_leave_lazy_mmu(void)
+ leave_lazy(PARAVIRT_LAZY_MMU);
+ }
+
++void paravirt_flush_lazy_mmu(void)
++{
++ preempt_disable();
++
++ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
++ arch_leave_lazy_mmu_mode();
++ arch_enter_lazy_mmu_mode();
++ }
++
++ preempt_enable();
++}
++
+ void paravirt_start_context_switch(struct task_struct *prev)
+ {
+ BUG_ON(preemptible());
+@@ -290,18 +302,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return percpu_read(paravirt_lazy_mode);
+ }
+
+-void arch_flush_lazy_mmu_mode(void)
+-{
+- preempt_disable();
+-
+- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+- arch_leave_lazy_mmu_mode();
+- arch_enter_lazy_mmu_mode();
+- }
+-
+- preempt_enable();
+-}
+-
+ struct pv_info pv_info = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
++ .flush = paravirt_nop,
+ },
+
+ .set_fixmap = native_set_fixmap,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f4063fd..e82a53a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1105,7 +1105,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ {
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+- void *shared_kaddr;
+ unsigned long this_tsc_khz;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;
+@@ -1141,7 +1140,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1199,14 +1198,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+-
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr, KM_USER0);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1486,7 +1480,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+ return 0;
+ }
+
+- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
++ sizeof(u32)))
+ return 1;
+
+ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+@@ -1496,10 +1491,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1591,6 +1583,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1600,16 +1593,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+-
+- if (is_error_page(vcpu->arch.time_page)) {
+- kvm_release_page_clean(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL,
++ sizeof(struct pvclock_vcpu_time_info)))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+ break;
+ }
+ case MSR_KVM_ASYNC_PF_EN:
+@@ -1625,7 +1616,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ return 1;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+- data & KVM_STEAL_VALID_BITS))
++ data & KVM_STEAL_VALID_BITS,
++ sizeof(struct kvm_steal_time)))
+ return 1;
+
+ vcpu->arch.st.msr_val = data;
+@@ -6549,6 +6541,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+ goto fail_free_mce_banks;
+
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+
+ return 0;
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index cf4603b..8f4fda4 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1328,6 +1328,7 @@ __init void lguest_init(void)
+ pv_mmu_ops.read_cr3 = lguest_read_cr3;
+ pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
++ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
+ pv_mmu_ops.pte_update = lguest_pte_update;
+ pv_mmu_ops.pte_update_defer = lguest_pte_update;
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 7b73c88..53a7b69 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -377,10 +377,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+ if (pgd_none(*pgd_ref))
+ return -1;
+
+- if (pgd_none(*pgd))
++ if (pgd_none(*pgd)) {
+ set_pgd(pgd, *pgd_ref);
+- else
++ arch_flush_lazy_mmu_mode();
++ } else {
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++ }
+
+ /*
+ * Below here mismatches are bugs because these lower tables
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 2b8b0de..fe00be6 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -2079,6 +2079,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_mmu,
+ .leave = xen_leave_lazy_mmu,
++ .flush = paravirt_flush_lazy_mmu,
+ },
+
+ .set_fixmap = xen_set_fixmap,
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index f0b2ca8..1789e7a 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -200,6 +200,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+ unsigned long val; \
+ ssize_t ret; \
+ ret = queue_var_store(&val, page, count); \
++ if (ret < 0) \
++ return ret; \
+ if (neg) \
+ val = !val; \
+ \
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 1a25263..b97b186 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
+
+ struct crypto_rfc4543_req_ctx {
+ u8 auth_tag[16];
++ u8 assocbuf[32];
+ struct scatterlist cipher[1];
+ struct scatterlist payload[2];
+ struct scatterlist assoc[2];
+@@ -1142,9 +1143,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+ scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
+ assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
+
+- sg_init_table(assoc, 2);
+- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+- req->assoc->offset);
++ if (req->assoc->length == req->assoclen) {
++ sg_init_table(assoc, 2);
++ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
++ req->assoc->offset);
++ } else {
++ BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
++
++ scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
++ req->assoclen, 0);
++
++ sg_init_table(assoc, 2);
++ sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
++ }
+ scatterwalk_crypto_chain(assoc, payload, 0, 2);
+
+ aead_request_set_tfm(subreq, ctx->child);
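The rfc4543 fix handles an assoc scatterlist whose first entry is shorter than assoclen: rather than chaining a possibly multi-entry list, it linearizes the associated data into a fixed 32-byte bounce buffer, BUG()ing if it would overflow. A generic sketch of the linearize-if-fragmented pattern (the chunk struct and names are illustrative, not the scatterlist API):

#include <string.h>

struct chunk { const void *p; size_t len; };

/* Returns a pointer covering `total` contiguous bytes: the original
 * buffer when it is already contiguous, else `buf` after copying. */
const void *linearize(const struct chunk *c, size_t nchunks, size_t total,
                      void *buf, size_t bufsz)
{
        size_t off = 0;

        if (nchunks == 1 && c[0].len >= total)
                return c[0].p;          /* fast path: use in place */
        if (total > bufsz)
                return NULL;            /* would overflow the bounce buffer */
        for (size_t i = 0; i < nchunks && off < total; i++) {
                size_t n = c[i].len < total - off ? c[i].len : total - off;

                memcpy((char *)buf + off, c[i].p, n);
                off += n;
        }
        return off == total ? buf : NULL;
}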
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index df47397..ddfc1c1 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -150,6 +150,7 @@ enum piix_controller_ids {
+ tolapai_sata,
+ piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+ ich8_sata_snb,
++ ich8_2port_sata_snb,
+ };
+
+ struct piix_map_db {
+@@ -326,7 +327,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+- { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+@@ -519,6 +520,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
+ [ich8m_apple_sata] = &ich8m_apple_map_db,
+ [tolapai_sata] = &tolapai_map_db,
+ [ich8_sata_snb] = &ich8_map_db,
++ [ich8_2port_sata_snb] = &ich8_2port_map_db,
+ };
+
+ static struct ata_port_info piix_port_info[] = {
+@@ -660,6 +662,16 @@ static struct ata_port_info piix_port_info[] = {
+ .port_ops = &piix_sata_ops,
+ },
+
++ [ich8_2port_sata_snb] =
++ {
++ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
++ | PIIX_FLAG_PIO16,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &piix_sata_ops,
++ },
++
+ };
+
+ static struct pci_bits piix_enable_bits[] = {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c9540c0..288b635 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2401,6 +2401,9 @@ int ata_dev_configure(struct ata_device *dev)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+ dev->max_sectors);
+
++ if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
++ dev->max_sectors = ATA_MAX_SECTORS_LBA48;
++
+ if (ap->ops->dev_config)
+ ap->ops->dev_config(dev);
+
+@@ -4057,6 +4060,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
++ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 012a9d2..144d37c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -274,6 +274,7 @@ enum intel_pch {
+
+ #define QUIRK_PIPEA_FORCE (1<<0)
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1)
++#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+
+ struct intel_fbdev;
+ struct intel_fbc_work;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 17961df..897ca06 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -25,6 +25,7 @@
+ */
+
+ #include <linux/cpufreq.h>
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <linux/input.h>
+ #include <linux/i2c.h>
+@@ -8831,6 +8832,16 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ }
+
++/*
++ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
++ * brightness value
++ */
++static void quirk_invert_brightness(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
++}
++
+ struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+@@ -8838,6 +8849,34 @@ struct intel_quirk {
+ void (*hook)(struct drm_device *dev);
+ };
+
++/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
++struct intel_dmi_quirk {
++ void (*hook)(struct drm_device *dev);
++ const struct dmi_system_id (*dmi_id_list)[];
++};
++
++static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
++{
++ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
++ return 1;
++}
++
++static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++ {
++ .dmi_id_list = &(const struct dmi_system_id[]) {
++ {
++ .callback = intel_dmi_reverse_brightness,
++ .ident = "NCR Corporation",
++ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, ""),
++ },
++ },
++ { } /* terminating entry */
++ },
++ .hook = quirk_invert_brightness,
++ },
++};
++
+ struct intel_quirk intel_quirks[] = {
+ /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+ { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+@@ -8865,6 +8904,18 @@ struct intel_quirk intel_quirks[] = {
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
++
++ /* Acer Aspire 5734Z must invert backlight brightness */
++ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
++
++ /* Acer/eMachines G725 */
++ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
++
++ /* Acer/eMachines e725 */
++ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
++
++ /* Acer/Packard Bell NCL20 */
++ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+@@ -8882,6 +8933,10 @@ static void intel_init_quirks(struct drm_device *dev)
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
++ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
++ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
++ intel_dmi_quirks[i].hook(dev);
++ }
+ }
+
+ /* Disable the VGA plane that we never use */
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 04d79fd..72b8949 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -28,6 +28,7 @@
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
++#include <linux/moduleparam.h>
+ #include "intel_drv.h"
+
+ #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+@@ -191,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
+ return max;
+ }
+
++static int i915_panel_invert_brightness;
++MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
++ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
++ "report PCI device ID, subsystem vendor and subsystem device ID "
++ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
++ "It will then be included in an upcoming module version.");
++module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
++static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (i915_panel_invert_brightness < 0)
++ return val;
++
++ if (i915_panel_invert_brightness > 0 ||
++ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
++ return intel_panel_get_max_backlight(dev) - val;
++
++ return val;
++}
++
+ u32 intel_panel_get_backlight(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -211,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
+ }
+ }
+
++ val = intel_panel_compute_brightness(dev, val);
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+ }
+@@ -228,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
++ level = intel_panel_compute_brightness(dev, level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index 58434e8..37fe246 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -26,6 +26,7 @@
+ #include <linux/fb.h>
+
+ #include <linux/pci.h>
++#include <linux/console.h>
+ #include <linux/vga_switcheroo.h>
+
+ struct vga_switcheroo_client {
+@@ -256,8 +257,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+
+ if (new_client->fb_info) {
+ struct fb_event event;
++ console_lock();
+ event.info = new_client->fb_info;
+ fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
++ console_unlock();
+ }
+
+ ret = vgasr_priv.handler->switchto(new_client->id);
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index 1201a15..08e7e72 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't power on device\n", __func__);
++ pm_runtime_put_noidle(dev);
++ module_put(dev->driver->owner);
+ return ret;
+ }
+
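The hwspinlock fix is pure error-path bookkeeping: pm_runtime_get_sync() increments the device usage count even when it fails, and the function took a module reference earlier, so the failure path must drop both. A tiny runnable model of the rule:

#include <stdio.h>

static int module_refs, power_refs;

static int power_on(void) { power_refs++; return -1; /* simulate failure */ }

int request_lock(void)
{
        int ret;

        module_refs++;                  /* try_module_get() */
        ret = power_on();               /* pm_runtime_get_sync() */
        if (ret < 0) {
                power_refs--;           /* pm_runtime_put_noidle() */
                module_refs--;          /* module_put() */
                return ret;
        }
        return 0;
}

int main(void)
{
        request_lock();
        printf("refs after failed request: module=%d power=%d\n",
               module_refs, power_refs);        /* both back to 0 */
        return 0;
}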
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index e7dc732..1d90e26 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -1154,7 +1154,11 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+ unsigned long off;
+ u32 len;
+
+- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
++ /* This is broken because it assumes the MTD device is map-based
++ and that mtd->priv is a valid struct map_info. It should be
++ replaced with something that uses the mtd_get_unmapped_area()
++ operation properly. */
++ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ start = map->phys;
+ len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
+diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
+index c3dd9d0..1ee5f0c 100644
+--- a/drivers/net/can/sja1000/sja1000_of_platform.c
++++ b/drivers/net/can/sja1000/sja1000_of_platform.c
+@@ -94,8 +94,8 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource res;
+- const u32 *prop;
+- int err, irq, res_size, prop_size;
++ u32 prop;
++ int err, irq, res_size;
+ void __iomem *base;
+
+ err = of_address_to_resource(np, 0, &res);
+@@ -136,27 +136,27 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ priv->read_reg = sja1000_ofp_read_reg;
+ priv->write_reg = sja1000_ofp_write_reg;
+
+- prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->can.clock.freq = *prop / 2;
++ err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
++ if (!err)
++ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
+
+- prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->ocr |= *prop & OCR_MODE_MASK;
++ err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
++ if (!err)
++ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+- prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
++ err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
++ if (!err)
++ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+- prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
+- if (prop && (prop_size == sizeof(u32)) && *prop) {
+- u32 divider = priv->can.clock.freq * 2 / *prop;
++ err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
++ if (!err && prop) {
++ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+@@ -166,8 +166,7 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+- prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
+- if (!prop)
++ if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+
+ priv->irq_flags = IRQF_SHARED;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index a6153f1..d812790 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3516,6 +3516,30 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+ }
+ }
+
++static void rtl_speed_down(struct rtl8169_private *tp)
++{
++ u32 adv;
++ int lpa;
++
++ rtl_writephy(tp, 0x1f, 0x0000);
++ lpa = rtl_readphy(tp, MII_LPA);
++
++ if (lpa & (LPA_10HALF | LPA_10FULL))
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
++ else if (lpa & (LPA_100HALF | LPA_100FULL))
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
++ else
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
++ (tp->mii.supports_gmii ?
++ ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full : 0);
++
++ rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
++ adv);
++}
++
+ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+@@ -3541,9 +3565,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+ if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ return false;
+
+- rtl_writephy(tp, 0x1f, 0x0000);
+- rtl_writephy(tp, MII_BMCR, 0x0000);
+-
++ rtl_speed_down(tp);
+ rtl_wol_suspend_quirk(tp);
+
+ return true;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+index 06b3f0d..c16bea4 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+@@ -648,7 +648,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+- {0x00008264, 0x9bc00010},
++ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 966661c..84890d5 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -801,7 +801,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
+ * required version.
+ */
+ if (priv->fw_version_major != MAJOR_VERSION_REQ ||
+- priv->fw_version_minor != MINOR_VERSION_REQ) {
++ priv->fw_version_minor < MINOR_VERSION_REQ) {
+ dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
+ MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
+ return -EINVAL;
+diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
+index 17148bb..10fe07d 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
++++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
+@@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+- ERROR(rt2x00dev, "Indirect register access failed: "
+- "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
++ printk_once(KERN_ERR "%s() Indirect register access failed: "
++ "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
+ *reg = ~0;
+
+ return 0;
+diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
+index 2264331..b96766b 100644
+--- a/drivers/platform/x86/msi-wmi.c
++++ b/drivers/platform/x86/msi-wmi.c
+@@ -176,7 +176,7 @@ static void msi_wmi_notify(u32 value, void *context)
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
+ key->code, ktime_to_us(diff));
+- return;
++ goto msi_wmi_notify_exit;
+ }
+ last_pressed[key->code - SCANCODE_BASE] = cur;
+
+@@ -195,6 +195,8 @@ static void msi_wmi_notify(u32 value, void *context)
+ pr_info("Unknown key pressed - %x\n", eventcode);
+ } else
+ pr_info("Unknown event received\n");
++
++msi_wmi_notify_exit:
+ kfree(response.pointer);
+ }
+
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 0364ca2..d5f4eb8 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -393,8 +393,9 @@ static inline int core_alua_state_standby(
+ case REPORT_LUNS:
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+@@ -435,8 +436,9 @@ static inline int core_alua_state_unavailable(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+@@ -475,8 +477,9 @@ static inline int core_alua_state_transition(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9176b2e..898c1de 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1445,6 +1445,7 @@ static inline void transport_generic_prepare_cdb(
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
++ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+@@ -2683,7 +2684,7 @@ static int transport_generic_cmd_sequencer(
+ /*
+ * Check for emulated MI_REPORT_TARGET_PGS.
+ */
+- if (cdb[1] == MI_REPORT_TARGET_PGS &&
++ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_report_target_port_groups;
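The recurring "cdb[1] & 0x1f" in the two target_core hunks above follows SPC: for MAINTENANCE_IN the service action occupies only bits 4:0 of CDB byte 1, and the upper bits are reserved, so comparing the whole byte against MI_REPORT_TARGET_PGS wrongly rejects CDBs with reserved bits set. A one-line sketch of the decode (hypothetical helper name):

	#define MI_SERVICE_ACTION_MASK	0x1f	/* SPC: byte 1, bits 4:0 */

	/* Hypothetical helper: extract the MAINTENANCE_IN service action. */
	static inline u8 mi_service_action(const unsigned char *cdb)
	{
		return cdb[1] & MI_SERVICE_ACTION_MASK;
	}
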
+diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
+index dd9a574..f6fb292 100644
+--- a/drivers/thermal/thermal_sys.c
++++ b/drivers/thermal/thermal_sys.c
+@@ -1399,6 +1399,7 @@ static int __init thermal_init(void)
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
++ return result;
+ }
+ result = genetlink_init();
+ return result;
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index 18e875b..3ca6c0d 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -68,7 +68,6 @@ static int is_irda(struct usb_serial *serial)
+ }
+
+ struct ark3116_private {
+- wait_queue_head_t delta_msr_wait;
+ struct async_icount icount;
+ int irda; /* 1 for irda device */
+
+@@ -148,7 +147,6 @@ static int ark3116_attach(struct usb_serial *serial)
+ if (!priv)
+ return -ENOMEM;
+
+- init_waitqueue_head(&priv->delta_msr_wait);
+ mutex_init(&priv->hw_lock);
+ spin_lock_init(&priv->status_lock);
+
+@@ -460,10 +458,14 @@ static int ark3116_ioctl(struct tty_struct *tty,
+ case TIOCMIWAIT:
+ for (;;) {
+ struct async_icount prev = priv->icount;
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ if ((prev.rng == priv->icount.rng) &&
+ (prev.dsr == priv->icount.dsr) &&
+ (prev.dcd == priv->icount.dcd) &&
+@@ -584,7 +586,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+ }
+
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index 6ae1c06..c4d95b0 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -82,7 +82,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
+
+ struct ch341_private {
+ spinlock_t lock; /* access lock */
+- wait_queue_head_t delta_msr_wait; /* wait queue for modem status */
+ unsigned baud_rate; /* set baud rate */
+ u8 line_control; /* set line control value RTS/DTR */
+ u8 line_status; /* active status of modem control inputs */
+@@ -262,7 +261,6 @@ static int ch341_attach(struct usb_serial *serial)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->baud_rate = DEFAULT_BAUD_RATE;
+ priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
+
+@@ -299,7 +297,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on)
+ priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+
+ static void ch341_close(struct usb_serial_port *port)
+@@ -503,7 +501,7 @@ static void ch341_read_int_callback(struct urb *urb)
+ tty_kref_put(tty);
+ }
+
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+
+ exit:
+@@ -529,11 +527,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (!multi_change) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ multi_change = priv->multi_status_change;
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index d9906eb..01a44d3 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -150,7 +150,6 @@ struct cypress_private {
+ int baud_rate; /* stores current baud rate in
+ integer form */
+ int isthrottled; /* if throttled, discard reads */
+- wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */
+ char prev_status, diff_status; /* used for TIOCMIWAIT */
+ /* we pass a pointer to this as the argument sent to
+ cypress_set_termios old_termios */
+@@ -488,7 +487,6 @@ static int generic_startup(struct usb_serial *serial)
+ kfree(priv);
+ return -ENOMEM;
+ }
+- init_waitqueue_head(&priv->delta_msr_wait);
+
+ usb_reset_configuration(serial->dev);
+
+@@ -928,12 +926,16 @@ static int cypress_ioctl(struct tty_struct *tty,
+ switch (cmd) {
+ /* This code comes from drivers/char/serial.c and ftdi_sio.c */
+ case TIOCMIWAIT:
+- while (priv != NULL) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ for (;;) {
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+- else {
++
++ if (port->serial->disconnected)
++ return -EIO;
++
++ {
+ char diff = priv->diff_status;
+ if (diff == 0)
+ return -EIO; /* no change => error */
+@@ -1261,7 +1263,7 @@ static void cypress_read_int_callback(struct urb *urb)
+ if (priv->current_status != priv->prev_status) {
+ priv->diff_status |= priv->current_status ^
+ priv->prev_status;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ priv->prev_status = priv->current_status;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 878ff05..06394e5a 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -74,9 +74,7 @@ struct ftdi_private {
+ int flags; /* some ASYNC_xxxx flags are supported */
+ unsigned long last_dtr_rts; /* saved modem control outputs */
+ struct async_icount icount;
+- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ char prev_status; /* Used for TIOCMIWAIT */
+- bool dev_gone; /* Used to abort TIOCMIWAIT */
+ char transmit_empty; /* If transmitter is empty or not */
+ struct usb_serial_port *port;
+ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
+@@ -1708,10 +1706,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
+ kref_init(&priv->kref);
+ mutex_init(&priv->cfg_lock);
+ memset(&priv->icount, 0x00, sizeof(priv->icount));
+- init_waitqueue_head(&priv->delta_msr_wait);
+
+ priv->flags = ASYNC_LOW_LATENCY;
+- priv->dev_gone = false;
+
+ if (quirk && quirk->port_probe)
+ quirk->port_probe(priv);
+@@ -1869,8 +1865,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
+
+ dbg("%s", __func__);
+
+- priv->dev_gone = true;
+- wake_up_interruptible_all(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ remove_sysfs_attrs(port);
+
+@@ -2025,7 +2020,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
+ if (diff_status & FTDI_RS0_RLSD)
+ priv->icount.dcd++;
+
+- wake_up_interruptible_all(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ priv->prev_status = status;
+ }
+
+@@ -2424,11 +2419,15 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ */
+ case TIOCMIWAIT:
+ cprev = priv->icount;
+- while (!priv->dev_gone) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ for (;;) {
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = priv->icount;
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+@@ -2438,8 +2437,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ }
+ cprev = cnow;
+ }
+- return -EIO;
+- break;
+ case TIOCSERGETLSR:
+ return get_lsr_info(port, (struct serial_struct __user *)arg);
+ break;
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 2ee8075..0af0b41 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -114,7 +114,6 @@ struct edgeport_port {
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */
+ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+
+ struct async_icount icount;
+ struct usb_serial_port *port; /* loop back to the owner of this object */
+@@ -885,7 +884,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
+ /* initialize our wait queues */
+ init_waitqueue_head(&edge_port->wait_open);
+ init_waitqueue_head(&edge_port->wait_chase);
+- init_waitqueue_head(&edge_port->delta_msr_wait);
+ init_waitqueue_head(&edge_port->wait_command);
+
+ /* initialize our icount structure */
+@@ -1703,13 +1701,17 @@ static int edge_ioctl(struct tty_struct *tty,
+ dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = edge_port->icount;
+ while (1) {
+- prepare_to_wait(&edge_port->delta_msr_wait,
++ prepare_to_wait(&port->delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
+ schedule();
+- finish_wait(&edge_port->delta_msr_wait, &wait);
++ finish_wait(&port->delta_msr_wait, &wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = edge_port->icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+@@ -2090,7 +2092,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
+ icount->dcd++;
+ if (newMsr & EDGEPORT_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&edge_port->delta_msr_wait);
++ wake_up_interruptible(&edge_port->port->delta_msr_wait);
+ }
+
+ /* Save the new modem status */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 1f145bf..f42119d 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -98,9 +98,6 @@ struct edgeport_port {
+ int close_pending;
+ int lsr_event;
+ struct async_icount icount;
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while
+- waiting for msr change to
+- happen */
+ struct edgeport_serial *edge_serial;
+ struct usb_serial_port *port;
+ __u8 bUartMode; /* Port type, 0: RS232, etc. */
+@@ -1557,7 +1554,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
+ icount->dcd++;
+ if (msr & EDGEPORT_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&edge_port->delta_msr_wait);
++ wake_up_interruptible(&edge_port->port->delta_msr_wait);
+ }
+
+ /* Save the new modem status */
+@@ -1876,7 +1873,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
+ dev = port->serial->dev;
+
+ memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
+- init_waitqueue_head(&edge_port->delta_msr_wait);
+
+ /* turn off loopback */
+ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
+@@ -2574,10 +2570,14 @@ static int edge_ioctl(struct tty_struct *tty,
+ dbg("%s - (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = edge_port->icount;
+ while (1) {
+- interruptible_sleep_on(&edge_port->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = edge_port->icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index de0bb8e..96a62dd 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -168,8 +168,6 @@ struct mct_u232_private {
+ unsigned char last_msr; /* Modem Status Register */
+ unsigned int rx_flags; /* Throttling flags */
+ struct async_icount icount;
+- wait_queue_head_t msr_wait; /* for handling sleeping while waiting
+- for msr change to happen */
+ };
+
+ #define THROTTLED 0x01
+@@ -449,7 +447,6 @@ static int mct_u232_startup(struct usb_serial *serial)
+ if (!priv)
+ return -ENOMEM;
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->msr_wait);
+ usb_set_serial_port_data(serial->port[0], priv);
+
+ init_waitqueue_head(&serial->port[0]->write_wait);
+@@ -675,7 +672,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
+ tty_kref_put(tty);
+ }
+ #endif
+- wake_up_interruptible(&priv->msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ exit:
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -896,13 +893,17 @@ static int mct_u232_ioctl(struct tty_struct *tty,
+ cprev = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+ for ( ; ; ) {
+- prepare_to_wait(&mct_u232_port->msr_wait,
++ prepare_to_wait(&port->delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
+ schedule();
+- finish_wait(&mct_u232_port->msr_wait, &wait);
++ finish_wait(&port->delta_msr_wait, &wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&mct_u232_port->lock, flags);
+ cnow = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 43a38aa..e89ee48 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -240,7 +240,6 @@ struct moschip_port {
+ char open;
+ char open_ports;
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+ int delta_msr_cond;
+ struct async_icount icount;
+ struct usb_serial_port *port; /* loop back to the owner of this object */
+@@ -453,6 +452,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
+ icount->rng++;
+ smp_wmb();
+ }
++
++ mos7840_port->delta_msr_cond = 1;
++ wake_up_interruptible(&port->port->delta_msr_wait);
+ }
+ }
+
+@@ -1115,7 +1117,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* initialize our wait queues */
+ init_waitqueue_head(&mos7840_port->wait_chase);
+- init_waitqueue_head(&mos7840_port->delta_msr_wait);
+
+ /* initialize our icount structure */
+ memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));
+@@ -2073,8 +2074,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
+ mos7840_port->read_urb_busy = false;
+ }
+ }
+- wake_up(&mos7840_port->delta_msr_wait);
+- mos7840_port->delta_msr_cond = 1;
+ dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
+ mos7840_port->shadowLCR);
+ }
+@@ -2284,13 +2283,18 @@ static int mos7840_ioctl(struct tty_struct *tty,
+ while (1) {
+ /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */
+ mos7840_port->delta_msr_cond = 0;
+- wait_event_interruptible(mos7840_port->delta_msr_wait,
+- (mos7840_port->
++ wait_event_interruptible(port->delta_msr_wait,
++ (port->serial->disconnected ||
++ mos7840_port->
+ delta_msr_cond == 1));
+
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = mos7840_port->icount;
+ smp_rmb();
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
+index 4c29e6c..8ceaa89 100644
+--- a/drivers/usb/serial/oti6858.c
++++ b/drivers/usb/serial/oti6858.c
+@@ -196,7 +196,6 @@ struct oti6858_private {
+ u8 setup_done;
+ struct delayed_work delayed_setup_work;
+
+- wait_queue_head_t intr_wait;
+ struct usb_serial_port *port; /* USB port with which associated */
+ };
+
+@@ -357,7 +356,6 @@ static int oti6858_startup(struct usb_serial *serial)
+ break;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->intr_wait);
+ /* INIT_WORK(&priv->setup_work, setup_line, serial->port[i]); */
+ /* INIT_WORK(&priv->write_work, send_data, serial->port[i]); */
+ priv->port = port;
+@@ -705,11 +703,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (1) {
+- wait_event_interruptible(priv->intr_wait,
++ wait_event_interruptible(port->delta_msr_wait,
++ port->serial->disconnected ||
+ priv->status.pin_state != prev);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->status.pin_state & PIN_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -821,7 +823,7 @@ static void oti6858_read_int_callback(struct urb *urb)
+
+ if (!priv->transient) {
+ if (xs->pin_state != priv->status.pin_state)
+- wake_up_interruptible(&priv->intr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
+ }
+
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 5532ea5..fd86e0e 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -150,7 +150,6 @@ enum pl2303_type {
+
+ struct pl2303_private {
+ spinlock_t lock;
+- wait_queue_head_t delta_msr_wait;
+ u8 line_control;
+ u8 line_status;
+ enum pl2303_type type;
+@@ -204,7 +203,6 @@ static int pl2303_startup(struct usb_serial *serial)
+ if (!priv)
+ goto cleanup;
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->type = type;
+ usb_set_serial_port_data(serial->port[i], priv);
+ }
+@@ -599,11 +597,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (1) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -725,7 +726,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (priv->line_status & UART_BREAK_ERROR)
+ usb_serial_handle_break(port);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+@@ -792,7 +793,7 @@ static void pl2303_process_read_urb(struct urb *urb)
+ line_status = priv->line_status;
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index 180ea6c..ba6b438 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -163,7 +163,6 @@ static struct usb_driver spcp8x5_driver = {
+ struct spcp8x5_private {
+ spinlock_t lock;
+ enum spcp8x5_type type;
+- wait_queue_head_t delta_msr_wait;
+ u8 line_control;
+ u8 line_status;
+ };
+@@ -197,7 +196,6 @@ static int spcp8x5_startup(struct usb_serial *serial)
+ goto cleanup;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->type = type;
+ usb_set_serial_port_data(serial->port[i] , priv);
+ }
+@@ -502,7 +500,7 @@ static void spcp8x5_process_read_urb(struct urb *urb)
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /* wake up the wait for termios */
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+@@ -552,12 +550,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
+
+ while (1) {
+ /* wake up in bulk read */
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index fff7f17..bf1f8ea 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -78,7 +78,6 @@ struct ssu100_port_private {
+ spinlock_t status_lock;
+ u8 shadowLSR;
+ u8 shadowMSR;
+- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ struct async_icount icount;
+ };
+
+@@ -387,8 +386,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ while (1) {
+- wait_event_interruptible(priv->delta_msr_wait,
+- ((priv->icount.rng != prev.rng) ||
++ wait_event_interruptible(port->delta_msr_wait,
++ (port->serial->disconnected ||
++ (priv->icount.rng != prev.rng) ||
+ (priv->icount.dsr != prev.dsr) ||
+ (priv->icount.dcd != prev.dcd) ||
+ (priv->icount.cts != prev.cts)));
+@@ -396,6 +396,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->status_lock, flags);
+ cur = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+@@ -478,7 +481,6 @@ static int ssu100_attach(struct usb_serial *serial)
+ }
+
+ spin_lock_init(&priv->status_lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ usb_set_serial_port_data(port, priv);
+
+ return ssu100_initdevice(serial->dev);
+@@ -564,7 +566,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+ }
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 2856474..4b805be 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -75,7 +75,6 @@ struct ti_port {
+ int tp_flags;
+ int tp_closing_wait;/* in .01 secs */
+ struct async_icount tp_icount;
+- wait_queue_head_t tp_msr_wait; /* wait for msr change */
+ wait_queue_head_t tp_write_wait;
+ struct ti_device *tp_tdev;
+ struct usb_serial_port *tp_port;
+@@ -447,7 +446,6 @@ static int ti_startup(struct usb_serial *serial)
+ tport->tp_uart_base_addr = (i == 0 ?
+ TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
+ tport->tp_closing_wait = closing_wait;
+- init_waitqueue_head(&tport->tp_msr_wait);
+ init_waitqueue_head(&tport->tp_write_wait);
+ if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
+ GFP_KERNEL)) {
+@@ -848,9 +846,13 @@ static int ti_ioctl(struct tty_struct *tty,
+ dbg("%s - (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = tport->tp_icount;
+ while (1) {
+- interruptible_sleep_on(&tport->tp_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = tport->tp_icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+@@ -1481,7 +1483,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
+ icount->dcd++;
+ if (msr & TI_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&tport->tp_msr_wait);
++ wake_up_interruptible(&tport->tp_port->delta_msr_wait);
+ spin_unlock_irqrestore(&tport->tp_lock, flags);
+ }
+
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 2482d5e..850faa4 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -905,6 +905,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ port->port.ops = &serial_port_ops;
+ port->serial = serial;
+ spin_lock_init(&port->lock);
++ init_waitqueue_head(&port->delta_msr_wait);
+ /* Keep this for private driver use for the moment but
+ should probably go away */
+ INIT_WORK(&port->work, usb_serial_port_work);
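All of the usb-serial driver changes above converge on one pattern: sleep on the port-level delta_msr_wait queue (now initialized once in the core, in the hunk directly above), and after every wakeup check for disconnect before re-reading the counters, because a disconnected device can never deliver another modem-status change and the old per-driver queues left TIOCMIWAIT callers stuck forever. A condensed sketch of the resulting loop, for a hypothetical driver whose counter field names are invented:

	#include <linux/serial.h>
	#include <linux/sched.h>
	#include <linux/usb/serial.h>

	/* Sketch: the TIOCMIWAIT wait loop the per-driver hunks above
	 * converge on ("icount" stands in for the driver's counters). */
	static int example_tiocmiwait(struct usb_serial_port *port,
				      struct async_icount *icount)
	{
		struct async_icount cprev = *icount, cnow;

		for (;;) {
			interruptible_sleep_on(&port->delta_msr_wait);
			if (signal_pending(current))
				return -ERESTARTSYS;
			if (port->serial->disconnected)
				return -EIO;	/* no further MSR events possible */
			cnow = *icount;
			if (cnow.rng != cprev.rng || cnow.dsr != cprev.dsr ||
			    cnow.dcd != cprev.dcd || cnow.cts != cprev.cts)
				return 0;
			cprev = cnow;
		}
	}
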
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 9b8bcab..7a36dff 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -843,6 +843,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
+ *
+ * Maps a virtual console @unit to a frame buffer device
+ * @newidx.
++ *
++ * This should be called with the console lock held.
+ */
+ static int set_con2fb_map(int unit, int newidx, int user)
+ {
+@@ -860,7 +862,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
+
+ if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
+ info_idx = newidx;
+- return fbcon_takeover(0);
++ return do_fbcon_takeover(0);
+ }
+
+ if (oldidx != -1)
+@@ -868,7 +870,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
+
+ found = search_fb_in_map(newidx);
+
+- console_lock();
+ con2fb_map[unit] = newidx;
+ if (!err && !found)
+ err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
+@@ -895,7 +896,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ if (!search_fb_in_map(info_idx))
+ info_idx = newidx;
+
+- console_unlock();
+ return err;
+ }
+
+@@ -3026,6 +3026,7 @@ static inline int fbcon_unbind(void)
+ }
+ #endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
++/* called with console_lock held */
+ static int fbcon_fb_unbind(int idx)
+ {
+ int i, new_idx = -1, ret = 0;
+@@ -3052,6 +3053,7 @@ static int fbcon_fb_unbind(int idx)
+ return ret;
+ }
+
++/* called with console_lock held */
+ static int fbcon_fb_unregistered(struct fb_info *info)
+ {
+ int i, idx;
+@@ -3089,6 +3091,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ return 0;
+ }
+
++/* called with console_lock held */
+ static void fbcon_remap_all(int idx)
+ {
+ int i;
+@@ -3133,6 +3136,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
+ }
+ #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+
++/* called with console_lock held */
+ static int fbcon_fb_registered(struct fb_info *info)
+ {
+ int ret = 0, i, idx;
+@@ -3285,6 +3289,7 @@ static int fbcon_event_notify(struct notifier_block *self,
+ ret = fbcon_fb_unregistered(info);
+ break;
+ case FB_EVENT_SET_CONSOLE_MAP:
++ /* called with console lock held */
+ con2fb = event->data;
+ ret = set_con2fb_map(con2fb->console - 1,
+ con2fb->framebuffer, 1);
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index c133dde..babbb07 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1154,8 +1154,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ event.data = &con2fb;
+ if (!lock_fb_info(info))
+ return -ENODEV;
++ console_lock();
+ event.info = info;
+ ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
++ console_unlock();
+ unlock_fb_info(info);
+ break;
+ case FBIOBLANK:
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 49f3c9d..73e4cbc 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1209,6 +1209,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+ mask);
+ }
+
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ clear_page_dirty_for_io(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ account_page_redirty(page);
++ __set_page_dirty_nobuffers(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
+ /*
+ * helper function to set both pages and extents in the tree writeback
+ */
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index 7604c30..2e32510 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -304,6 +304,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
+ unsigned long *map_len);
+ int extent_range_uptodate(struct extent_io_tree *tree,
+ u64 start, u64 end);
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
+ int extent_clear_unlock_delalloc(struct inode *inode,
+ struct extent_io_tree *tree,
+ u64 start, u64 end, struct page *locked_page,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fd1a06d..1372634 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -343,6 +343,7 @@ static noinline int compress_file_range(struct inode *inode,
+ int i;
+ int will_compress;
+ int compress_type = root->fs_info->compress_type;
++ int redirty = 0;
+
+ /* if this is a small write inside eof, kick off a defragbot */
+ if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+@@ -404,6 +405,17 @@ again:
+ if (BTRFS_I(inode)->force_compress)
+ compress_type = BTRFS_I(inode)->force_compress;
+
++ /*
++ * we need to call clear_page_dirty_for_io on each
++ * page in the range. Otherwise applications with the file
++ * mmap'd can wander in and change the page contents while
++ * we are compressing them.
++ *
++ * If the compression fails for any reason, we set the pages
++ * dirty again later on.
++ */
++ extent_range_clear_dirty_for_io(inode, start, end);
++ redirty = 1;
+ ret = btrfs_compress_pages(compress_type,
+ inode->i_mapping, start,
+ total_compressed, pages,
+@@ -541,6 +553,8 @@ cleanup_and_bail_uncompressed:
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+ }
++ if (redirty)
++ extent_range_redirty_for_io(inode, start, end);
+ add_async_extent(async_cow, start, end - start + 1,
+ 0, NULL, 0, BTRFS_COMPRESS_NONE);
+ *num_added += 1;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 19b127c..21faa12 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -316,6 +316,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+ unsigned long src_ptr;
+ unsigned long dst_ptr;
+ int overwrite_root = 0;
++ bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
+
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+ overwrite_root = 1;
+@@ -325,6 +326,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+
+ /* look for the key in the destination tree */
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
++ if (ret < 0)
++ return ret;
++
+ if (ret == 0) {
+ char *src_copy;
+ char *dst_copy;
+@@ -366,6 +370,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++ /*
++ * We need to load the old nbytes into the inode so when we
++ * replay the extents we've logged we get the right nbytes.
++ */
++ if (inode_item) {
++ struct btrfs_inode_item *item;
++ u64 nbytes;
++
++ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_inode_item);
++ nbytes = btrfs_inode_nbytes(path->nodes[0], item);
++ item = btrfs_item_ptr(eb, slot,
++ struct btrfs_inode_item);
++ btrfs_set_inode_nbytes(eb, item, nbytes);
++ }
++ } else if (inode_item) {
++ struct btrfs_inode_item *item;
++
++ /*
++		 * New inode, set nbytes to 0 so that nbytes comes out
++ * properly when we replay the extents.
++ */
++ item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
++ btrfs_set_inode_nbytes(eb, item, 0);
+ }
+ insert:
+ btrfs_release_path(path);
+@@ -488,7 +516,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ u64 extent_end;
+ u64 alloc_hint;
+ u64 start = key->offset;
+- u64 saved_nbytes;
++ u64 nbytes = 0;
+ struct btrfs_file_extent_item *item;
+ struct inode *inode = NULL;
+ unsigned long size;
+@@ -498,10 +526,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ found_type = btrfs_file_extent_type(eb, item);
+
+ if (found_type == BTRFS_FILE_EXTENT_REG ||
+- found_type == BTRFS_FILE_EXTENT_PREALLOC)
+- extent_end = start + btrfs_file_extent_num_bytes(eb, item);
+- else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
++ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
++ nbytes = btrfs_file_extent_num_bytes(eb, item);
++ extent_end = start + nbytes;
++
++ /*
++		 * We don't add to the inode's nbytes if we are prealloc or a
++ * hole.
++ */
++ if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
++ nbytes = 0;
++ } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ size = btrfs_file_extent_inline_len(eb, item);
++ nbytes = btrfs_file_extent_ram_bytes(eb, item);
+ extent_end = (start + size + mask) & ~mask;
+ } else {
+ ret = 0;
+@@ -550,7 +587,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ }
+ btrfs_release_path(path);
+
+- saved_nbytes = inode_get_bytes(inode);
+ /* drop any overlapping extents */
+ ret = btrfs_drop_extents(trans, inode, start, extent_end,
+ &alloc_hint, 1);
+@@ -638,7 +674,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ BUG_ON(ret);
+ }
+
+- inode_set_bytes(inode, saved_nbytes);
++ inode_add_bytes(inode, nbytes);
+ btrfs_update_inode(trans, root, inode);
+ out:
+ if (inode)
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 5849e3e..32b12e5 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -517,7 +517,7 @@ void hfsplus_file_truncate(struct inode *inode)
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
+diff --git a/fs/inode.c b/fs/inode.c
+index ee4e66b..e2d3633 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -634,7 +634,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
++ list_move(&inode->i_lru, &sb->s_inode_lru);
+ continue;
+ }
+
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 32df2b6..5856c9e 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -937,7 +937,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
+ }
+ }
+
+-static inline bool atapi_command_packet_set(const u16 *dev_id)
++static inline int atapi_command_packet_set(const u16 *dev_id)
+ {
+ return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
+ }
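The ata.h change is subtle: atapi_command_packet_set() returns a 5-bit command packet set field that callers compare against specific device-type codes, and declaring the return type bool collapses every nonzero value to 1. A small standalone illustration of the truncation (plain C, not kernel code; names invented):

	#include <stdbool.h>
	#include <stdio.h>

	static bool field_as_bool(unsigned v) { return v & 0x1f; }
	static int  field_as_int(unsigned v)  { return v & 0x1f; }

	int main(void)
	{
		/* 0x05 is just an example field value */
		printf("%d\n", field_as_bool(0x05)); /* prints 1: value lost   */
		printf("%d\n", field_as_int(0x05));  /* prints 5: value intact */
		return 0;
	}
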
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index d4a62ab..d064502 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -16,6 +16,7 @@
+ #define _KREF_H_
+
+ #include <linux/types.h>
++#include <linux/atomic.h>
+
+ struct kref {
+ atomic_t refcount;
+@@ -27,4 +28,24 @@ int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+ int kref_sub(struct kref *kref, unsigned int count,
+ void (*release) (struct kref *kref));
+
++/**
++ * kref_get_unless_zero - Increment refcount for object unless it is zero.
++ * @kref: object.
++ *
++ * Return non-zero if the increment succeeded. Otherwise return 0.
++ *
++ * This function is intended to simplify locking around refcounting for
++ * objects that can be looked up from a lookup structure, and which are
++ * removed from that lookup structure in the object destructor.
++ * Operations on such objects require at least a read lock around
++ * lookup + kref_get, and a write lock around kref_put + remove from lookup
++ * structure. Furthermore, RCU implementations become extremely tricky.
++ * With a lookup followed by a kref_get_unless_zero *with return value check*,
++ * locking in the kref_put path can be deferred to the actual removal from
++ * the lookup structure and RCU lookups become trivial.
++ */
++static inline int __must_check kref_get_unless_zero(struct kref *kref)
++{
++ return atomic_add_unless(&kref->refcount, 1, 0);
++}
+ #endif /* _KREF_H_ */
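To make the new doc comment concrete, here is a minimal sketch of the lookup pattern kref_get_unless_zero() enables (hypothetical object type and list; the kobject hunk later in this patch is a real in-tree use):

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Sketch: lookup + reference under a lock that only guards the
	 * lookup structure. "struct obj" is invented for illustration. */
	struct obj {
		struct kref kref;
		struct list_head node;
		int id;
	};

	static struct obj *obj_lookup(struct list_head *head,
				      spinlock_t *lock, int id)
	{
		struct obj *o, *found = NULL;

		spin_lock(lock);
		list_for_each_entry(o, head, node) {
			if (o->id == id && kref_get_unless_zero(&o->kref)) {
				/* refcount was nonzero: we now hold a ref */
				found = o;
				break;
			}
			/* refcount already zero: release in flight, skip */
		}
		spin_unlock(lock);
		return found;
	}
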
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 6136821..e6796c1 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -396,7 +396,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa);
++ gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
+index fa7cc72..b0bcce0 100644
+--- a/include/linux/kvm_types.h
++++ b/include/linux/kvm_types.h
+@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long hva;
++ unsigned long len;
+ struct kvm_memory_slot *memslot;
+ };
+
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index cafc09a..62467ca 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -392,6 +392,7 @@ enum {
+ ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
+ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
++ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 4948552..9bf9611 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -336,6 +336,22 @@ static inline int of_machine_is_compatible(const char *compat)
+ #define of_match_node(_matches, _node) NULL
+ #endif /* CONFIG_OF */
+
++/**
++ * of_property_read_bool - Find a property
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ *
++ * Search for a property in a device node.
++ * Returns true if the property exists, false otherwise.
++ */
++static inline bool of_property_read_bool(const struct device_node *np,
++ const char *propname)
++{
++ struct property *prop = of_find_property(np, propname, NULL);
++
++ return prop ? true : false;
++}
++
+ static inline int of_property_read_u32(const struct device_node *np,
+ const char *propname,
+ u32 *out_value)
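Usage of the new helper is then a one-liner in drivers, as the sja1000 conversion earlier in this patch shows; an isolated sketch with an invented property name:

	#include <linux/of.h>

	/* Sketch: a presence-only devicetree property read as a boolean.
	 * The property name is hypothetical. */
	static bool example_has_flag(const struct device_node *np)
	{
		return of_property_read_bool(np, "vendor,example-flag");
	}
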
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 58969b2..e86bf01 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -91,13 +91,19 @@ do { \
+
+ #else /* !CONFIG_PREEMPT_COUNT */
+
+-#define preempt_disable() do { } while (0)
+-#define preempt_enable_no_resched() do { } while (0)
+-#define preempt_enable() do { } while (0)
++/*
++ * Even if we don't have any preemption, we need preempt disable/enable
++ * to be barriers, so that we don't have things like get_user/put_user
++ * that can cause faults and scheduling migrate into our preempt-protected
++ * region.
++ */
++#define preempt_disable() barrier()
++#define preempt_enable_no_resched() barrier()
++#define preempt_enable() barrier()
+
+-#define preempt_disable_notrace() do { } while (0)
+-#define preempt_enable_no_resched_notrace() do { } while (0)
+-#define preempt_enable_notrace() do { } while (0)
++#define preempt_disable_notrace() barrier()
++#define preempt_enable_no_resched_notrace() barrier()
++#define preempt_enable_notrace() barrier()
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
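The danger on !CONFIG_PREEMPT_COUNT is purely a compiler one: with the old empty definitions, nothing stopped the compiler from moving a faulting access across the "critical section". A sketch of the kind of code that relies on the new barriers (hypothetical per-CPU variable name):

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/uaccess.h>

	/* Sketch: why preempt_disable()/preempt_enable() must be at least
	 * compiler barriers. Without them, the compiler may move the
	 * get_user() (which can fault, sleep and migrate) into the region
	 * where we rely on staying on one CPU. */
	DEFINE_PER_CPU(int, example_counter);

	static int example(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))	/* may fault: keep it outside */
			return -EFAULT;

		preempt_disable();		/* barrier(): pins the ordering */
		__this_cpu_add(example_counter, val);
		preempt_enable();
		return 0;
	}
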
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index ad919e0..2acd2e2 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -317,6 +317,7 @@ struct ucred {
+ #define IPX_TYPE 1
+
+ extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
++extern void cred_real_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
+ extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+ extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
+index a26e2fb..e2369c1 100644
+--- a/include/linux/spinlock_up.h
++++ b/include/linux/spinlock_up.h
+@@ -16,7 +16,10 @@
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+- * No atomicity anywhere, we are on UP.
++ * No atomicity anywhere, we are on UP. However, we still need
++ * the compiler barriers, because we do not want the compiler to
++ * move potentially faulting instructions (notably user accesses)
++ * into the locked sequence, resulting in non-atomic execution.
+ */
+
+ #ifdef CONFIG_DEBUG_SPINLOCK
+@@ -25,6 +28,7 @@
+ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ {
+ lock->slock = 0;
++ barrier();
+ }
+
+ static inline void
+@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+ {
+ local_irq_save(flags);
+ lock->slock = 0;
++ barrier();
+ }
+
+ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+ char oldval = lock->slock;
+
+ lock->slock = 0;
++ barrier();
+
+ return oldval > 0;
+ }
+
+ static inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
++ barrier();
+ lock->slock = 1;
+ }
+
+ /*
+ * Read-write spinlocks. No debug version.
+ */
+-#define arch_read_lock(lock) do { (void)(lock); } while (0)
+-#define arch_write_lock(lock) do { (void)(lock); } while (0)
+-#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+-#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+-#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+-#define arch_write_unlock(lock) do { (void)(lock); } while (0)
++#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
++#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
++#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+
+ #else /* DEBUG_SPINLOCK */
+ #define arch_spin_is_locked(lock) ((void)(lock), 0)
+ /* for sched.c and kernel_lock.c: */
+-# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+-# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+-# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+-# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
++# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
+ #endif /* DEBUG_SPINLOCK */
+
+ #define arch_spin_is_contended(lock) (((void)(lock), 0))
+diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
+index b29f70b..237d5f8 100644
+--- a/include/linux/usb/serial.h
++++ b/include/linux/usb/serial.h
+@@ -71,6 +71,7 @@ enum port_dev_state {
+ * port.
+ * @flags: usb serial port flags
+ * @write_wait: a wait_queue_head_t used by the port.
++ * @delta_msr_wait: modem-status-change wait queue
+ * @work: work queue entry for the line discipline waking up.
+ * @throttled: nonzero if the read urb is inactive to throttle the device
+ * @throttle_req: nonzero if the tty wants to throttle us
+@@ -114,6 +115,7 @@ struct usb_serial_port {
+
+ unsigned long flags;
+ wait_queue_head_t write_wait;
++ wait_queue_head_t delta_msr_wait;
+ struct work_struct work;
+ char throttled;
+ char throttle_req;
+diff --git a/include/linux/writeback.h b/include/linux/writeback.h
+index a378c29..7e85d45 100644
+--- a/include/linux/writeback.h
++++ b/include/linux/writeback.h
+@@ -195,6 +195,8 @@ void writeback_set_ratelimit(void);
+ void tag_pages_for_writeback(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
++void account_page_redirty(struct page *page);
++
+ /* pdflush.c */
+ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
+ read-only. */
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 0c0017c..5da0a7b 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -50,7 +50,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ {
+ scm->pid = get_pid(pid);
+ scm->cred = cred ? get_cred(cred) : NULL;
+- cred_to_ucred(pid, cred, &scm->creds);
++ cred_real_to_ucred(pid, cred, &scm->creds);
+ }
+
+ static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index cdd5607..e4cee8d 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -61,6 +61,7 @@
+ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ {
+
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+ .clock_base =
+ {
+ {
+@@ -1640,8 +1641,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
+ struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
+ int i;
+
+- raw_spin_lock_init(&cpu_base->lock);
+-
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ cpu_base->clock_base[i].cpu_base = cpu_base;
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index eeeec4e..d08c9f4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2889,8 +2889,10 @@ static void try_to_wake_up_local(struct task_struct *p)
+ {
+ struct rq *rq = task_rq(p);
+
+- BUG_ON(rq != this_rq());
+- BUG_ON(p == current);
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
+ lockdep_assert_held(&rq->lock);
+
+ if (!raw_spin_trylock(&p->pi_lock)) {
+diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
+index c685e31..c3ae144 100644
+--- a/kernel/sched_clock.c
++++ b/kernel/sched_clock.c
+@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
+ u64 this_clock, remote_clock;
+ u64 *ptr, old_val, val;
+
++#if BITS_PER_LONG != 64
++again:
++ /*
++ * Careful here: The local and the remote clock values need to
++ * be read out atomic as we need to compare the values and
++ * then update either the local or the remote side. So the
++ * cmpxchg64 below only protects one readout.
++ *
++ * We must reread via sched_clock_local() in the retry case on
++ * 32bit as an NMI could use sched_clock_local() via the
++ * tracer and hit between the readout of
++	 * the low 32bit and the high 32bit portion.
++ */
++ this_clock = sched_clock_local(my_scd);
++ /*
++ * We must enforce atomic readout on 32bit, otherwise the
++	 * update on the remote cpu can hit in between the readout of
++	 * the low 32bit and the high 32bit portion.
++ */
++ remote_clock = cmpxchg64(&scd->clock, 0, 0);
++#else
++ /*
++ * On 64bit the read of [my]scd->clock is atomic versus the
++ * update, so we can avoid the above 32bit dance.
++ */
+ sched_clock_local(my_scd);
+ again:
+ this_clock = my_scd->clock;
+ remote_clock = scd->clock;
++#endif
+
+ /*
+ * Use the opportunity that we have both locks
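The 32-bit branch above uses a trick worth spelling out: cmpxchg64(&scd->clock, 0, 0) is just an atomic 64-bit read, because the exchange only happens when the old value is already 0, and then it stores the same 0 back. Sketch (hypothetical wrapper name):

	/* Sketch: an atomic 64-bit read on 32-bit, as used above. The
	 * compare against 0 almost never matches, and when it does it
	 * writes 0 over 0, so either way the call is a pure atomic load. */
	static u64 atomic_read_u64(u64 *p)
	{
		return cmpxchg64(p, 0, 0);
	}
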
+diff --git a/kernel/signal.c b/kernel/signal.c
+index ea76d30..3ecf574 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2790,7 +2790,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+
+ static int do_tkill(pid_t tgid, pid_t pid, int sig)
+ {
+- struct siginfo info;
++ struct siginfo info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
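The one-character change to do_tkill() matters because struct siginfo can be copied to userspace on delivery: only some fields are assigned below, and the previous declaration left the rest as uninitialized kernel stack, a potential infoleak. The "= {}" (a GNU empty initializer; standard C would use "= {0}") zero-initializes every member. A plain-C illustration of the general rule, with an invented struct:

	struct example_info {
		int  code;
		long payload;
		char reserved[16];
	};

	void sketch(void)
	{
		struct example_info a;		/* members indeterminate:
						   copying 'a' out would
						   leak stack contents   */
		struct example_info b = {};	/* all members zeroed     */
		(void)a;
		(void)b;
	}
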
+diff --git a/kernel/sys.c b/kernel/sys.c
+index f5939c2..be5fa8b 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -320,7 +320,6 @@ void kernel_restart_prepare(char *cmd)
+ system_state = SYSTEM_RESTART;
+ usermodehelper_disable();
+ device_shutdown();
+- syscore_shutdown();
+ }
+
+ /**
+@@ -366,6 +365,7 @@ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+ disable_nonboot_cpus();
++ syscore_shutdown();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+@@ -391,6 +391,7 @@ static void kernel_shutdown_prepare(enum system_states state)
+ void kernel_halt(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_HALT);
++ disable_nonboot_cpus();
+ syscore_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ kmsg_dump(KMSG_DUMP_HALT);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 0943d2a..5527211 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -572,7 +572,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+ free_page(tmp);
+ }
+
+- free_page((unsigned long)stat->pages);
+ stat->pages = NULL;
+ stat->start = NULL;
+
+@@ -2317,7 +2316,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
+ }
+
+ static loff_t
+-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
++ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+ {
+ loff_t ret;
+
+@@ -3135,7 +3134,7 @@ static const struct file_operations ftrace_filter_fops = {
+ .open = ftrace_filter_open,
+ .read = seq_read,
+ .write = ftrace_filter_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3143,7 +3142,7 @@ static const struct file_operations ftrace_notrace_fops = {
+ .open = ftrace_notrace_open,
+ .read = seq_read,
+ .write = ftrace_notrace_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3351,8 +3350,8 @@ static const struct file_operations ftrace_graph_fops = {
+ .open = ftrace_graph_open,
+ .read = seq_read,
+ .write = ftrace_graph_write,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_graph_release,
+- .llseek = seq_lseek,
+ };
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+@@ -3844,7 +3843,7 @@ static const struct file_operations ftrace_pid_fops = {
+ .open = ftrace_pid_open,
+ .write = ftrace_pid_write,
+ .read = seq_read,
+- .llseek = seq_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_pid_release,
+ };
+
+@@ -3964,12 +3963,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ ftrace_startup_sysctl();
+
+ /* we are starting ftrace again */
+- if (ftrace_ops_list != &ftrace_list_end) {
+- if (ftrace_ops_list->next == &ftrace_list_end)
+- ftrace_trace_function = ftrace_ops_list->func;
+- else
+- ftrace_trace_function = ftrace_ops_list_func;
+- }
++ if (ftrace_ops_list != &ftrace_list_end)
++ update_ftrace_function();
+
+ } else {
+ /* stopping ftrace calls (just send to ftrace_stub) */
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 640bd98..83bd5b3 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -531,6 +531,13 @@ struct kobject *kobject_get(struct kobject *kobj)
+ return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++ if (!kref_get_unless_zero(&kobj->kref))
++ kobj = NULL;
++ return kobj;
++}
++
+ /*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+@@ -785,7 +792,7 @@ struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+ slow_search:
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+- ret = kobject_get(k);
++ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4c7d42a..70b4733 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2889,7 +2889,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ break;
+ }
+
+- if (absent ||
++ /*
++ * We need to call hugetlb_fault for both hugepages under migration
++ * (in which case hugetlb_fault waits for the migration) and
++ * hwpoisoned hugepages (in which case we need to prevent the
++ * caller from accessing them). In order to do this, we use
++ * is_swap_pte here instead of is_hugetlb_entry_migration and
++ * is_hugetlb_entry_hwpoisoned. This is because it simply covers
++ * both cases, and because we can't follow correct pages
++ * directly from any kind of swap entries.
++ */
++ if (absent || is_swap_pte(huge_ptep_get(pte)) ||
+ ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
+ int ret;
+
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 50f0824..ea3f83b 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1801,6 +1801,24 @@ int __set_page_dirty_nobuffers(struct page *page)
+ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
+
+ /*
++ * Call this whenever redirtying a page, to de-account the dirty counters
++ * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
++ * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will lead to
++ * systematic errors in balanced_dirty_ratelimit and the dirty pages position
++ * control.
++ */
++void account_page_redirty(struct page *page)
++{
++ struct address_space *mapping = page->mapping;
++ if (mapping && mapping_cap_account_dirty(mapping)) {
++ current->nr_dirtied--;
++ dec_zone_page_state(page, NR_DIRTIED);
++ dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
++ }
++}
++EXPORT_SYMBOL(account_page_redirty);
++
++/*
+ * When a writepage implementation decides that it doesn't want to write this
+ * page for some reason, it should redirty the locked page via
+ * redirty_page_for_writepage() and it should then unlock the page and return 0
+@@ -1808,6 +1826,7 @@ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
+ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
+ {
+ wbc->pages_skipped++;
++ account_page_redirty(page);
+ return __set_page_dirty_nobuffers(page);
+ }
+ EXPORT_SYMBOL(redirty_page_for_writepage);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 3d79b12..f78f898 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -436,7 +436,7 @@ static int cgw_notifier(struct notifier_block *nb,
+ if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ }
+ }
+ }
+@@ -850,7 +850,7 @@ static void cgw_remove_all_jobs(void)
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ }
+ }
+
+@@ -903,7 +903,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ err = 0;
+ break;
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1e8a882..2c73adf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -761,6 +761,20 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ }
+ EXPORT_SYMBOL_GPL(cred_to_ucred);
+
++void cred_real_to_ucred(struct pid *pid, const struct cred *cred,
++ struct ucred *ucred)
++{
++ ucred->pid = pid_vnr(pid);
++ ucred->uid = ucred->gid = -1;
++ if (cred) {
++ struct user_namespace *current_ns = current_user_ns();
++
++ ucred->uid = user_ns_map_uid(current_ns, cred, cred->uid);
++ ucred->gid = user_ns_map_gid(current_ns, cred, cred->gid);
++ }
++}
++EXPORT_SYMBOL_GPL(cred_real_to_ucred);
++
+ int sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+ {
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 7747d26..4707b6c 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -163,7 +163,7 @@ const char *snd_hda_get_jack_type(u32 cfg)
+ "Line Out", "Speaker", "HP Out", "CD",
+ "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
+ "Line In", "Aux", "Mic", "Telephony",
+- "SPDIF In", "Digitial In", "Reserved", "Other"
++ "SPDIF In", "Digital In", "Reserved", "Other"
+ };
+
+ return jack_types[(cfg & AC_DEFCFG_DEVICE)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f3e0b24..1b43fde 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5595,7 +5595,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
+ const hda_nid_t *ssids;
+
+ if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 ||
+- codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670)
++ codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 ||
++ codec->vendor_id == 0x10ec0671)
+ ssids = alc663_ssids;
+ else
+ ssids = alc662_ssids;
+@@ -6045,6 +6046,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
+ { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
+ { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
++ { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
+ { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+ { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
+ { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
+diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
+index 4ad8ebd..4352ffb 100644
+--- a/sound/soc/codecs/wm8903.c
++++ b/sound/soc/codecs/wm8903.c
+@@ -1101,6 +1101,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
+ { "ROP", NULL, "Right Speaker PGA" },
+ { "RON", NULL, "Right Speaker PGA" },
+
++ { "Charge Pump", NULL, "CLK_DSP" },
++
+ { "Left Headphone Output PGA", NULL, "Charge Pump" },
+ { "Right Headphone Output PGA", NULL, "Charge Pump" },
+ { "Left Line Output PGA", NULL, "Charge Pump" },
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 38a607a..fb95069 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -396,7 +396,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+- 0, cpu_to_le16(wIndex),
++ 0, wIndex,
+ &tmp, sizeof(tmp), 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+@@ -427,7 +427,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+- cpu_to_le16(wValue), cpu_to_le16(wIndex),
++ wValue, wIndex,
+ NULL, 0, 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 1b275f0..dfbd65d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -482,7 +482,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
+ {
+ int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- cpu_to_le16(1), 0, NULL, 0, 1000);
++ 1, 0, NULL, 0, 1000);
+
+ if (ret < 0)
+ return ret;
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index 3eed61e..79647cd 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
+ u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+ u64 redir_content;
+
+- ASSERT(redir_index < IOAPIC_NUM_PINS);
++ if (redir_index < IOAPIC_NUM_PINS)
++ redir_content =
++ ioapic->redirtbl[redir_index].bits;
++ else
++ redir_content = ~0ULL;
+
+- redir_content = ioapic->redirtbl[redir_index].bits;
+ result = (ioapic->ioregsel & 0x1) ?
+ (redir_content >> 32) & 0xffffffff :
+ redir_content & 0xffffffff;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ec747dc..8bf05f0 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1401,21 +1401,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ }
+
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa)
++ gpa_t gpa, unsigned long len)
+ {
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int offset = offset_in_page(gpa);
+- gfn_t gfn = gpa >> PAGE_SHIFT;
++ gfn_t start_gfn = gpa >> PAGE_SHIFT;
++ gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
++ gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
++ gfn_t nr_pages_avail;
+
+ ghc->gpa = gpa;
+ ghc->generation = slots->generation;
+- ghc->memslot = __gfn_to_memslot(slots, gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+- if (!kvm_is_error_hva(ghc->hva))
++ ghc->len = len;
++ ghc->memslot = __gfn_to_memslot(slots, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva += offset;
+- else
+- return -EFAULT;
+-
++ } else {
++ /*
++ * If the requested region crosses two memslots, we still
++ * verify that the entire region is valid here.
++ */
++ while (start_gfn <= end_gfn) {
++ ghc->memslot = __gfn_to_memslot(slots, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
++ &nr_pages_avail);
++ if (kvm_is_error_hva(ghc->hva))
++ return -EFAULT;
++ start_gfn += nr_pages_avail;
++ }
++ /* Use the slow path for cross page reads and writes. */
++ ghc->memslot = NULL;
++ }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+@@ -1426,8 +1443,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_write_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+@@ -1447,8 +1469,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_read_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
diff --git a/3.2.54/1044_linux-3.2.45.patch b/3.2.54/1044_linux-3.2.45.patch
new file mode 100644
index 0000000..44e1767
--- /dev/null
+++ b/3.2.54/1044_linux-3.2.45.patch
@@ -0,0 +1,3809 @@
+diff --git a/Makefile b/Makefile
+index 566750c..9072fee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
+index 035fdc9..a8b71f2 100644
+--- a/arch/arm/mach-u300/include/mach/u300-regs.h
++++ b/arch/arm/mach-u300/include/mach/u300-regs.h
+@@ -102,7 +102,7 @@
+
+ #ifdef CONFIG_MACH_U300_BS335
+ /* Fast UART1 on U335 only */
+-#define U300_UART1_BASE (U300_SLOW_PER_PHYS_BASE+0x7000)
++#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000)
+ #endif
+
+ /*
+diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
+index 21ab376..1bd14d5 100644
+--- a/arch/ia64/include/asm/futex.h
++++ b/arch/ia64/include/asm/futex.h
+@@ -107,16 +107,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ {
+- register unsigned long r8 __asm ("r8");
++ register unsigned long r8 __asm ("r8") = 0;
+ unsigned long prev;
+ __asm__ __volatile__(
+ " mf;; \n"
+- " mov %0=r0 \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+- : "=r" (r8), "=r" (prev)
++ : "+r" (r8), "=&r" (prev)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
+index 43f96ab..8c70961 100644
+--- a/arch/ia64/include/asm/mca.h
++++ b/arch/ia64/include/asm/mca.h
+@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
+ extern int cpe_vector;
+ extern int ia64_cpe_irq;
+ extern void ia64_mca_init(void);
++extern void ia64_mca_irq_init(void);
+ extern void ia64_mca_cpu_init(void *);
+ extern void ia64_os_mca_dispatch(void);
+ extern void ia64_os_mca_dispatch_end(void);
+diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
+index ad69606..f2c41828 100644
+--- a/arch/ia64/kernel/irq.c
++++ b/arch/ia64/kernel/irq.c
+@@ -23,6 +23,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
+
++#include <asm/mca.h>
++
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
+
+ #endif /* CONFIG_SMP */
+
++int __init arch_early_irq_init(void)
++{
++ ia64_mca_irq_init();
++ return 0;
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ unsigned int vectors_in_migration[NR_IRQS];
+
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 84fb405..9b97303 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -2071,22 +2071,16 @@ ia64_mca_init(void)
+ printk(KERN_INFO "MCA related initialization done\n");
+ }
+
++
+ /*
+- * ia64_mca_late_init
+- *
+- * Opportunity to setup things that require initialization later
+- * than ia64_mca_init. Setup a timer to poll for CPEs if the
+- * platform doesn't support an interrupt driven mechanism.
+- *
+- * Inputs : None
+- * Outputs : Status
++ * These pieces cannot be done in ia64_mca_init() because it is called before
++ * early_irq_init() which would wipe out our percpu irq registrations. But we
++ * cannot leave them until ia64_mca_late_init() because by then all the other
++ * processors have been brought online and have set their own CMC vectors to
++ * point at a non-existent action. Called from arch_early_irq_init().
+ */
+-static int __init
+-ia64_mca_late_init(void)
++void __init ia64_mca_irq_init(void)
+ {
+- if (!mca_init)
+- return 0;
+-
+ /*
+ * Configure the CMCI/P vector and handler. Interrupts for CMC are
+ * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+@@ -2105,6 +2099,23 @@ ia64_mca_late_init(void)
+ /* Setup the CPEI/P handler */
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ #endif
++}
++
++/*
++ * ia64_mca_late_init
++ *
++ * Opportunity to setup things that require initialization later
++ * than ia64_mca_init. Setup a timer to poll for CPEs if the
++ * platform doesn't support an interrupt driven mechanism.
++ *
++ * Inputs : None
++ * Outputs : Status
++ */
++static int __init
++ia64_mca_late_init(void)
++{
++ if (!mca_init)
++ return 0;
+
+ register_hotcpu_notifier(&mca_cpu_notifier);
+
+diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
+index 4332f7e..a7869f8 100644
+--- a/arch/ia64/kvm/vtlb.c
++++ b/arch/ia64/kvm/vtlb.c
+@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+ "srlz.d;;"
+ "ssm psr.i;;"
+ "srlz.d;;"
+- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
++ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
+
+ return ret;
+ }
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index cdf6b3f..2c49227 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -502,6 +502,7 @@ _GLOBAL(copy_and_flush)
+ sync
+ addi r5,r5,8
+ addi r6,r6,8
++ isync
+ blr
+
+ .align 8
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b22a83a..24523dc 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -221,7 +221,7 @@ int __node_distance(int a, int b)
+ int distance = LOCAL_DISTANCE;
+
+ if (!form1_affinity)
+- return distance;
++ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index e481f6b..70ec4e9 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -100,6 +100,7 @@ spufs_new_inode(struct super_block *sb, int mode)
+ if (!inode)
+ goto out;
+
++ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 4f289ff..5aaf0bf 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned long pfn)
+
+ #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
++/* TODO: s390 cannot support io_remap_pfn_range... */
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ remap_pfn_range(vma, vaddr, pfn, size, prot)
++
+ #endif /* !__ASSEMBLY__ */
+
+ /*
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 38ebb2c..ddbbea3 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -781,6 +781,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+ }
+
++#include <asm/tlbflush.h>
+ #include <asm-generic/pgtable.h>
+
+ /* We provide our own get_unmapped_area to cope with VA holes and
+diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
+index 10bcabc..f856c7f 100644
+--- a/arch/sparc/include/asm/system_64.h
++++ b/arch/sparc/include/asm/system_64.h
+@@ -140,8 +140,7 @@ do { \
+ * and 2 stores in this critical code path. -DaveM
+ */
+ #define switch_to(prev, next, last) \
+-do { flush_tlb_pending(); \
+- save_and_clear_fpu(); \
++do { save_and_clear_fpu(); \
+ /* If you are tempted to conditionalize the following */ \
+ /* so that ASI is only written if it changes, think again. */ \
+ __asm__ __volatile__("wr %%g0, %0, %%asi" \
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index 2ef4634..f0d6a97 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -11,24 +11,40 @@
+ struct tlb_batch {
+ struct mm_struct *mm;
+ unsigned long tlb_nr;
++ unsigned long active;
+ unsigned long vaddrs[TLB_BATCH_NR];
+ };
+
+ extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+ extern void flush_tsb_user(struct tlb_batch *tb);
++extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+
+ /* TLB flush operations. */
+
+-extern void flush_tlb_pending(void);
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long vmaddr)
++{
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++}
++
++#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+-#define flush_tlb_range(vma,start,end) \
+- do { (void)(start); flush_tlb_pending(); } while (0)
+-#define flush_tlb_page(vma,addr) flush_tlb_pending()
+-#define flush_tlb_mm(mm) flush_tlb_pending()
++extern void flush_tlb_pending(void);
++extern void arch_enter_lazy_mmu_mode(void);
++extern void arch_leave_lazy_mmu_mode(void);
++#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+ /* Local cpu only. */
+ extern void __flush_tlb_all(void);
+-
++extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+ #ifndef CONFIG_SMP
+@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
+ __flush_tlb_kernel_range(start,end); \
+ } while (0)
+
++static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
++}
++
+ #else /* CONFIG_SMP */
+
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
++extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+
+ #define flush_tlb_kernel_range(start, end) \
+ do { flush_tsb_kernel_range(start,end); \
+ smp_flush_tlb_kernel_range(start, end); \
+ } while (0)
+
++#define global_flush_tlb_page(mm, vaddr) \
++ smp_flush_tlb_page(mm, vaddr)
++
+ #endif /* ! CONFIG_SMP */
+
+ #endif /* _SPARC64_TLBFLUSH_H */
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 7560772..e21d3c0d 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm)
+ }
+
+ extern unsigned long xcall_flush_tlb_mm;
+-extern unsigned long xcall_flush_tlb_pending;
++extern unsigned long xcall_flush_tlb_page;
+ extern unsigned long xcall_flush_tlb_kernel_range;
+ extern unsigned long xcall_fetch_glob_regs;
+ extern unsigned long xcall_receive_signal;
+@@ -1070,23 +1070,56 @@ local_flush_and_out:
+ put_cpu();
+ }
+
++struct tlb_pending_info {
++ unsigned long ctx;
++ unsigned long nr;
++ unsigned long *vaddrs;
++};
++
++static void tlb_pending_func(void *info)
++{
++ struct tlb_pending_info *t = info;
++
++ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
++}
++
+ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
++ struct tlb_pending_info info;
+ int cpu = get_cpu();
+
++ info.ctx = ctx;
++ info.nr = nr;
++ info.vaddrs = vaddrs;
++
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ else
+- smp_cross_call_masked(&xcall_flush_tlb_pending,
+- ctx, nr, (unsigned long) vaddrs,
+- mm_cpumask(mm));
++ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
++ &info, 1);
+
+ __flush_tlb_pending(ctx, nr, vaddrs);
+
+ put_cpu();
+ }
+
++void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long context = CTX_HWBITS(mm->context);
++ int cpu = get_cpu();
++
++ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
++ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
++ else
++ smp_cross_call_masked(&xcall_flush_tlb_page,
++ context, vaddr, 0,
++ mm_cpumask(mm));
++ __flush_tlb_page(context, vaddr);
++
++ put_cpu();
++}
++
+ void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ {
+ start &= PAGE_MASK;
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index b1f279c..afd021e 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
+ void flush_tlb_pending(void)
+ {
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
++ struct mm_struct *mm = tb->mm;
+
+- if (tb->tlb_nr) {
+- flush_tsb_user(tb);
++ if (!tb->tlb_nr)
++ goto out;
+
+- if (CTX_VALID(tb->mm->context)) {
++ flush_tsb_user(tb);
++
++ if (CTX_VALID(mm->context)) {
++ if (tb->tlb_nr == 1) {
++ global_flush_tlb_page(mm, tb->vaddrs[0]);
++ } else {
+ #ifdef CONFIG_SMP
+ smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+ &tb->vaddrs[0]);
+@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
+ tb->tlb_nr, &tb->vaddrs[0]);
+ #endif
+ }
+- tb->tlb_nr = 0;
+ }
+
++ tb->tlb_nr = 0;
++
++out:
+ put_cpu_var(tlb_batch);
+ }
+
++void arch_enter_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ tb->active = 1;
++}
++
++void arch_leave_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ if (tb->tlb_nr)
++ flush_tlb_pending();
++ tb->active = 0;
++}
++
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm)
+ {
+@@ -90,6 +114,12 @@ no_cache_flush:
+ nr = 0;
+ }
+
++ if (!tb->active) {
++ global_flush_tlb_page(mm, vaddr);
++ flush_tsb_user_page(mm, vaddr);
++ goto out;
++ }
++
+ if (nr == 0)
+ tb->mm = mm;
+
+@@ -98,5 +128,6 @@ no_cache_flush:
+ if (nr >= TLB_BATCH_NR)
+ flush_tlb_pending();
+
++out:
+ put_cpu_var(tlb_batch);
+ }
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 536412d..3ebcac7 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -8,11 +8,10 @@
+ #include <linux/slab.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+-#include <asm/tlbflush.h>
+-#include <asm/tlb.h>
+-#include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
++#include <asm/mmu_context.h>
+ #include <asm/tsb.h>
++#include <asm/tlb.h>
+ #include <asm/oplib.h>
+
+ extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+@@ -47,23 +46,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
+ }
+ }
+
+-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+- unsigned long tsb, unsigned long nentries)
++static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
++ unsigned long hash_shift,
++ unsigned long nentries)
+ {
+- unsigned long i;
++ unsigned long tag, ent, hash;
+
+- for (i = 0; i < tb->tlb_nr; i++) {
+- unsigned long v = tb->vaddrs[i];
+- unsigned long tag, ent, hash;
++ v &= ~0x1UL;
++ hash = tsb_hash(v, hash_shift, nentries);
++ ent = tsb + (hash * sizeof(struct tsb));
++ tag = (v >> 22UL);
+
+- v &= ~0x1UL;
++ tsb_flush(ent, tag);
++}
+
+- hash = tsb_hash(v, hash_shift, nentries);
+- ent = tsb + (hash * sizeof(struct tsb));
+- tag = (v >> 22UL);
++static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
++ unsigned long tsb, unsigned long nentries)
++{
++ unsigned long i;
+
+- tsb_flush(ent, tag);
+- }
++ for (i = 0; i < tb->tlb_nr; i++)
++ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
+ }
+
+ void flush_tsb_user(struct tlb_batch *tb)
+@@ -91,6 +94,30 @@ void flush_tsb_user(struct tlb_batch *tb)
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+ }
+
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long nentries, base, flags;
++
++ spin_lock_irqsave(&mm->context.lock, flags);
++
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
++
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
++ }
++#endif
++ spin_unlock_irqrestore(&mm->context.lock, flags);
++}
++
+ #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+ #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
+ #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 874162a..dd10caa 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
+ nop
+
+ .align 32
++ .globl __flush_tlb_page
++__flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, %pstate
++ mov SECONDARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ andn %o1, 1, %o3
++ be,pn %icc, 1f
++ or %o3, 0x10, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ retl
++ wrpr %g7, 0x0, %pstate
++ nop
++ nop
++ nop
++ nop
++
++ .align 32
+ .globl __flush_tlb_pending
+ __flush_tlb_pending: /* 26 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
+ retl
+ wrpr %g7, 0x0, %pstate
+
++__cheetah_flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, 0x0, %pstate
++ wrpr %g0, 1, %tl
++ mov PRIMARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
++ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
++ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ be,pn %icc, 1f
++ andn %o1, 1, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ wrpr %g0, 0, %tl
++ retl
++ wrpr %g7, 0x0, %pstate
++
+ __cheetah_flush_tlb_pending: /* 27 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ rdpr %pstate, %g7
+@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
+ retl
+ nop
+
++__hypervisor_flush_tlb_page: /* 11 insns */
++ /* %o0 = context, %o1 = vaddr */
++ mov %o0, %g2
++ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
++ mov %g2, %o1 /* ARG1: mmu context */
++ mov HV_MMU_ALL, %o2 /* ARG2: flags */
++ srlx %o0, PAGE_SHIFT, %o0
++ sllx %o0, PAGE_SHIFT, %o0
++ ta HV_MMU_UNMAP_ADDR_TRAP
++ brnz,pn %o0, __hypervisor_tlb_tl0_error
++ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
++ retl
++ nop
++
+ __hypervisor_flush_tlb_pending: /* 16 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ sllx %o1, 3, %g1
+@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
+ call tlb_patch_one
+ mov 19, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__cheetah_flush_tlb_page), %o1
++ or %o1, %lo(__cheetah_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 22, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
+ nop
+ nop
+
+- .globl xcall_flush_tlb_pending
+-xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=context, %g1=nr, %g7=vaddrs[] */
+- sllx %g1, 3, %g1
++ .globl xcall_flush_tlb_page
++xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=context, %g1=vaddr */
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
+@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
+ or %g5, %g4, %g5
+ mov PRIMARY_CONTEXT, %g4
+ stxa %g5, [%g4] ASI_DMMU
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %g5
+- andcc %g5, 0x1, %g0
++ andcc %g1, 0x1, %g0
+ be,pn %icc, 2f
+-
+- andn %g5, 0x1, %g5
++ andn %g1, 0x1, %g5
+ stxa %g0, [%g5] ASI_IMMU_DEMAP
+ 2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ membar #Sync
+- brnz,pt %g1, 1b
+- nop
+ stxa %g2, [%g4] ASI_DMMU
+ retry
+ nop
++ nop
+
+ .globl xcall_flush_tlb_kernel_range
+ xcall_flush_tlb_kernel_range: /* 25 insns */
+@@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+ membar #Sync
+ retry
+
+- .globl __hypervisor_xcall_flush_tlb_pending
+-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
+- sllx %g1, 3, %g1
++ .globl __hypervisor_xcall_flush_tlb_page
++__hypervisor_xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=ctx, %g1=vaddr */
+ mov %o0, %g2
+ mov %o1, %g3
+ mov %o2, %g4
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
++ mov %g1, %o0 /* ARG0: virtual address */
+ mov %g5, %o1 /* ARG1: mmu context */
+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
+ srlx %o0, PAGE_SHIFT, %o0
+@@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+ mov HV_MMU_UNMAP_ADDR_TRAP, %g6
+ brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ mov %o0, %g5
+- brnz,pt %g1, 1b
+- nop
+ mov %g2, %o0
+ mov %g3, %o1
+ mov %g4, %o2
+@@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 10, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__hypervisor_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 11, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__hypervisor_flush_tlb_pending), %o1
+@@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 21, %o2
+
+- sethi %hi(xcall_flush_tlb_pending), %o0
+- or %o0, %lo(xcall_flush_tlb_pending), %o0
+- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
+- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
++ sethi %hi(xcall_flush_tlb_page), %o0
++ or %o0, %lo(xcall_flush_tlb_page), %o0
++ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+ call tlb_patch_one
+- mov 21, %o2
++ mov 17, %o2
+
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 957c216..4bb12f7 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -130,8 +130,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
+ };
+
+ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
+- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
++ EVENT_EXTRA_END
++};
++
++static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ EVENT_EXTRA_END
+ };
+
+@@ -1711,7 +1717,10 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+- x86_pmu.extra_regs = intel_snb_extra_regs;
++ if (boot_cpu_data.x86_model == 45)
++ x86_pmu.extra_regs = intel_snbep_extra_regs;
++ else
++ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 34a7f40..a4cca06 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -44,11 +44,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+ unsigned long start = 0, good_end;
++ unsigned long pgd_extra = 0;
+ phys_addr_t base;
+
+ for (i = 0; i < nr_range; i++) {
+ unsigned long range, extra;
+
++ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
++ pgd_extra++;
++
+ range = mr[i].end - mr[i].start;
+ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+@@ -73,6 +77,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
++ tables += (pgd_extra * PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+ /* for fixmap */
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 69b9ef6..044f5d9 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1391,8 +1391,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ xen_vcpu_setup(cpu);
+- if (xen_have_vector_callback)
++ if (xen_have_vector_callback) {
+ xen_init_lock_cpu(cpu);
++ if (xen_feature(XENFEAT_hvm_safe_pvclock))
++ xen_setup_timer(cpu);
++ }
+ break;
+ default:
+ break;
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 9a23fff..6e4d5dc 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -563,6 +563,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
++ xen_uninit_lock_cpu(cpu);
++ xen_teardown_timer(cpu);
+ native_cpu_die(cpu);
+ }
+
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 0296a95..054cc01 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -497,7 +497,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
+ {
+ int cpu = smp_processor_id();
+ xen_setup_runstate_info(cpu);
+- xen_setup_timer(cpu);
++ /*
++ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
++ * doing it in xen_hvm_cpu_notify (which gets called by smp_init during
++ * early bootup and also during CPU hotplug events).
++ */
+ xen_setup_cpu_clockevents();
+ }
+
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index ef5356c..0262210 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 6a6dfc0..a1c4f0a 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
++ msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index 7aff631..5b0f075 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -247,8 +247,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+- /* Run _OSC query for all possible controls. */
+- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
++ /* Run _OSC query only with existing controls. */
++ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+ }
+
+ status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 0833896..14d49e4 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -374,26 +374,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+ struct hpet_dev *devp;
+ unsigned long addr;
+
+- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+- return -EINVAL;
+-
+ devp = file->private_data;
+ addr = devp->hd_hpets->hp_hpet_phys;
+
+ if (addr & (PAGE_SIZE - 1))
+ return -ENOSYS;
+
+- vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+- PAGE_SIZE, vma->vm_page_prot)) {
+- printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
+- __func__);
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, addr, PAGE_SIZE);
+ #else
+ return -ENOSYS;
+ #endif
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index ca67338..c77fc67 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,56 +1007,50 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-#define PTE_ADDRESS_MASK 0xfffff000
+-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+-#define PTE_MAPPING_TYPE_MASK (3 << 1)
+-#define PTE_VALID (1 << 0)
+-
+-/**
+- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+- * a physical one
+- * @dev: drm device
+- * @offset: address to translate
+- *
+- * Some chip functions require allocations from stolen space and need the
+- * physical address of the memory in question.
+- */
+-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
++static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+-#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so compute the base by subtracting the stolen memory
+- * from the Top of Low Usable DRAM which is where the BIOS places
+- * the graphics stolen memory.
++ * is unreliable, so on those compute the base by subtracting the
++ * stolen memory from the Top of Low Usable DRAM which is where the
++ * BIOS places the graphics stolen memory.
++ *
++ * On gen2, the layout is slightly different with the Graphics Segment
++ * immediately following Top of Memory (or Top of Usable DRAM). Note
++ * it appears that TOUD is only reported by 865g, so we just use the
++ * top of memory as determined by the e820 probe.
++ *
++ * XXX gen2 requires an unavailable symbol and 945gm fails with
++ * its value of TOLUD.
+ */
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* top 32bits are reserved = 0 */
++ base = 0;
++ if (INTEL_INFO(dev)->gen >= 6) {
++ /* Read Base Data of Stolen Memory Register (BDSM) directly.
++ * Note that there is also an MCHBAR mirror at 0x1080c0 or
++ * we could use device 2:0x5c instead.
++ */
++ pci_read_config_dword(pdev, 0xB0, &base);
++ base &= ~4095; /* lower bits used for locking register */
++ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(pdev, 0xA4, &base);
+- } else {
+- /* XXX presume 8xx is the same as i915 */
+- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+- }
+-#else
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- u16 val;
+- pci_read_config_word(pdev, 0xb0, &val);
+- base = val >> 4 << 20;
+- } else {
++#if 0
++ } else if (IS_GEN3(dev)) {
+ u8 val;
++ /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- }
+- base -= dev_priv->mm.gtt->stolen_size;
++ base -= dev_priv->mm.gtt->stolen_size;
++ } else {
++ /* Stolen is immediately above Top of Memory */
++ base = max_low_pfn_mapped << PAGE_SHIFT;
+ #endif
++ }
+
+- return base + offset;
++ return base;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1081,7 +1075,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
++ cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1094,7 +1088,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
++ ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1113,7 +1107,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- cfb_base, ll_base, size >> 20);
++ (long)cfb_base, (long)ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1187,6 +1181,13 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
++ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
++ if (dev_priv->mm.stolen_base == 0)
++ return 0;
++
++ DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
++ dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
++
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 144d37c..20cd295 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,6 +581,7 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
++ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index b0186b8..2865b44 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2520,6 +2520,11 @@ i915_find_fence_reg(struct drm_device *dev,
+ return avail;
+ }
+
++static void i915_gem_write_fence__ipi(void *data)
++{
++ wbinvd();
++}
++
+ /**
+ * i915_gem_object_get_fence - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+@@ -2640,6 +2645,17 @@ update:
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
++ /* In order to fully serialize access to the fenced region and
++ * the update to the fence register we need to take extreme
++ * measures on SNB+. In theory, the write to the fence register
++ * flushes all prior memory transactions, and coupled with the
++ * mb() placed around the register write, we serialise all memory
++ * operations with respect to the changes in the tiler. Yet, on
++ * SNB+ we need to take a step further and emit an explicit wbinvd()
++ * on each processor in order to manually flush all memory
++ * transactions before updating the fence register.
++ */
++ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
+ break;
+ case 5:
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 897ca06..cfbb893 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9093,6 +9093,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
++ /* destroy backlight, if any, before the connectors */
++ intel_panel_destroy_backlight(dev);
++
+ drm_mode_config_cleanup(dev);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index c8ecaab..a07ccab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2274,11 +2274,6 @@ done:
+ static void
+ intel_dp_destroy(struct drm_connector *connector)
+ {
+- struct drm_device *dev = connector->dev;
+-
+- if (intel_dpd_is_edp(dev))
+- intel_panel_destroy_backlight(dev);
+-
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 6eda1b5..8ac91b8 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -371,6 +371,7 @@ void intel_dvo_init(struct drm_device *dev)
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
+ int gpio;
++ bool dvoinit;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+@@ -390,7 +391,17 @@ void intel_dvo_init(struct drm_device *dev)
+ i2c = &dev_priv->gmbus[gpio].adapter;
+
+ intel_dvo->dev = *dvo;
+- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
++
++ /* GMBUS NAK handling seems to be unstable, hence let the
++ * transmitter detection run in bit banging mode for now.
++ */
++ intel_gmbus_force_bit(i2c, true);
++
++ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
++
++ intel_gmbus_force_bit(i2c, false);
++
++ if (!dvoinit)
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 6601d21..876bac0 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -553,8 +553,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+- intel_panel_destroy_backlight(dev);
+-
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+ drm_sysfs_connector_remove(connector);
+@@ -788,6 +786,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Fujitsu Esprimo Q900",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 72b8949..04cb34a 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -361,6 +361,9 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+
+ intel_panel_init_backlight(dev);
+
++ if (WARN_ON(dev_priv->backlight))
++ return -ENODEV;
++
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+@@ -388,8 +391,10 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+ void intel_panel_destroy_backlight(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- if (dev_priv->backlight)
++ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
++ dev_priv->backlight = NULL;
++ }
+ }
+ #else
+ int intel_panel_setup_backlight(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 3a05cdb..d969f3c 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1387,10 +1387,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
++ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
++ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
++ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
+ ctx->scratch_size_bytes = 0;
+ if (usage_bytes == 0)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a25d08a..038570a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -544,6 +544,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ /* use frac fb div on APUs */
+ if (ASIC_IS_DCE41(rdev))
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
++ /* use frac fb div on RS780/RS880 */
++ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 60d13fe..0495a50 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -412,6 +412,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ continue;
++ }
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index c45d921..57a825d 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -506,7 +506,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ offset = radeon_encoder->hdmi_offset;
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+@@ -572,7 +572,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 38585c5..383b38e 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1989,6 +1989,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
++ if (num_modes == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+@@ -2361,6 +2363,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
++ if (power_info->pplib.ucNumStates == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ power_info->pplib.ucNumStates, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+@@ -2443,6 +2447,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
++ u8 *power_state_offset;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+@@ -2459,15 +2464,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
++ if (state_array->ucNumEntries == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
++ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+- power_state = (union pplib_power_state *)&state_array->states[i];
+- /* XXX this might be an inagua bug... */
+- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
++ power_state = (union pplib_power_state *)power_state_offset;
++ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+@@ -2479,9 +2486,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+- /* XXX this might be an inagua bug... */
+- if (clock_array_index >= clock_info_array->ucNumEntries)
+- continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+@@ -2503,6 +2507,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info);
+ state_index++;
+ }
++ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+@@ -2549,7 +2554,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+ default:
+ break;
+ }
+- } else {
++ }
++
++ if (state_index == 0) {
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ rdev->pm.power_state[0].clock_info =
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index be2c122..4bb9e27 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -39,8 +39,12 @@ int radeon_driver_unload_kms(struct drm_device *dev)
+
+ if (rdev == NULL)
+ return 0;
++ if (rdev->rmmio == NULL)
++ goto done_free;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
++
++done_free:
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index ebd6c51..d58eccb 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -863,7 +863,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+ struct radeon_device *rdev = dev->dev_private;
+
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
++ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
++ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
++ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
++ else
++ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 4bb68f3..64e7065 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -311,10 +311,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
+ /* last message in transfer -> STOP */
+ data |= XIIC_TX_DYN_STOP_MASK;
+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
+-
+- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+- } else
+- xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1702133..2d0544c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1588,8 +1588,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ sector, count, 1) == 0)
+ return -EINVAL;
+ }
+- } else if (sb->bblog_offset == 0)
+- rdev->badblocks.shift = -1;
++ } else if (sb->bblog_offset != 0)
++ rdev->badblocks.shift = 0;
+
+ if (!refdev) {
+ ret = 1;
+@@ -3063,7 +3063,7 @@ int md_rdev_init(struct md_rdev *rdev)
+ * be used - I wonder if that matters
+ */
+ rdev->badblocks.count = 0;
+- rdev->badblocks.shift = 0;
++ rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
+ rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ seqlock_init(&rdev->badblocks.lock);
+ if (rdev->badblocks.page == NULL)
+@@ -3135,9 +3135,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
+ goto abort_free;
+ }
+ }
+- if (super_format == -1)
+- /* hot-add for 0.90, or non-persistent: so no badblocks */
+- rdev->badblocks.shift = -1;
+
+ return rdev;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fc07f90..b436b84 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1866,6 +1866,7 @@ err_detach:
+ write_unlock_bh(&bond->lock);
+
+ err_close:
++ slave_dev->priv_flags &= ~IFF_BONDING;
+ dev_close(slave_dev);
+
+ err_unset_master:
+@@ -4853,9 +4854,18 @@ static int __net_init bond_net_init(struct net *net)
+ static void __net_exit bond_net_exit(struct net *net)
+ {
+ struct bond_net *bn = net_generic(net, bond_net_id);
++ struct bonding *bond, *tmp_bond;
++ LIST_HEAD(list);
+
+ bond_destroy_sysfs(bn);
+ bond_destroy_proc_dir(bn);
++
++ /* Kill off any bonds created after unregistering bond rtnl ops */
++ rtnl_lock();
++ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
++ unregister_netdevice_queue(bond->dev, &list);
++ unregister_netdevice_many(&list);
++ rtnl_unlock();
+ }
+
+ static struct pernet_operations bond_net_ops = {
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index edfdf6b..b5fd934 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
+ /* how about 0x2000 */
+ #define MAX_TX_BUF_LEN 0x2000
+ #define MAX_TX_BUF_SHIFT 13
+-/*#define MAX_TX_BUF_LEN 0x3000 */
++#define MAX_TSO_SEG_SIZE 0x3c00
+
+ /* rrs word 1 bit 0:31 */
+ #define RRS_RX_CSUM_MASK 0xFFFF
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index c69dc29..dd893b3 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -2352,6 +2352,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
+
+ INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
++ netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "register netdevice failed\n");
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index f67b8ae..69c3adf 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -127,7 +127,6 @@ struct gianfar_ptp_registers {
+
+ #define DRIVER "gianfar_ptp"
+ #define DEFAULT_CKSEL 1
+-#define N_ALARM 1 /* first alarm is used internally to reset fipers */
+ #define N_EXT_TS 2
+ #define REG_SIZE sizeof(struct gianfar_ptp_registers)
+
+@@ -410,7 +409,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
+ .owner = THIS_MODULE,
+ .name = "gianfar clock",
+ .max_adj = 512000,
+- .n_alarm = N_ALARM,
++ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .pps = 1,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 41396fa..d93eee1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1937,6 +1937,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
+ * with the write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
++
++ /* The lower 16bits of the EICR register are for the queue interrupts
++ * which should be masked here in order to not accidently clear them if
++ * the bits are high when ixgbe_msix_other is called. There is a race
++ * condition otherwise which results in possible performance loss
++ * especially if the ixgbe_msix_other interrupt is triggering
++ * consistently (as it would when PPS is turned on for the X540 device)
++ */
++ eicr &= 0xFFFF0000;
++
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+
+ if (eicr & IXGBE_EICR_LSC)
+@@ -5408,7 +5418,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
+
+ pci_wake_from_d3(pdev, false);
+
++ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
++ rtnl_unlock();
+ if (err) {
+ e_dev_err("Cannot initialize interrupts for device\n");
+ return err;
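
The masking hunk above hinges on EICR being a write-1-to-clear cause register: acknowledging the raw readback could also clear any queue-interrupt bits (the low 16) that fired in the meantime, losing events. A minimal userspace model of the mask-before-ack pattern; the register value and bit layout below are illustrative, not the real ixgbe definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy write-1-to-clear cause register: queue causes live in the low
     * 16 bits, "other" causes (link change, PPS, ...) in the high bits. */
    static uint32_t eicr = 0x00030001;   /* one queue bit + two misc bits */

    static void ack_other_causes(void)
    {
        uint32_t v = eicr;      /* read the pending causes */
        v &= 0xFFFF0000;        /* never acknowledge queue bits here */
        eicr &= ~v;             /* writing 1s clears those causes */
    }

    int main(void)
    {
        ack_other_causes();
        printf("still pending: 0x%08X\n", eicr);  /* queue bit 0 survives */
        return 0;
    }
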
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index d812790..f698183 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1629,8 +1629,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
+
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+-
+- desc->opts2 = 0;
+ }
+
+ static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+@@ -5566,6 +5564,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
++ /* 8168evl does not automatically pad to minimum length. */
++ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
++ skb->len < ETH_ZLEN)) {
++ if (skb_padto(skb, ETH_ZLEN))
++ goto err_update_stats;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ }
++
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+@@ -5633,6 +5639,7 @@ err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+ dev_kfree_skb(skb);
++err_update_stats:
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+
+@@ -5814,7 +5821,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
+@@ -5828,16 +5834,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+- continue;
++ goto release_descriptor;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+- continue;
++ goto release_descriptor;
+ }
+
+ rtl8169_rx_csum(skb, status);
+@@ -5851,6 +5855,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
++release_descriptor:
++ desc->opts2 = 0;
++ wmb();
++ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ }
+
+ count = cur_rx - tp->cur_rx;
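
The 8168evl hunk pads runt frames in software because that chip revision does not pad to the 60-byte Ethernet minimum in hardware, and skb_put() then accounts the extra zero bytes. The same pad-to-ETH_ZLEN step as a hedged standalone sketch (pad_to_min is an illustrative stand-in for the skb_padto()/skb_put() pair):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ZLEN 60   /* minimum Ethernet frame length without FCS */

    /* Zero-pad a runt frame in place; buf must have room for ETH_ZLEN
     * bytes.  Returns the (possibly grown) frame length. */
    static size_t pad_to_min(unsigned char *buf, size_t len)
    {
        if (len < ETH_ZLEN) {
            memset(buf + len, 0, ETH_ZLEN - len);
            len = ETH_ZLEN;
        }
        return len;
    }

    int main(void)
    {
        unsigned char frame[ETH_ZLEN] = { 0xde, 0xad };
        printf("frame length after padding: %zu\n", pad_to_min(frame, 2));
        return 0;
    }
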
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index ccf1524..3935994 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -563,6 +563,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ {
+ struct iwl_addsta_cmd sta_cmd;
++ static const struct iwl_link_quality_cmd zero_lq = {};
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+@@ -602,7 +603,9 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+- send_lq = true;
++
++ if (!memcmp(&lq, &zero_lq, sizeof(lq)))
++ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 3cf4ecc..621b84f 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -1821,9 +1821,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+ if (pdev) {
+ pci_iounmap(pdev, card->pci_mmap);
+ pci_iounmap(pdev, card->pci_mmap1);
+-
+- pci_release_regions(pdev);
+ pci_disable_device(pdev);
++ pci_release_region(pdev, 2);
++ pci_release_region(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
+ }
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 6d4a531..363a5c6 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -664,15 +664,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- } else {
++ } else
+ error = -ENODEV;
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- }
++
++ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
++ dev->current_state = PCI_D0;
+
+ return error;
+ }
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 05beb6c..e3eed18 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -805,9 +805,8 @@ static int cmos_suspend(struct device *dev)
+ mask = RTC_IRQMASK;
+ tmp &= ~mask;
+ CMOS_WRITE(tmp, RTC_CONTROL);
++ hpet_mask_rtc_irq_bit(mask);
+
+- /* shut down hpet emulation - we don't need it for alarm */
+- hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
+ cmos_checkintr(cmos, tmp);
+ }
+ spin_unlock_irq(&rtc_lock);
+@@ -872,6 +871,7 @@ static int cmos_resume(struct device *dev)
+ rtc_update_irq(cmos->rtc, 1, mask);
+ tmp &= ~RTC_AIE;
+ hpet_mask_rtc_irq_bit(RTC_AIE);
++ hpet_rtc_timer_init();
+ } while (mask & RTC_AIE);
+ spin_unlock_irq(&rtc_lock);
+ }
+diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
+index 0b54a91..a56a15e 100644
+--- a/drivers/s390/char/sclp_cmd.c
++++ b/drivers/s390/char/sclp_cmd.c
+@@ -509,6 +509,8 @@ static void __init sclp_add_standby_memory(void)
+ add_memory_merged(0);
+ }
+
++#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
++
+ static void __init insert_increment(u16 rn, int standby, int assigned)
+ {
+ struct memory_increment *incr, *new_incr;
+@@ -521,7 +523,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ if (!standby)
+- new_incr->usecount = 1;
++ new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index d19b879..4735928 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -669,6 +669,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+
+ nonseekable_open(inode, filp);
+
++ /* We refuse fsnotify events on ptmx, since it's a shared resource */
++ filp->f_mode |= FMODE_NONOTIFY;
++
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return retval;
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index af5ffb9..488214a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1901,6 +1901,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
++
+ if (console_suspend_enabled || !uart_console(uport))
+ uport->suspended = 1;
+
+@@ -1966,9 +1968,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ disable_irq_wake(uport->irq);
+ uport->irq_wake = 0;
+ }
++ put_device(tty_dev);
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
+ uport->suspended = 0;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 05085be..3f35e42 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -940,6 +940,14 @@ void start_tty(struct tty_struct *tty)
+
+ EXPORT_SYMBOL(start_tty);
+
++/* We limit tty time update visibility to every 8 seconds or so. */
++static void tty_update_time(struct timespec *time)
++{
++ unsigned long sec = get_seconds() & ~7;
++ if ((long)(sec - time->tv_sec) > 0)
++ time->tv_sec = sec;
++}
++
+ /**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+@@ -976,8 +984,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
++
+ if (i > 0)
+- inode->i_atime = current_fs_time(inode->i_sb);
++ tty_update_time(&inode->i_atime);
++
+ return i;
+ }
+
+@@ -1079,8 +1089,8 @@ static inline ssize_t do_tty_write(
+ cond_resched();
+ }
+ if (written) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- inode->i_mtime = current_fs_time(inode->i_sb);
++ struct inode *inode = file->f_path.dentry->d_inode;
++ tty_update_time(&inode->i_mtime);
+ ret = written;
+ }
+ out:
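
tty_update_time() above coarsens atime/mtime updates to 8-second buckets so steady terminal traffic does not dirty the inode on every read and write; the signed subtraction keeps the "only move forward" test safe. A hedged userspace sketch of the same bucketing trick (update_time and the bit-twiddling assume a POSIX integer time_t):

    #include <stdio.h>
    #include <time.h>

    /* Round "now" down to an 8-second bucket and only store it if it
     * moves the recorded stamp forward; the signed difference keeps the
     * comparison sane even if the clock steps. */
    static void update_time(time_t *stamp)
    {
        time_t sec = time(NULL) & ~(time_t)7;
        if ((long)(sec - *stamp) > 0)
            *stamp = sec;
    }

    int main(void)
    {
        time_t atime = 0;
        update_time(&atime);
        printf("bucketed atime: %ld\n", (long)atime);
        update_time(&atime);   /* same 8-second bucket: no change */
        printf("bucketed atime: %ld\n", (long)atime);
        return 0;
    }
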
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a9df218..22f770a 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -643,6 +643,8 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+ index &= 0xff;
+ switch (requesttype & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
++ if ((index & ~USB_DIR_IN) == 0)
++ return 0;
+ ret = findintfep(ps->dev, index);
+ if (ret >= 0)
+ ret = checkintf(ps, ret);
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index ac0d75a..9f7003e 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
+ { APPLEDISPLAY_DEVICE(0x9219) },
+ { APPLEDISPLAY_DEVICE(0x921c) },
+ { APPLEDISPLAY_DEVICE(0x921d) },
++ { APPLEDISPLAY_DEVICE(0x9236) },
+
+ /* Terminating entry */
+ { }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 06394e5a..51d1712 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -195,6 +195,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+@@ -876,7 +877,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
++ { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+@@ -1816,8 +1819,11 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ }
+
+ /*
+- * First and second port on STMCLiteadaptors is reserved for JTAG interface
+- * and the forth port for pio
++ * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's
++ * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and
++ * can be accessed from userspace.
++ * The next two ports are enabled as UARTs by default, where port 2 is
++ * a conventional RS-232 UART.
+ */
+ static int ftdi_stmclite_probe(struct usb_serial *serial)
+ {
+@@ -1826,12 +1832,13 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
+
+ dbg("%s", __func__);
+
+- if (interface == udev->actconfig->interface[2])
+- return 0;
+-
+- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ if (interface == udev->actconfig->interface[0] ||
++ interface == udev->actconfig->interface[1]) {
++ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ return -ENODEV;
++ }
+
+- return -ENODEV;
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 809c03a..2f86008 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -74,6 +74,7 @@
+ #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+ #define FTDI_OPENDCC_GBM_PID 0xBFDC
++#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD
+
+ /* NZR SEM 16+ USB (http://www.nzr.de) */
+ #define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
+@@ -1150,7 +1151,8 @@
+ * STMicroelectonics
+ */
+ #define ST_VID 0x0483
+-#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
++#define ST_STMCLT_2232_PID 0x3746
++#define ST_STMCLT_4232_PID 0x3747
+
+ /*
+ * Papouch products (http://www.papouch.com/)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4418538..8513f51 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,6 +347,7 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
++#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+@@ -1273,6 +1274,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+@@ -1350,6 +1352,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
+index c844718..7341ce2 100644
+--- a/drivers/usb/storage/cypress_atacb.c
++++ b/drivers/usb/storage/cypress_atacb.c
+@@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf,
+ {
+ struct us_data *us;
+ int result;
++ struct usb_device *device;
+
+ result = usb_stor_probe1(&us, intf, id,
+ (id - cypress_usb_ids) + cypress_unusual_dev_list);
+ if (result)
+ return result;
+
+- us->protocol_name = "Transparent SCSI with Cypress ATACB";
+- us->proto_handler = cypress_atacb_passthrough;
++	/* Among CY7C68300 chips, the A revision does not support Cypress ATACB.
++	 * Detect that revision by its EEPROM default descriptor values.
++	 */
++ device = interface_to_usbdev(intf);
++ if (device->descriptor.iManufacturer != 0x38 ||
++ device->descriptor.iProduct != 0x4e ||
++ device->descriptor.iSerialNumber != 0x64) {
++ us->protocol_name = "Transparent SCSI with Cypress ATACB";
++ us->proto_handler = cypress_atacb_passthrough;
++ } else {
++ us->protocol_name = "Transparent SCSI";
++ us->proto_handler = usb_stor_transparent_scsi_command;
++ }
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 7a36dff..6b4fb5c 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -1229,6 +1229,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ finished:
+
+ fbcon_free_font(p, free_font);
++ if (free_font)
++ vc->vc_font.data = NULL;
+
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index babbb07..0a22808 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1350,15 +1350,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ {
+ struct fb_info *info = file_fb_info(file);
+ struct fb_ops *fb;
+- unsigned long off;
++ unsigned long mmio_pgoff;
+ unsigned long start;
+ u32 len;
+
+ if (!info)
+ return -ENODEV;
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+- return -EINVAL;
+- off = vma->vm_pgoff << PAGE_SHIFT;
+ fb = info->fbops;
+ if (!fb)
+ return -ENODEV;
+@@ -1370,33 +1367,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ return res;
+ }
+
+- /* frame buffer memory */
++ /*
++ * Ugh. This can be either the frame buffer mapping, or
++ * if pgoff points past it, the mmio mapping.
++ */
+ start = info->fix.smem_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+- if (off >= len) {
+- /* memory mapped io */
+- off -= len;
+- if (info->var.accel_flags) {
+- mutex_unlock(&info->mm_lock);
+- return -EINVAL;
+- }
++ len = info->fix.smem_len;
++ mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
++ if (vma->vm_pgoff >= mmio_pgoff) {
++ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
++ len = info->fix.mmio_len;
+ }
+ mutex_unlock(&info->mm_lock);
+- start &= PAGE_MASK;
+- if ((vma->vm_end - vma->vm_start + off) > len)
+- return -EINVAL;
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+- /* This is an IO map - tell maydump to skip this VMA */
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+- fb_pgprotect(file, vma, off);
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ fb_pgprotect(file, vma, start);
++
++ return vm_iomap_memory(vma, start, len);
+ }
+
+ static int
+diff --git a/fs/aio.c b/fs/aio.c
+index 3b65ee7..8cdd8ea 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1112,9 +1112,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+ spin_unlock(&info->ring_lock);
+
+ out:
+- kunmap_atomic(ring, KM_USER0);
+ dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
+ (unsigned long)ring->head, (unsigned long)ring->tail);
++ kunmap_atomic(ring, KM_USER0);
+ return ret;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e923bf4..d322929 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1176,8 +1176,10 @@ void shrink_dcache_parent(struct dentry * parent)
+ LIST_HEAD(dispose);
+ int found;
+
+- while ((found = select_parent(parent, &dispose)) != 0)
++ while ((found = select_parent(parent, &dispose)) != 0) {
+ shrink_dentry_list(&dispose);
++ cond_resched();
++ }
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+
+diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
+index 9ed1bb1..5459168 100644
+--- a/fs/ext4/Kconfig
++++ b/fs/ext4/Kconfig
+@@ -82,4 +82,5 @@ config EXT4_DEBUG
+ Enables run-time debugging support for the ext4 filesystem.
+
+ If you select Y here, then you will be able to turn on debugging
+- with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug"
++ with a command such as:
++ echo 1 > /sys/module/ext4/parameters/mballoc_debug
+diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
+index bb6c7d8..a8d03a4 100644
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -260,8 +260,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ if (journal->j_flags & JBD2_BARRIER &&
+ !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+ needs_barrier = true;
+- jbd2_log_start_commit(journal, commit_tid);
+- ret = jbd2_log_wait_commit(journal, commit_tid);
++ ret = jbd2_complete_transaction(journal, commit_tid);
+ if (needs_barrier)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ out:
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3270ffd..025b4b6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -147,8 +147,7 @@ void ext4_evict_inode(struct inode *inode)
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+- jbd2_log_start_commit(journal, commit_tid);
+- jbd2_log_wait_commit(journal, commit_tid);
++ jbd2_complete_transaction(journal, commit_tid);
+ filemap_write_and_wait(&inode->i_data);
+ }
+ truncate_inode_pages(&inode->i_data, 0);
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 4765190..73c0bd7 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -276,5 +276,5 @@ const struct file_operations fscache_stats_fops = {
+ .open = fscache_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index d751f04..ab9463a 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -326,7 +326,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ int space_left = 0;
+ int first_tag = 0;
+ int tag_flag;
+- int i, to_free = 0;
++ int i;
+ int tag_bytes = journal_tag_bytes(journal);
+ struct buffer_head *cbh = NULL; /* For transactional checksums */
+ __u32 crc32_sum = ~0;
+@@ -996,7 +996,7 @@ restart_loop:
+ journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
+ spin_unlock(&journal->j_history_lock);
+
+- commit_transaction->t_state = T_FINISHED;
++ commit_transaction->t_state = T_COMMIT_CALLBACK;
+ J_ASSERT(commit_transaction == journal->j_committing_transaction);
+ journal->j_commit_sequence = commit_transaction->t_tid;
+ journal->j_committing_transaction = NULL;
+@@ -1011,38 +1011,44 @@ restart_loop:
+ journal->j_average_commit_time*3) / 4;
+ else
+ journal->j_average_commit_time = commit_time;
++
+ write_unlock(&journal->j_state_lock);
+
+- if (commit_transaction->t_checkpoint_list == NULL &&
+- commit_transaction->t_checkpoint_io_list == NULL) {
+- __jbd2_journal_drop_transaction(journal, commit_transaction);
+- to_free = 1;
++ if (journal->j_checkpoint_transactions == NULL) {
++ journal->j_checkpoint_transactions = commit_transaction;
++ commit_transaction->t_cpnext = commit_transaction;
++ commit_transaction->t_cpprev = commit_transaction;
+ } else {
+- if (journal->j_checkpoint_transactions == NULL) {
+- journal->j_checkpoint_transactions = commit_transaction;
+- commit_transaction->t_cpnext = commit_transaction;
+- commit_transaction->t_cpprev = commit_transaction;
+- } else {
+- commit_transaction->t_cpnext =
+- journal->j_checkpoint_transactions;
+- commit_transaction->t_cpprev =
+- commit_transaction->t_cpnext->t_cpprev;
+- commit_transaction->t_cpnext->t_cpprev =
+- commit_transaction;
+- commit_transaction->t_cpprev->t_cpnext =
++ commit_transaction->t_cpnext =
++ journal->j_checkpoint_transactions;
++ commit_transaction->t_cpprev =
++ commit_transaction->t_cpnext->t_cpprev;
++ commit_transaction->t_cpnext->t_cpprev =
++ commit_transaction;
++ commit_transaction->t_cpprev->t_cpnext =
+ commit_transaction;
+- }
+ }
+ spin_unlock(&journal->j_list_lock);
+-
++	/* Drop all spin_locks because commit_callback may block.
++	 * __journal_remove_checkpoint() cannot destroy the transaction
++	 * under us because it is not marked as T_FINISHED yet */
+ if (journal->j_commit_callback)
+ journal->j_commit_callback(journal, commit_transaction);
+
+ trace_jbd2_end_commit(journal, commit_transaction);
+ jbd_debug(1, "JBD2: commit %d complete, head %d\n",
+ journal->j_commit_sequence, journal->j_tail_sequence);
+- if (to_free)
+- kfree(commit_transaction);
+
++ write_lock(&journal->j_state_lock);
++ spin_lock(&journal->j_list_lock);
++ commit_transaction->t_state = T_FINISHED;
++ /* Recheck checkpoint lists after j_list_lock was dropped */
++ if (commit_transaction->t_checkpoint_list == NULL &&
++ commit_transaction->t_checkpoint_io_list == NULL) {
++ __jbd2_journal_drop_transaction(journal, commit_transaction);
++ kfree(commit_transaction);
++ }
++ spin_unlock(&journal->j_list_lock);
++ write_unlock(&journal->j_state_lock);
+ wake_up(&journal->j_wait_done_commit);
+ }
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 0fa0123..17b04fc 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -663,6 +663,37 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
+ }
+
+ /*
++ * When this function returns, the transaction corresponding to tid
++ * will have completed. If the transaction is currently running, start
++ * committing that transaction before waiting for it to complete. If
++ * the transaction id is stale, it is by definition already completed,
++ * so just return SUCCESS.
++ */
++int jbd2_complete_transaction(journal_t *journal, tid_t tid)
++{
++ int need_to_wait = 1;
++
++ read_lock(&journal->j_state_lock);
++ if (journal->j_running_transaction &&
++ journal->j_running_transaction->t_tid == tid) {
++ if (journal->j_commit_request != tid) {
++ /* transaction not yet started, so request it */
++ read_unlock(&journal->j_state_lock);
++ jbd2_log_start_commit(journal, tid);
++ goto wait_commit;
++ }
++ } else if (!(journal->j_committing_transaction &&
++ journal->j_committing_transaction->t_tid == tid))
++ need_to_wait = 0;
++ read_unlock(&journal->j_state_lock);
++ if (!need_to_wait)
++ return 0;
++wait_commit:
++ return jbd2_log_wait_commit(journal, tid);
++}
++EXPORT_SYMBOL(jbd2_complete_transaction);
++
++/*
+ * Log buffer allocation routines:
+ */
+
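
The new jbd2_complete_transaction() collapses the start-then-wait idiom into one call with three outcomes: kick off the commit if tid is the running transaction and no commit was requested yet, just wait if it is already committing (or already requested), and return immediately for a stale tid. A hedged sketch of that decision table (the int-coded fields stand in for journal_t state; -1 means no transaction):

    #include <stdio.h>

    enum action { DONE = 0, WAIT = 1, START_AND_WAIT = 2 };

    /* Simplified mirror of the tid checks in jbd2_complete_transaction().
     * running/committing/requested stand in for the journal fields. */
    static enum action complete_tid(int running, int committing,
                                    int requested, int tid)
    {
        if (running == tid)
            return (requested == tid) ? WAIT : START_AND_WAIT;
        if (committing == tid)
            return WAIT;
        return DONE;    /* stale tid: by definition already completed */
    }

    int main(void)
    {
        printf("%d\n", complete_tid(7, -1, 5, 7)); /* 2: start, then wait */
        printf("%d\n", complete_tid(8,  7, 8, 7)); /* 1: just wait */
        printf("%d\n", complete_tid(9,  8, 9, 7)); /* 0: already done */
        return 0;
    }
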
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
+index 8d4ea83..de88922 100644
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -141,6 +141,9 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
+ timeout);
+ if (ret < 0)
+ return -ERESTARTSYS;
++ /* Reset the lock status after a server reboot so we resend */
++ if (block->b_status == nlm_lck_denied_grace_period)
++ block->b_status = nlm_lck_blocked;
+ req->a_res.status = block->b_status;
+ return 0;
+ }
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index a3a0987..8392cb8 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,9 +551,6 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
+- /* Resend the blocking lock request after a server reboot */
+- if (resp->status == nlm_lck_denied_grace_period)
+- continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index fe5c5fb..08921b8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -880,14 +880,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+
+ nfs4_lock_state();
+ status = nfs4_preprocess_stateid_op(cstate, stateid, WR_STATE, &filp);
+- if (filp)
+- get_file(filp);
+- nfs4_unlock_state();
+-
+ if (status) {
++ nfs4_unlock_state();
+ dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+ return status;
+ }
++ if (filp)
++ get_file(filp);
++ nfs4_unlock_state();
+
+ cnt = write->wr_buflen;
+ write->wr_how_written = write->wr_stable_how;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 7d189dc..4cef99f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -188,13 +188,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
+ {
+ if (atomic_dec_and_test(&fp->fi_access[oflag])) {
+ nfs4_file_put_fd(fp, oflag);
+- /*
+- * It's also safe to get rid of the RDWR open *if*
+- * we no longer have need of the other kind of access
+- * or if we already have the other kind of open:
+- */
+- if (fp->fi_fds[1-oflag]
+- || atomic_read(&fp->fi_access[1 - oflag]) == 0)
++ if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
+ nfs4_file_put_fd(fp, O_RDWR);
+ }
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 24afa96..ade5316 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -360,10 +360,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_atime.tv_sec);
++ READ64(iattr->ia_atime.tv_sec);
+ READ32(iattr->ia_atime.tv_nsec);
+ if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -386,10 +383,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_mtime.tv_sec);
++ READ64(iattr->ia_mtime.tv_sec);
+ READ32(iattr->ia_mtime.tv_nsec);
+ if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -2374,8 +2368,7 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.atime.tv_sec);
++ WRITE64((s64)stat.atime.tv_sec);
+ WRITE32(stat.atime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
+@@ -2388,15 +2381,13 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.ctime.tv_sec);
++ WRITE64((s64)stat.ctime.tv_sec);
+ WRITE32(stat.ctime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.mtime.tv_sec);
++ WRITE64((s64)stat.mtime.tv_sec);
+ WRITE32(stat.mtime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
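
These xdr hunks widen nfstime4 handling to the full 64-bit seconds field the protocol defines instead of demanding a zero upper word, so timestamps outside the 32-bit range survive a round trip. A hedged sketch of the 12-byte big-endian layout (helper names are illustrative, not kernel xdr macros):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* nfstime4 on the wire: 64-bit seconds + 32-bit nanoseconds, both
     * big-endian, 12 bytes total. */
    static void enc_nfstime4(unsigned char buf[12], int64_t sec, uint32_t nsec)
    {
        uint32_t w[3];
        w[0] = htonl((uint32_t)((uint64_t)sec >> 32));
        w[1] = htonl((uint32_t)sec);
        w[2] = htonl(nsec);
        memcpy(buf, w, 12);
    }

    static int64_t dec_nfstime4_sec(const unsigned char buf[12])
    {
        uint32_t w[2];
        memcpy(w, buf, 8);
        return (int64_t)(((uint64_t)ntohl(w[0]) << 32) | ntohl(w[1]));
    }

    int main(void)
    {
        unsigned char buf[12];
        enc_nfstime4(buf, 1LL << 32, 500);     /* seconds beyond year 2038 */
        printf("decoded sec = %lld\n", (long long)dec_nfstime4_sec(buf));
        return 0;
    }
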
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 6f292dd..f255d37 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -577,7 +577,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ int add = (arg & IN_MASK_ADD);
+ int ret;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+@@ -628,7 +627,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+@@ -757,6 +755,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
+ int ret, fput_needed;
+ unsigned flags = 0;
+
++ /* don't allow invalid bits: we don't want flags set */
++ if (unlikely(!(mask & ALL_INOTIFY_BITS)))
++ return -EINVAL;
++
+ filp = fget_light(fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
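
Hoisting the "don't allow invalid bits" test to the syscall entry makes a mask with no valid inotify event bits fail fast with -EINVAL rather than registering a watch that can never fire. A toy version of this validate-at-the-boundary pattern (the bit set below is a trimmed stand-in for the real ALL_INOTIFY_BITS):

    #include <errno.h>
    #include <stdio.h>

    #define IN_ACCESS 0x00000001
    #define IN_MODIFY 0x00000002
    #define IN_OPEN   0x00000020
    #define ALL_BITS  (IN_ACCESS | IN_MODIFY | IN_OPEN) /* trimmed set */

    /* Reject a user-supplied mask before any fd/path lookup, the way
     * the patched inotify_add_watch() entry point does. */
    static int add_watch(unsigned int mask)
    {
        if (!(mask & ALL_BITS))
            return -EINVAL;
        /* ... fget, path walk, watch registration would follow ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", add_watch(IN_MODIFY));  /* 0 */
        printf("%d\n", add_watch(0x40000000)); /* -EINVAL (-22) */
        return 0;
    }
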
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 3899e24..e756bc4 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -977,6 +977,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ enum kobj_ns_type type;
+ const void *ns;
+ ino_t ino;
++ loff_t off;
+
+ type = sysfs_ns_type(parent_sd);
+ ns = sysfs_info(dentry->d_sb)->ns[type];
+@@ -999,6 +1000,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ return 0;
+ }
+ mutex_lock(&sysfs_mutex);
++ off = filp->f_pos;
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+ pos;
+ pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
+@@ -1010,19 +1012,24 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ len = strlen(name);
+ ino = pos->s_ino;
+ type = dt_type(pos);
+- filp->f_pos = ino;
++ off = filp->f_pos = ino;
+ filp->private_data = sysfs_get(pos);
+
+ mutex_unlock(&sysfs_mutex);
+- ret = filldir(dirent, name, len, filp->f_pos, ino, type);
++ ret = filldir(dirent, name, len, off, ino, type);
+ mutex_lock(&sysfs_mutex);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&sysfs_mutex);
+- if ((filp->f_pos > 1) && !pos) { /* EOF */
+- filp->f_pos = INT_MAX;
++
++ /* don't reference last entry if its refcount is dropped */
++ if (!pos) {
+ filp->private_data = NULL;
++
++		/* EOF, and f_pos was not reset to 0 or 1 by the read/write path */
++ if (off == filp->f_pos && off > 1)
++ filp->f_pos = INT_MAX;
+ }
+ return 0;
+ }
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index 8a297a5..497c6cc 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -42,8 +42,8 @@ struct ipc_namespace {
+
+ size_t shm_ctlmax;
+ size_t shm_ctlall;
++ unsigned long shm_tot;
+ int shm_ctlmni;
+- int shm_tot;
+ /*
+ * Defines whether IPC_RMID is forced for _all_ shm segments regardless
+ * of shmctl()
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 2092ea2..a153ed5 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -470,6 +470,7 @@ struct transaction_s
+ T_COMMIT,
+ T_COMMIT_DFLUSH,
+ T_COMMIT_JFLUSH,
++ T_COMMIT_CALLBACK,
+ T_FINISHED
+ } t_state;
+
+@@ -1165,6 +1166,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+ int jbd2_journal_force_commit_nested(journal_t *journal);
+ int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
++int jbd2_complete_transaction(journal_t *journal, tid_t tid);
+ int jbd2_log_do_checkpoint(journal_t *journal);
+ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 4baadd1..d0493f6 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1509,6 +1509,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
++
+
+ struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ unsigned int foll_flags);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 00ca32b..8c43fd1 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -225,9 +225,9 @@ struct netdev_hw_addr {
+ #define NETDEV_HW_ADDR_T_SLAVE 3
+ #define NETDEV_HW_ADDR_T_UNICAST 4
+ #define NETDEV_HW_ADDR_T_MULTICAST 5
+- bool synced;
+ bool global_use;
+ int refcount;
++ int synced;
+ struct rcu_head rcu_head;
+ };
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index da65890..efe50af 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2367,6 +2367,13 @@ static inline void nf_reset(struct sk_buff *skb)
+ #endif
+ }
+
++static inline void nf_reset_trace(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
++ skb->nf_trace = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ {
+diff --git a/ipc/shm.c b/ipc/shm.c
+index b76be5b..326a20b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -450,7 +450,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ size_t size = params->u.size;
+ int error;
+ struct shmid_kernel *shp;
+- int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
++ size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct file * file;
+ char name[13];
+ int id;
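
Both shm hunks widen page accounting from int: in size_t (or unsigned long) arithmetic, the rounding expression (size + PAGE_SIZE - 1) >> PAGE_SHIFT cannot wrap for huge segments the way a 32-bit int can. A small demonstration, assuming a 64-bit build and 4 KiB pages:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  ((size_t)1 << PAGE_SHIFT)

    int main(void)
    {
        size_t size = (size_t)1 << 44;   /* a 16 TiB segment request */

        /* Old: page count squeezed through int; on typical LP64
         * systems the conversion truncates this to 0. */
        int narrow = (int)((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        /* New: size_t keeps the full count. */
        size_t wide = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("int pages:    %d\n", narrow);
        printf("size_t pages: %zu\n", wide);
        return 0;
    }
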
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 31fdc48..0caf1f8 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -608,9 +608,9 @@ void audit_trim_trees(void)
+ }
+ spin_unlock(&hash_lock);
+ trim_marked(tree);
+- put_tree(tree);
+ drop_collected_mounts(root_mnt);
+ skip_it:
++ put_tree(tree);
+ mutex_lock(&audit_filter_mutex);
+ }
+ list_del(&cursor);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index c0739f8..d2a01fe 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2029,7 +2029,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+ if (!group)
+ return -ENOMEM;
+ /* pre-allocate to guarantee space while iterating in rcu read-side. */
+- retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
++ retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
+ if (retval)
+ goto out_free_group_list;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7d1f05e..9f21915 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5164,7 +5164,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
+
+ static int perf_swevent_init(struct perf_event *event)
+ {
+- int event_id = event->attr.config;
++ u64 event_id = event->attr.config;
+
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+@@ -5756,6 +5756,7 @@ skip_type:
+ if (pmu->pmu_cpu_context)
+ goto got_cpu_context;
+
++ ret = -ENOMEM;
+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+ if (!pmu->pmu_cpu_context)
+ goto free_dev;
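
Keeping attr.config in a u64 matters because the old int copy dropped the upper 32 bits before the software-event range check, so a crafted config with high bits set could alias a valid event id. The truncation in isolation (PERF_COUNT_SW_MAX stands in for the real bound):

    #include <stdint.h>
    #include <stdio.h>

    #define PERF_COUNT_SW_MAX 10   /* illustrative bound */

    int main(void)
    {
        uint64_t config = (1ULL << 32) | 3;  /* high bits set, low word 3 */
        int as_int = (int)config;            /* what the old code compared;
                                                typically 3 after truncation */

        printf("u64 config %llu -> int %d\n",
               (unsigned long long)config, as_int);
        printf("passes bound check: %s\n",
               as_int < PERF_COUNT_SW_MAX ? "yes (wrongly)" : "no");
        return 0;
    }
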
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index e4cee8d..60f7e32 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -298,6 +298,10 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
++ /* Make sure nsec fits into long */
++ if (unlikely(nsec > KTIME_SEC_MAX))
++ return (ktime_t){ .tv64 = KTIME_MAX };
++
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+@@ -1308,6 +1312,8 @@ retry:
+
+ expires = ktime_sub(hrtimer_get_expires(timer),
+ base->offset);
++ if (expires.tv64 < 0)
++ expires.tv64 = KTIME_MAX;
+ if (expires.tv64 < expires_next.tv64)
+ expires_next = expires;
+ break;
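
The ktime_sub_ns() guard keeps a huge nanosecond count from overflowing the long seconds field when repacked into a ktime: anything at or past KTIME_SEC_MAX saturates to KTIME_MAX. Roughly the same saturate-on-pack rule in portable 64-bit C (a sketch, not the kernel's union-based ktime_t):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define KTIME_MAX     INT64_MAX
    #define KTIME_SEC_MAX (KTIME_MAX / (int64_t)NSEC_PER_SEC)

    /* Pack a nanosecond count into a signed 64-bit ktime-like value,
     * saturating instead of overflowing for out-of-range inputs. */
    static int64_t pack_ns(uint64_t nsec)
    {
        uint64_t sec = nsec / NSEC_PER_SEC;
        uint64_t rem = nsec % NSEC_PER_SEC;

        if (sec >= (uint64_t)KTIME_SEC_MAX)
            return KTIME_MAX;               /* clamp, as the patch does */
        return (int64_t)(sec * NSEC_PER_SEC + rem);
    }

    int main(void)
    {
        printf("%lld\n", (long long)pack_ns(1500000000ULL));
        printf("%lld\n", (long long)pack_ns(UINT64_MAX));   /* clamped */
        return 0;
    }
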
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index cd068b2..c3509fb 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,6 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
+ */
+ int tick_check_broadcast_device(struct clock_event_device *dev)
+ {
++ struct clock_event_device *cur = tick_broadcast_device.evtdev;
++
+ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+@@ -73,6 +75,8 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
+ return 0;
+
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
++ if (cur)
++ cur->event_handler = clockevents_handle_noop;
+ tick_broadcast_device.evtdev = dev;
+ if (!cpumask_empty(tick_get_broadcast_mask()))
+ tick_broadcast_start_periodic(dev);
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index da6c9ec..ead79bc 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -323,6 +323,7 @@ static void tick_shutdown(unsigned int *cpup)
+ */
+ dev->mode = CLOCK_EVT_MODE_UNUSED;
+ clockevents_exchange_device(dev, NULL);
++ dev->event_handler = clockevents_handle_noop;
+ td->evtdev = NULL;
+ }
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 5527211..24b3759 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -554,7 +554,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+- for (i = 0; i < pages; i++) {
++ for (i = 1; i < pages; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pg->next)
+ goto out_free;
+@@ -3303,7 +3303,8 @@ out:
+ if (fail)
+ return -EINVAL;
+
+- ftrace_graph_filter_enabled = 1;
++ ftrace_graph_filter_enabled = !!(*idx);
++
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17edb14..0ec6c34 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4563,6 +4563,8 @@ static __init int tracer_init_debugfs(void)
+ trace_access_lock_init();
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("tracing_enabled", 0644, d_tracer,
+ &global_trace, &tracing_ctrl_fops);
+@@ -4696,36 +4698,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
+ iter->cpu_file = TRACE_PIPE_ALL_CPU;
+ }
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
++void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ {
+- static arch_spinlock_t ftrace_dump_lock =
+- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+ /* use static because iter can be a bit big for the stack */
+ static struct trace_iterator iter;
++ static atomic_t dump_running;
+ unsigned int old_userobj;
+- static int dump_ran;
+ unsigned long flags;
+ int cnt = 0, cpu;
+
+- /* only one dump */
+- local_irq_save(flags);
+- arch_spin_lock(&ftrace_dump_lock);
+- if (dump_ran)
+- goto out;
+-
+- dump_ran = 1;
++ /* Only allow one dump user at a time. */
++ if (atomic_inc_return(&dump_running) != 1) {
++ atomic_dec(&dump_running);
++ return;
++ }
+
++ /*
++ * Always turn off tracing when we dump.
++ * We don't need to show trace output of what happens
++ * between multiple crashes.
++ *
++ * If the user does a sysrq-z, then they can re-enable
++ * tracing with echo 1 > tracing_on.
++ */
+ tracing_off();
+
+- /* Did function tracer already get disabled? */
+- if (ftrace_is_dead()) {
+- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+- printk("# MAY BE MISSING FUNCTION EVENTS\n");
+- }
+-
+- if (disable_tracing)
+- ftrace_kill();
++ local_irq_save(flags);
+
+ trace_init_global_iter(&iter);
+
+@@ -4758,6 +4756,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+
+ printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
++ /* Did function tracer already get disabled? */
++ if (ftrace_is_dead()) {
++ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
++ printk("# MAY BE MISSING FUNCTION EVENTS\n");
++ }
++
+ /*
+	 * We need to stop all tracing on all CPUs to read
+	 * the next buffer. This is a bit expensive, but is
+@@ -4796,26 +4800,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+ printk(KERN_TRACE "---------------------------------\n");
+
+ out_enable:
+- /* Re-enable tracing if requested */
+- if (!disable_tracing) {
+- trace_flags |= old_userobj;
++ trace_flags |= old_userobj;
+
+- for_each_tracing_cpu(cpu) {
+- atomic_dec(&iter.tr->data[cpu]->disabled);
+- }
+- tracing_on();
++ for_each_tracing_cpu(cpu) {
++ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+-
+- out:
+- arch_spin_unlock(&ftrace_dump_lock);
++ atomic_dec(&dump_running);
+ local_irq_restore(flags);
+ }
+-
+-/* By default: disable tracing after the dump */
+-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+-{
+- __ftrace_dump(true, oops_dump_mode);
+-}
++EXPORT_SYMBOL_GPL(ftrace_dump);
+
+ __init static int tracer_alloc_buffers(void)
+ {
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 288541f..09fd98a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
+ /* Maximum number of functions to trace before diagnosing a hang */
+ #define GRAPH_MAX_FUNC_TEST 100000000
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
+ static unsigned int graph_hang_thresh;
+
+ /* Wrap the real function entry probe to avoid possible hanging */
+@@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+ ftrace_graph_stop();
+ printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+- if (ftrace_dump_on_oops)
+- __ftrace_dump(false, DUMP_ALL);
++ if (ftrace_dump_on_oops) {
++ ftrace_dump(DUMP_ALL);
++ /* ftrace_dump() disables tracing */
++ tracing_on();
++ }
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index 77575b3..c5b20a3 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -17,13 +17,24 @@
+
+ #define STACK_TRACE_ENTRIES 500
+
++#ifdef CC_USING_FENTRY
++# define fentry 1
++#else
++# define fentry 0
++#endif
++
+ static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+ { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
++/*
++ * Reserve one entry for the passed in ip. This will allow
++ * us to remove most or all of the stack size overhead
++ * added by the stack tracer itself.
++ */
+ static struct stack_trace max_stack_trace = {
+- .max_entries = STACK_TRACE_ENTRIES,
+- .entries = stack_dump_trace,
++ .max_entries = STACK_TRACE_ENTRIES - 1,
++ .entries = &stack_dump_trace[1],
+ };
+
+ static unsigned long max_stack_size;
+@@ -37,25 +48,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
+ int stack_tracer_enabled;
+ static int last_stack_tracer_enabled;
+
+-static inline void check_stack(void)
++static inline void
++check_stack(unsigned long ip, unsigned long *stack)
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
++ static int tracer_frame;
++ int frame_size = ACCESS_ONCE(tracer_frame);
+ int i;
+
+- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
++ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
++ /* Remove the frame of the tracer */
++ this_size -= frame_size;
+
+ if (this_size <= max_stack_size)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_is_on_stack(stack))
+ return;
+
+ local_irq_save(flags);
+ arch_spin_lock(&max_stack_lock);
+
++ /* In case another CPU set the tracer_frame on us */
++ if (unlikely(!frame_size))
++ this_size -= tracer_frame;
++
+ /* a race could have already updated it */
+ if (this_size <= max_stack_size)
+ goto out;
+@@ -68,10 +88,18 @@ static inline void check_stack(void)
+ save_stack_trace(&max_stack_trace);
+
+ /*
++ * Add the passed in ip from the function tracer.
++ * Searching for this on the stack will skip over
++ * most of the overhead from the stack tracer itself.
++ */
++ stack_dump_trace[0] = ip;
++ max_stack_trace.nr_entries++;
++
++ /*
+ * Now find where in the stack these are.
+ */
+ i = 0;
+- start = &this_size;
++ start = stack;
+ top = (unsigned long *)
+ (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
+
+@@ -95,6 +123,18 @@ static inline void check_stack(void)
+ found = 1;
+ /* Start the search from here */
+ start = p + 1;
++ /*
++ * We do not want to show the overhead
++ * of the stack tracer stack in the
++ * max stack. If we haven't figured
++ * out what that is, then figure it out
++ * now.
++ */
++ if (unlikely(!tracer_frame) && i == 1) {
++ tracer_frame = (p - stack) *
++ sizeof(unsigned long);
++ max_stack_size -= tracer_frame;
++ }
+ }
+ }
+
+@@ -110,6 +150,7 @@ static inline void check_stack(void)
+ static void
+ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ {
++ unsigned long stack;
+ int cpu;
+
+ if (unlikely(!ftrace_enabled || stack_trace_disabled))
+@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack();
++ /*
++ * When fentry is used, the traced function does not get
++ * its stack frame set up, and we lose the parent.
++ * The ip is pretty useless because the function tracer
++ * was called before that function set up its stack frame.
++ * In this case, we use the parent ip.
++ *
++ * By adding the return address of either the parent ip
++ * or the current ip we can disregard most of the stack usage
++ * caused by the stack tracer itself.
++ *
++ * The function tracer always reports the address of where the
++ * mcount call was, but the stack will hold the return address.
++ */
++ if (fentry)
++ ip = parent_ip;
++ else
++ ip += MCOUNT_INSN_SIZE;
++
++ check_stack(ip, &stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;
+@@ -351,6 +411,8 @@ static __init int stack_trace_init(void)
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("stack_max_size", 0644, d_tracer,
+ &max_stack_size, &stack_max_size_fops);
+diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
+index 96cffb2..847f88a 100644
+--- a/kernel/trace/trace_stat.c
++++ b/kernel/trace/trace_stat.c
+@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
+ struct dentry *d_tracing;
+
+ d_tracing = tracing_init_dentry();
++ if (!d_tracing)
++ return 0;
+
+ stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ if (!stat_dir)
+diff --git a/mm/memory.c b/mm/memory.c
+index 4f2add1..d5f913b 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2309,6 +2309,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++/**
++ * vm_iomap_memory - remap memory to userspace
++ * @vma: user vma to map to
++ * @start: start of area
++ * @len: size of area
++ *
++ * This is a simplified io_remap_pfn_range() for common driver use. The
++ * driver just needs to give us the physical memory range to be mapped,
++ * we'll figure out the rest from the vma information.
++ *
++ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
++ * whatever write-combining details or similar.
++ */
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
++{
++ unsigned long vm_len, pfn, pages;
++
++ /* Check that the physical memory area passed in looks valid */
++ if (start + len < start)
++ return -EINVAL;
++ /*
++ * You *really* shouldn't map things that aren't page-aligned,
++ * but we've historically allowed it because IO memory might
++ * just have smaller alignment.
++ */
++ len += start & ~PAGE_MASK;
++ pfn = start >> PAGE_SHIFT;
++ pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (pfn + pages < pfn)
++ return -EINVAL;
++
++ /* We start the mapping 'vm_pgoff' pages into the area */
++ if (vma->vm_pgoff > pages)
++ return -EINVAL;
++ pfn += vma->vm_pgoff;
++ pages -= vma->vm_pgoff;
++
++ /* Can we fit all of the mapping? */
++ vm_len = vma->vm_end - vma->vm_start;
++ if (vm_len >> PAGE_SHIFT > pages)
++ return -EINVAL;
++
++ /* Ok, let it rip */
++ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_iomap_memory);
++
+ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ pte_fn_t fn, void *data)
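
vm_iomap_memory() exists so drivers can stop open-coding the offset,
alignment and size checks around io_remap_pfn_range(). A hypothetical
driver mmap handler (mydrv, phys_base and region_len are illustrative
names, not from this patch) reduces to:

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydrv *drv = file->private_data;

		/* Per the NOTE above: adjust caching before mapping. */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/* Bounds and vm_pgoff handling happen inside the helper. */
		return vm_iomap_memory(vma, drv->phys_base, drv->region_len);
	}

The sound/core/pcm_native.c hunk later in this patch is exactly such a
conversion.
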
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 0ca06e8..43b6bfe 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -500,6 +500,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
++ msg->msg_namelen = 0;
++
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
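
This is the first of a series of recvmsg() fixes in this patch (ax25,
bluetooth, rfcomm, caif, irda, iucv, llc, netrom, rose and tipc follow
the same scheme): msg_namelen must be zeroed unless a source address is
actually filled in, and any sockaddr that is filled in must be fully
initialized first, or uninitialized kernel stack bytes leak to
userspace through padding and unused fields. The defensive pattern,
sketched with made-up myproto_* and fill_peer_address() helpers:

	static int myproto_recvmsg(struct kiocb *iocb, struct socket *sock,
				   struct msghdr *msg, size_t size, int flags)
	{
		struct sockaddr_storage *addr = msg->msg_name;

		/* Report no address unless one is really filled in. */
		msg->msg_namelen = 0;

		if (addr && myproto_has_peer(sock)) {
			/* Zero first so padding carries no stack garbage. */
			memset(addr, 0, sizeof(*addr));
			fill_peer_address(sock, addr, &msg->msg_namelen);
		}

		return myproto_receive_data(sock, msg, size, flags);
	}
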
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index b04a6ef..86ac37f 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1641,6 +1641,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
+
++ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+ &digi, NULL, NULL);
+ sax->sax25_family = AF_AX25;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 062124c..838f113 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -245,6 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -252,8 +254,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ return err;
+ }
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 14c4864..82ce164 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -627,6 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
++ msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index a986280..53a8e37 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -320,6 +320,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
++ m->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 720aea0..8e455b8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1619,6 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
++ nf_reset_trace(skb);
+ return netif_rx(skb);
+ }
+ EXPORT_SYMBOL_GPL(dev_forward_skb);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 0387da0..cd09414 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,7 +57,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+- ha->synced = false;
++ ha->synced = 0;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+@@ -155,7 +155,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ addr_len, ha->type);
+ if (err)
+ break;
+- ha->synced = true;
++ ha->synced++;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+@@ -176,7 +176,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+- ha->synced = false;
++ ha->synced--;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3b5e680..5b7d5f2 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1064,7 +1064,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+
+ if (tb[IFLA_EXT_MASK])
+@@ -1907,7 +1907,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ u32 ext_filter_mask = 0;
+ u16 min_ifinfo_dump_size = 0;
+
+- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 530787b..238fc3b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -137,8 +137,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ /* skb is pure payload to encrypt */
+
+- err = -ENOMEM;
+-
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+@@ -174,8 +172,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b2cfe83..8f441b2 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -251,8 +251,7 @@ static void ip_expire(unsigned long arg)
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+- /* skb dst is stale, drop it, and perform route lookup again */
+- skb_dst_drop(head);
++ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+@@ -518,8 +517,16 @@ found:
+ qp->q.last_in |= INET_FRAG_FIRST_IN;
+
+ if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- qp->q.meat == qp->q.len)
+- return ip_frag_reasm(qp, prev, dev);
++ qp->q.meat == qp->q.len) {
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ err = ip_frag_reasm(qp, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return err;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip4_frags.lock);
+ list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
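
The ip_fragment.c hunk stops relying on ip_expire() to drop a stale
dst: fragments that are merely queued drop their dst immediately via
skb_dst_drop(skb), while on the completion path the dst reference is
parked in a local across ip_frag_reasm() and restored afterwards, so
the caller's reference accounting stays balanced. net/ipv6/reassembly.c
below gets the identical treatment. The skeleton of the save/restore,
annotated:

	unsigned long orefdst = skb->_skb_refdst; /* stash ref + flags */

	skb->_skb_refdst = 0UL;       /* reassemble without a dst */
	err = ip_frag_reasm(qp, prev, dev);
	skb->_skb_refdst = orefdst;   /* caller still owns the reference */
	return err;
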
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 769c0e9..8a1bed2 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
++ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, th->source, th->dest);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 3124e17..872b41d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -115,6 +115,7 @@ int sysctl_tcp_abc __read_mostly;
+ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
+ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
+ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
++#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+
+ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+ #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+@@ -3723,6 +3724,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
+ }
+ }
+
++static void tcp_store_ts_recent(struct tcp_sock *tp)
++{
++ tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
++ tp->rx_opt.ts_recent_stamp = get_seconds();
++}
++
++static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
++{
++ if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
++ /* PAWS bug workaround wrt. ACK frames, the PAWS discard
++ * extra check below makes sure this can only happen
++ * for pure ACK frames. -DaveM
++ *
++		 * This also happens for expired timestamps.
++ */
++
++ if (tcp_paws_check(&tp->rx_opt, 0))
++ tcp_store_ts_recent(tp);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3771,6 +3793,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ prior_fackets = tp->fackets_out;
+ prior_in_flight = tcp_packets_in_flight(tp);
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ if (flag & FLAG_UPDATE_TS_RECENT)
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ /* Window is constant, pure forward advance.
+ * No more checks are required.
+@@ -4061,27 +4089,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ EXPORT_SYMBOL(tcp_parse_md5sig_option);
+ #endif
+
+-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
+-{
+- tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+- tp->rx_opt.ts_recent_stamp = get_seconds();
+-}
+-
+-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+-{
+- if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+- /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+- * extra check below makes sure this can only happen
+- * for pure ACK frames. -DaveM
+- *
+- * Not only, also it occurs for expired timestamps.
+- */
+-
+- if (tcp_paws_check(&tp->rx_opt, 0))
+- tcp_store_ts_recent(tp);
+- }
+-}
+-
+ /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
+ *
+ * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
+@@ -5552,14 +5559,10 @@ slow_path:
+ return 0;
+
+ step5:
+- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
++ if (th->ack &&
++ tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -5923,7 +5926,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ /* step 5: check the ACK field */
+ if (th->ack) {
+- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
++ int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
++ FLAG_UPDATE_TS_RECENT) > 0;
+
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+@@ -6030,11 +6034,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
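
The net effect of the tcp_input.c changes is an ordering guarantee:
ts_recent may only be updated by a segment that tcp_ack() has already
validated, so the update moves out of the callers and into tcp_ack(),
gated by the new FLAG_UPDATE_TS_RECENT. As a before/after sketch:

	/* Before: tcp_replace_ts_recent() ran in the callers, so a
	 * segment that the ACK processing goes on to reject could
	 * still have updated ts_recent.
	 *
	 * After: the update happens inside tcp_ack(), only once the
	 * window checks have passed. */
	if (th->ack &&
	    tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
		goto discard;
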
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8589c2d..d84033b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2404,6 +2404,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
+ static void init_loopback(struct net_device *dev)
+ {
+ struct inet6_dev *idev;
++ struct net_device *sp_dev;
++ struct inet6_ifaddr *sp_ifa;
++ struct rt6_info *sp_rt;
+
+ /* ::1 */
+
+@@ -2415,6 +2418,30 @@ static void init_loopback(struct net_device *dev)
+ }
+
+ add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
++
++ /* Add routes to other interface's IPv6 addresses */
++ for_each_netdev(dev_net(dev), sp_dev) {
++ if (!strcmp(sp_dev->name, dev->name))
++ continue;
++
++ idev = __in6_dev_get(sp_dev);
++ if (!idev)
++ continue;
++
++ read_lock_bh(&idev->lock);
++ list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
++
++ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
++ continue;
++
++ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
++
++ /* Failure cases are ignored */
++ if (!IS_ERR(sp_rt))
++ ip6_ins_rt(sp_rt);
++ }
++ read_unlock_bh(&idev->lock);
++ }
+ }
+
+ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 2b0a4ca..411fe2c 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -386,8 +386,17 @@ found:
+ }
+
+ if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- fq->q.meat == fq->q.len)
+- return ip6_frag_reasm(fq, prev, dev);
++ fq->q.meat == fq->q.len) {
++ int res;
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ res = ip6_frag_reasm(fq, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return res;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip6_frags.lock);
+ list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index f4b49c5..91821e9 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index cf98d62..e836140 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1356,6 +1356,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int blen;
+ int err = 0;
+
++ msg->msg_namelen = 0;
++
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 99a60d5..e5565c7 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index f156382..3df7c5a 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1178,6 +1178,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ if (sax != NULL) {
++ memset(sax, 0, sizeof(*sax));
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index f9ea925..1f96fb9 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+ if (srose != NULL) {
++ memset(srose, 0, msg->msg_namelen);
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 599f67a..b7cddb9 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -963,8 +963,11 @@ cbq_dequeue(struct Qdisc *sch)
+ cbq_update(q);
+ if ((incr -= incr2) < 0)
+ incr = 0;
++ q->now += incr;
++ } else {
++ if (now > q->now)
++ q->now = now;
+ }
+- q->now += incr;
+ q->now_rt = now;
+
+ for (;;) {
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index bf81204..333926d 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
+ return;
+
+ if (atomic_dec_and_test(&key->refcnt)) {
+- kfree(key);
++ kzfree(key);
+ SCTP_DBG_OBJCNT_DEC(keys);
+ }
+ }
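
kzfree() is kfree() preceded by zeroing the allocation, so a released
authentication key cannot linger in recycled slab memory. Generically,
for any key material (sketch; key_len is an illustrative name):

	memset(key, 0, key_len); /* scrub before the allocator reuses it */
	kfree(key);

which is what kzfree(key) does in one call, deriving the length from
the allocator itself.
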
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 42b8324..fdf34af 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -829,6 +829,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+ if (addr) {
+ addr->family = AF_TIPC;
+ addr->addrtype = TIPC_ADDR_ID;
++ memset(&addr->addr, 0, sizeof(addr->addr));
+ addr->addr.id.ref = msg_origport(msg);
+ addr->addr.id.node = msg_orignode(msg);
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+@@ -948,6 +949,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1074,6 +1078,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 18978b6..5611563 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1956,7 +1956,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ (UNIXCB(skb).cred != siocb->scm->cred))
+ break;
+- } else {
++ } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ /* Copy credentials */
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+ check_creds = 1;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 0b08905..21958cd 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -853,7 +853,7 @@ static void handle_channel(struct wiphy *wiphy,
+ return;
+
+ REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
+- chan->flags = IEEE80211_CHAN_DISABLED;
++ chan->flags |= IEEE80211_CHAN_DISABLED;
+ return;
+ }
+
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 7ada40e..638600b 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3204,18 +3204,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
+ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+ {
+- long size;
+- unsigned long offset;
++	struct snd_pcm_runtime *runtime = substream->runtime;
+
+ area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
+- area->vm_flags |= VM_IO;
+- size = area->vm_end - area->vm_start;
+- offset = area->vm_pgoff << PAGE_SHIFT;
+- if (io_remap_pfn_range(area, area->vm_start,
+- (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
+- size, area->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
+ }
+
+ EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index ebbf63c..b7cf246 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -2007,7 +2007,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
+ ret);
+ goto err_access;
+ }
+- dev_info(codec->dev, "revision %c\n", ret + 'A');
++ dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
+
+ snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
+
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 566acb3..acb7fac 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -611,7 +611,9 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ int err = -ENODEV;
+
+ down_read(&chip->shutdown_rwsem);
+- if (!chip->shutdown && !chip->probing)
++ if (chip->probing)
++ err = 0;
++ else if (!chip->shutdown)
+ err = usb_autopm_get_interface(chip->pm_intf);
+ up_read(&chip->shutdown_rwsem);
+
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 665e297..2b7559c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -73,6 +73,7 @@ struct snd_usb_substream {
+ unsigned int fill_max: 1; /* fill max packet size always */
+ unsigned int txfr_quirk:1; /* allow sub-frame alignment */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
++ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
+
+ unsigned int running: 1; /* running status */
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 9ab2b3e..5ebe8c4 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -458,7 +458,7 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
+ stride = runtime->frame_bits >> 3;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+- cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
++ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj;
+ if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
+ snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ // continue;
+@@ -898,6 +898,7 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->speed = snd_usb_get_speed(subs->dev);
+ if (subs->speed >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
++ subs->pkt_offset_adj = 0;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 34b9bb7..e5fee18 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -126,7 +126,6 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- bool autopm_reference;
+ unsigned int opened[2];
+ unsigned char disconnected;
+ unsigned char input_running;
+@@ -1040,7 +1039,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
+- int err;
+
+ down_read(&umidi->disc_rwsem);
+ if (umidi->disconnected) {
+@@ -1051,13 +1049,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ mutex_lock(&umidi->mutex);
+ if (open) {
+ if (!umidi->opened[0] && !umidi->opened[1]) {
+- err = usb_autopm_get_interface(umidi->iface);
+- umidi->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES) {
+- mutex_unlock(&umidi->mutex);
+- up_read(&umidi->disc_rwsem);
+- return -EIO;
+- }
+ if (umidi->roland_load_ctl) {
+ ctl = umidi->roland_load_ctl;
+ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+@@ -1080,8 +1071,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+ }
+- if (umidi->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
+@@ -2256,6 +2245,8 @@ int snd_usbmidi_create(struct snd_card *card,
+ return err;
+ }
+
++ usb_autopm_get_interface_no_resume(umidi->iface);
++
+ list_add_tail(&umidi->list, midi_list);
+ return 0;
+ }
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index dfbd65d..42eeee8 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -744,6 +744,7 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ break;
+ }
+ snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
++ subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
+ }
+
+ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 5ff8010..33a335b 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -168,6 +168,14 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+ if (!csep && altsd->bNumEndpoints >= 2)
+ csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
+
++ /*
++ * If we can't locate the USB_DT_CS_ENDPOINT descriptor in the extra
++ * bytes after the first endpoint, go search the entire interface.
++ * Some devices have it directly *before* the standard endpoint.
++ */
++ if (!csep)
++ csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
++
+ if (!csep || csep->bLength < 7 ||
+ csep->bDescriptorSubtype != UAC_EP_GENERAL) {
+ snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
diff --git a/3.2.54/1045_linux-3.2.46.patch b/3.2.54/1045_linux-3.2.46.patch
new file mode 100644
index 0000000..bc10efd
--- /dev/null
+++ b/3.2.54/1045_linux-3.2.46.patch
@@ -0,0 +1,3142 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 897f223..2ba8272 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -734,6 +734,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ edd= [EDD]
+ Format: {"off" | "on" | "skip[mbr]"}
+
++ efi_no_storage_paranoia [EFI; X86]
++			Allow using more than 50% of the EFI variable
++			storage.  Use this parameter only if you are sure
++			that your UEFI does sane garbage collection and
++			follows the spec, as otherwise your board may brick.
++
+ eisa_irq_edge= [PARISC,HW]
+ See header of drivers/parisc/eisa.c.
+
+diff --git a/Makefile b/Makefile
+index 9072fee..f600582 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
+index 606d48f..8aab786 100644
+--- a/arch/arm/configs/at91sam9g45_defconfig
++++ b/arch/arm/configs/at91sam9g45_defconfig
+@@ -173,7 +173,6 @@ CONFIG_MMC=y
+ # CONFIG_MMC_BLOCK_BOUNCE is not set
+ CONFIG_SDIO_UART=m
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_LEDS_ATMEL_PWM=y
+ CONFIG_LEDS_GPIO=y
+ CONFIG_LEDS_TRIGGER_TIMER=y
+diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
+index 262c034..3d0737e 100644
+--- a/arch/arm/mach-kirkwood/ts219-setup.c
++++ b/arch/arm/mach-kirkwood/ts219-setup.c
+@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
+ static int __init ts219_pci_init(void)
+ {
+ if (machine_is_ts219())
+- kirkwood_pcie_init(KW_PCIE0);
++ kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
+
+ return 0;
+ }
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 8a6886a..c72b083 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -347,7 +347,7 @@ static struct resource orion_ge10_shared_resources[] = {
+
+ static struct platform_device orion_ge10_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 2,
+ .dev = {
+ .platform_data = &orion_ge10_shared_data,
+ },
+@@ -362,8 +362,8 @@ static struct resource orion_ge10_resources[] = {
+
+ static struct platform_device orion_ge10 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 2,
++ .num_resources = 1,
+ .resource = orion_ge10_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+@@ -401,7 +401,7 @@ static struct resource orion_ge11_shared_resources[] = {
+
+ static struct platform_device orion_ge11_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 3,
+ .dev = {
+ .platform_data = &orion_ge11_shared_data,
+ },
+@@ -416,8 +416,8 @@ static struct resource orion_ge11_resources[] = {
+
+ static struct platform_device orion_ge11 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 3,
++ .num_resources = 1,
+ .resource = orion_ge11_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
+index 19973b0..59e4cc9 100644
+--- a/arch/avr32/configs/favr-32_defconfig
++++ b/arch/avr32/configs/favr-32_defconfig
+@@ -122,7 +122,6 @@ CONFIG_USB_G_SERIAL=m
+ CONFIG_USB_CDC_COMPOSITE=m
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=m
+diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
+index 3befab9..65de443 100644
+--- a/arch/avr32/configs/merisc_defconfig
++++ b/arch/avr32/configs/merisc_defconfig
+@@ -102,7 +102,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_LOGO=y
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=y
+diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
+index 596f730..2c94129 100644
+--- a/arch/avr32/kernel/module.c
++++ b/arch/avr32/kernel/module.c
+@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ break;
+ case R_AVR32_GOT18SW:
+ if ((relocation & 0xfffe0003) != 0
+- && (relocation & 0xfffc0003) != 0xffff0000)
++ && (relocation & 0xfffc0000) != 0xfffc0000)
+ return reloc_overflow(module, "R_AVR32_GOT18SW",
+ relocation);
+ relocation >>= 2;
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 41f69ae..8c3efd2 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -230,6 +230,8 @@ extern void rtas_progress(char *s, unsigned short hex);
+ extern void rtas_initialize(void);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(struct rtas_args *);
+
+ struct rtc_time;
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 517b1d8..434a180 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/capability.h>
+ #include <linux/delay.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+@@ -716,7 +717,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ int cpu;
+
+ slb_set_size(SLB_MIN_SIZE);
+- stop_topology_update();
+ printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
+
+ while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
+@@ -732,7 +732,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ rc = atomic_read(&data->error);
+
+ atomic_set(&data->error, rc);
+- start_topology_update();
+ pSeries_coalesce_init();
+
+ if (wake_when_done) {
+@@ -811,6 +810,95 @@ static void rtas_percpu_suspend_me(void *info)
+ __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
+
++enum rtas_cpu_state {
++ DOWN,
++ UP,
++};
++
++#ifndef CONFIG_SMP
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ if (!cpumask_empty(cpus)) {
++ cpumask_clear(cpus);
++ return -EINVAL;
++ } else
++ return 0;
++}
++#else
++/* On return, the cpumask will be altered to indicate which CPUs changed:
++ * CPUs whose state changed are set in the mask,
++ * CPUs whose state is unchanged are cleared in the mask. */
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ int cpu;
++ int cpuret = 0;
++ int ret = 0;
++
++ if (cpumask_empty(cpus))
++ return 0;
++
++ for_each_cpu(cpu, cpus) {
++ switch (state) {
++ case DOWN:
++ cpuret = cpu_down(cpu);
++ break;
++ case UP:
++ cpuret = cpu_up(cpu);
++ break;
++ }
++ if (cpuret) {
++ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
++ __func__,
++ ((state == UP) ? "up" : "down"),
++ cpu, cpuret);
++ if (!ret)
++ ret = cpuret;
++ if (state == UP) {
++ /* clear bits for unchanged cpus, return */
++ cpumask_shift_right(cpus, cpus, cpu);
++ cpumask_shift_left(cpus, cpus, cpu);
++ break;
++ } else {
++ /* clear bit for unchanged cpu, continue */
++ cpumask_clear_cpu(cpu, cpus);
++ }
++ }
++ }
++
++ return ret;
++}
++#endif
++
++int rtas_online_cpus_mask(cpumask_var_t cpus)
++{
++ int ret;
++
++ ret = rtas_cpu_state_change_mask(UP, cpus);
++
++ if (ret) {
++ cpumask_var_t tmp_mask;
++
++ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
++ return ret;
++
++ /* Use tmp_mask to preserve cpus mask from first failure */
++ cpumask_copy(tmp_mask, cpus);
++ rtas_offline_cpus_mask(tmp_mask);
++ free_cpumask_var(tmp_mask);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(rtas_online_cpus_mask);
++
++int rtas_offline_cpus_mask(cpumask_var_t cpus)
++{
++ return rtas_cpu_state_change_mask(DOWN, cpus);
++}
++EXPORT_SYMBOL(rtas_offline_cpus_mask);
++
+ int rtas_ibm_suspend_me(struct rtas_args *args)
+ {
+ long state;
+@@ -818,6 +906,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
++ cpumask_var_t offline_mask;
++ int cpuret;
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
+@@ -841,12 +931,26 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ return 0;
+ }
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ atomic_set(&data.working, 0);
+ atomic_set(&data.done, 0);
+ atomic_set(&data.error, 0);
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
+
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
++ cpuret = rtas_online_cpus_mask(offline_mask);
++ if (cpuret) {
++ pr_err("%s: Could not bring present CPUs online.\n", __func__);
++ atomic_set(&data.error, cpuret);
++ goto out;
++ }
++
++ stop_topology_update();
++
+ /* Call function on all CPUs. One of us will make the
+ * rtas call
+ */
+@@ -858,6 +962,16 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ if (atomic_read(&data.error) != 0)
+ printk(KERN_ERR "Error doing global join\n");
+
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ cpuret = rtas_offline_cpus_mask(offline_mask);
++ if (cpuret)
++ pr_warn("%s: Could not restore CPUs to offline state.\n",
++ __func__);
++
++out:
++ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
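
rtas_online_cpus_mask()/rtas_offline_cpus_mask() give the suspend and
hibernate paths a symmetric bring-up/tear-down, and on a failed
bring-up the mask is trimmed to the CPUs that actually changed so they
can be rolled back. The calling pattern (which the pseries suspend hunk
below follows) looks like this, with do_suspend_operation() as an
illustrative placeholder:

	cpumask_var_t offline_mask;

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	/* Present-but-offline CPUs must come up for the operation. */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	if (!rtas_online_cpus_mask(offline_mask)) {
		do_suspend_operation();
		rtas_offline_cpus_mask(offline_mask); /* restore */
	}
	free_cpumask_var(offline_mask);
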
+diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
+index d3de084..55a4771 100644
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -16,6 +16,7 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/suspend.h>
+ #include <linux/stat.h>
+@@ -24,6 +25,7 @@
+ #include <asm/machdep.h>
+ #include <asm/mmu.h>
+ #include <asm/rtas.h>
++#include <asm/topology.h>
+
+ static u64 stream_id;
+ static struct sys_device suspend_sysdev;
+@@ -125,11 +127,15 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+ {
++ cpumask_var_t offline_mask;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ stream_id = simple_strtoul(buf, NULL, 16);
+
+ do {
+@@ -138,13 +144,33 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ ssleep(1);
+ } while (rc == -EAGAIN);
+
+- if (!rc)
++ if (!rc) {
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask,
++ cpu_online_mask);
++ rc = rtas_online_cpus_mask(offline_mask);
++ if (rc) {
++ pr_err("%s: Could not bring present CPUs online.\n",
++ __func__);
++ goto out;
++ }
++
++ stop_topology_update();
+ rc = pm_suspend(PM_SUSPEND_MEM);
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ if (!rtas_offline_cpus_mask(offline_mask))
++ pr_warn("%s: Could not restore CPUs to offline "
++ "state.\n", __func__);
++ }
+
+ stream_id = 0;
+
+ if (!rc)
+ rc = count;
++out:
++ free_cpumask_var(offline_mask);
+ return rc;
+ }
+
+diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
+index aa365c5..5888f1b 100644
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -69,6 +69,8 @@ extern unsigned long end_iomem;
+ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+
++#define io_remap_pfn_range remap_pfn_range
++
+ /*
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 429e0c9..fb2eb32 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -160,10 +160,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ u64 arch_irq_stat(void)
+ {
+ u64 sum = atomic_read(&irq_err_count);
+-
+-#ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
+-#endif
+ return sum;
+ }
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 407789b..aac5ea7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4882,6 +4882,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ if (err != EMULATE_DONE)
+ return 0;
+
++ if (vcpu->arch.halt_request) {
++ vcpu->arch.halt_request = 0;
++ ret = kvm_emulate_halt(vcpu);
++ goto out;
++ }
++
+ if (signal_pending(current))
+ goto out;
+ if (need_resched())
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 1de542b..07ef7e8 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -101,6 +101,15 @@ static int __init setup_add_efi_memmap(char *arg)
+ }
+ early_param("add_efi_memmap", setup_add_efi_memmap);
+
++static bool efi_no_storage_paranoia;
++
++static int __init setup_storage_paranoia(char *arg)
++{
++ efi_no_storage_paranoia = true;
++ return 0;
++}
++early_param("efi_no_storage_paranoia", setup_storage_paranoia);
++
+
+ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+ {
+@@ -815,3 +824,37 @@ u64 efi_mem_attributes(unsigned long phys_addr)
+ }
+ return 0;
+ }
++
++/*
++ * Some firmware has serious problems when using more than 50% of the EFI
++ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
++ * we never use more than this safe limit.
++ *
++ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
++ * store.
++ */
++efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ efi_status_t status;
++ u64 storage_size, remaining_size, max_size;
++
++ status = efi.query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++ if (status != EFI_SUCCESS)
++ return status;
++
++ if (!max_size && remaining_size > size)
++ printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
++ " is returning MaxVariableSize=0\n");
++
++ if (!storage_size || size > remaining_size ||
++ (max_size && size > max_size))
++ return EFI_OUT_OF_RESOURCES;
++
++ if (!efi_no_storage_paranoia &&
++ (remaining_size - size) < (storage_size / 2))
++ return EFI_OUT_OF_RESOURCES;
++
++ return EFI_SUCCESS;
++}
++EXPORT_SYMBOL_GPL(efi_query_variable_store);
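
A worked example of the check above: with storage_size = 64 KiB,
remaining_size = 33 KiB and a 2 KiB write, the write itself fits, but
33 - 2 = 31 KiB would remain, below the 32 KiB (50%) floor, so
EFI_OUT_OF_RESOURCES is returned unless efi_no_storage_paranoia was
given on the command line. The policy, restated outside kernel context
as a sketch (sizes in bytes, paranoia = !efi_no_storage_paranoia):

	static int write_allowed(u64 storage, u64 remaining, u64 max,
				 u64 size, int paranoia)
	{
		if (!storage || size > remaining || (max && size > max))
			return 0;	/* simply does not fit */
		if (paranoia && (remaining - size) < storage / 2)
			return 0;	/* would cross the 50% floor */
		return 1;
	}
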
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 044f5d9..5189fe8 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -129,6 +129,21 @@ static void xen_vcpu_setup(int cpu)
+
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
++ /*
++ * This path is called twice on PVHVM - first during bootup via
++ * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
++ * hotplugged: cpu_up -> xen_hvm_cpu_notify.
++ * As we can only do the VCPUOP_register_vcpu_info once lets
++ * not over-write its result.
++ *
++ * For PV it is called during restore (xen_vcpu_restore) and bootup
++ * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
++ * use this function.
++ */
++ if (xen_hvm_domain()) {
++ if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
++ return;
++ }
+ if (cpu < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
+index f915a7f..b334f54 100644
+--- a/drivers/acpi/acpica/exfldio.c
++++ b/drivers/acpi/acpica/exfldio.c
+@@ -702,7 +702,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+
+ if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+ (obj_desc->common_field.bit_length == access_bit_width)) {
+- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
++ if (buffer_length >= sizeof(u64)) {
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, buffer,
++ ACPI_READ);
++ } else {
++ /* Use raw_datum (u64) to handle buffers < 64 bits */
++
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
++ ACPI_READ);
++ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
++ }
++
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d2519b2..51de186 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -217,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ static int ec_poll(struct acpi_ec *ec)
+ {
+ unsigned long flags;
+- int repeat = 2; /* number of command restarts */
++ int repeat = 5; /* number of command restarts */
+ while (repeat--) {
+ unsigned long delay = jiffies +
+ msecs_to_jiffies(ec_delay);
+@@ -235,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec)
+ }
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ } while (time_before(jiffies, delay));
+- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
+- break;
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ start_transaction(ec);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index d9c0199..53e28a9 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -164,6 +164,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
+ },
+ },
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "Asus UL30A",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
++ },
++ },
+ { },
+ };
+
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index d22119d..968a0d4 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+
+ spin_lock(&brd->brd_lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
++ page->index = idx;
+ if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+ __free_page(page);
+ page = radix_tree_lookup(&brd->brd_pages, idx);
+ BUG_ON(!page);
+ BUG_ON(page->index != idx);
+- } else
+- page->index = idx;
++ }
+ spin_unlock(&brd->brd_lock);
+
+ radix_tree_preload_end();
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 43beaca..13cbdd3 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
+ if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ enum drbd_state_rv rv2;
+
+- drbd_set_role(mdev, R_SECONDARY, 0);
+ /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
+ * we might be here in C_WF_REPORT_PARAMS which is transient.
+ * we do not need to wait for the after state change work either. */
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index 3ed20e8..92ce302 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -95,9 +95,9 @@ struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int write_count;
+- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
+index 2aa3977..8dde1f5 100644
+--- a/drivers/char/ipmi/ipmi_devintf.c
++++ b/drivers/char/ipmi/ipmi_devintf.c
+@@ -838,13 +838,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+ }
++
++static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++
++ mutex_lock(&ipmi_mutex);
++ ret = compat_ipmi_ioctl(filep, cmd, arg);
++ mutex_unlock(&ipmi_mutex);
++
++ return ret;
++}
+ #endif
+
+ static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ #ifdef CONFIG_COMPAT
+- .compat_ioctl = compat_ipmi_ioctl,
++ .compat_ioctl = unlocked_compat_ipmi_ioctl,
+ #endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 8ae9235..b651733 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -889,16 +889,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
+ } else {
++ int entropy_count, orig;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ /* If limited, never pull more than available */
+- if (r->limit && nbytes + reserved >= r->entropy_count / 8)
+- nbytes = r->entropy_count/8 - reserved;
+-
+- if (r->entropy_count / 8 >= nbytes + reserved)
+- r->entropy_count -= nbytes*8;
+- else
+- r->entropy_count = reserved;
++ if (r->limit && nbytes + reserved >= entropy_count / 8)
++ nbytes = entropy_count/8 - reserved;
++
++ if (entropy_count / 8 >= nbytes + reserved) {
++ entropy_count -= nbytes*8;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ } else {
++ entropy_count = reserved;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ }
+
+- if (r->entropy_count < random_write_wakeup_thresh) {
++ if (entropy_count < random_write_wakeup_thresh) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
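
The account() rewrite above replaces a plain read-modify-write of
r->entropy_count with the standard lock-free retry idiom: snapshot the
value, compute the update, and commit only if no other CPU changed the
field in between. In general form (compute_update() is illustrative):

	int old, new;

	do {
		old = ACCESS_ONCE(r->entropy_count);	/* snapshot */
		new = compute_update(old);
	} while (cmpxchg(&r->entropy_count, old, new) != old);
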
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index 629c430..49138e7 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -489,7 +489,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
+ dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
+
+ if (!ret) {
+- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
++ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
+ if (ret) {
+ spin_lock(&pd_chan->lock);
+ pd_chan->descs_allocated++;
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index b15c0aa..2a64e69 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -425,24 +425,12 @@ static efi_status_t
+ check_var_size_locked(struct efivars *efivars, u32 attributes,
+ unsigned long size)
+ {
+- u64 storage_size, remaining_size, max_size;
+- efi_status_t status;
+ const struct efivar_operations *fops = efivars->ops;
+
+- if (!efivars->ops->query_variable_info)
++ if (!efivars->ops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+- status = fops->query_variable_info(attributes, &storage_size,
+- &remaining_size, &max_size);
+-
+- if (status != EFI_SUCCESS)
+- return status;
+-
+- if (!storage_size || size > remaining_size || size > max_size ||
+- (remaining_size - size) < (storage_size / 2))
+- return EFI_OUT_OF_RESOURCES;
+-
+- return status;
++ return fops->query_variable_store(attributes, size);
+ }
+
+ static ssize_t
+@@ -1456,7 +1444,7 @@ efivars_init(void)
+ ops.get_variable = efi.get_variable;
+ ops.set_variable = efi.set_variable;
+ ops.get_next_variable = efi.get_next_variable;
+- ops.query_variable_info = efi.query_variable_info;
++ ops.query_variable_store = efi_query_variable_store;
+ error = register_efivars(&__efivars, &ops, efi_kobj);
+ if (error)
+ goto err_put;
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index c77fc67..ca67338 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,50 +1007,56 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
++#define PTE_ADDRESS_MASK 0xfffff000
++#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
++#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
++#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
++#define PTE_MAPPING_TYPE_CACHED (3 << 1)
++#define PTE_MAPPING_TYPE_MASK (3 << 1)
++#define PTE_VALID (1 << 0)
++
++/**
++ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
++ * a physical one
++ * @dev: drm device
++ * @offset: address to translate
++ *
++ * Some chip functions require allocations from stolen space and need the
++ * physical address of the memory in question.
++ */
++static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
++#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so on those compute the base by subtracting the
+- * stolen memory from the Top of Low Usable DRAM which is where the
+- * BIOS places the graphics stolen memory.
+- *
+- * On gen2, the layout is slightly different with the Graphics Segment
+- * immediately following Top of Memory (or Top of Usable DRAM). Note
+- * it appears that TOUD is only reported by 865g, so we just use the
+- * top of memory as determined by the e820 probe.
+- *
+- * XXX gen2 requires an unavailable symbol and 945gm fails with
+- * its value of TOLUD.
++ * is unreliable, so compute the base by subtracting the stolen memory
++ * from the Top of Low Usable DRAM which is where the BIOS places
++ * the graphics stolen memory.
+ */
+- base = 0;
+- if (INTEL_INFO(dev)->gen >= 6) {
+- /* Read Base Data of Stolen Memory Register (BDSM) directly.
+- * Note that there is also a MCHBAR miror at 0x1080c0 or
+- * we could use device 2:0x5c instead.
+- */
+- pci_read_config_dword(pdev, 0xB0, &base);
+- base &= ~4095; /* lower bits used for locking register */
+- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* Read Graphics Base of Stolen Memory directly */
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+-#if 0
+- } else if (IS_GEN3(dev)) {
++ } else {
++ /* XXX presume 8xx is the same as i915 */
++ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
++ }
++#else
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ u16 val;
++ pci_read_config_word(pdev, 0xb0, &val);
++ base = val >> 4 << 20;
++ } else {
+ u8 val;
+- /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- base -= dev_priv->mm.gtt->stolen_size;
+- } else {
+- /* Stolen is immediately above Top of Memory */
+- base = max_low_pfn_mapped << PAGE_SHIFT;
+-#endif
+ }
++ base -= dev_priv->mm.gtt->stolen_size;
++#endif
+
+- return base;
++ return base + offset;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1075,7 +1081,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
++ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1088,7 +1094,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
++ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1107,7 +1113,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- (long)cfb_base, (long)ll_base, size >> 20);
++ cfb_base, ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1181,13 +1187,6 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+- if (dev_priv->mm.stolen_base == 0)
+- return 0;
+-
+- DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+- dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
+-
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
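
The register decoding in i915_stolen_to_phys() deserves spelling out.
On older parts, config byte 0x9c carries the Top of Low Usable DRAM in
128 MiB units in bits 7:3, hence val >> 3 << 27; on G33 and later, the
16-bit register at 0xb0 holds it in 1 MiB units in bits 15:4, hence
val >> 4 << 20. Stolen memory sits immediately below TOLUD, so the
stolen size is subtracted from the decoded base. Mirroring the two
branches with illustrative helpers (not from the patch):

	static u32 tolud_from_9c(u8 val)  { return (u32)(val >> 3) << 27; }
	static u32 tolud_from_b0(u16 val) { return (u32)(val >> 4) << 20; }

For example, val = 0x20 read from 0x9c gives (0x20 >> 3) << 27 =
4 * 128 MiB = 512 MiB; with 64 MiB of stolen memory, the stolen base is
then 448 MiB.
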
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 20cd295..144d37c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,7 +581,6 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
+- unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+index 1fe98b4..9aa02be 100644
+--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+ for (i = 0; i < nr; ++i) {
+- if (DRM_COPY_FROM_USER_UNCHECKED
++ if (DRM_COPY_FROM_USER
+ (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+ DRM_ERROR("copy cliprect faulted\n");
+ return -EFAULT;
+diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
+index 65a35cf..61ab615 100644
+--- a/drivers/hwmon/abituguru.c
++++ b/drivers/hwmon/abituguru.c
+@@ -1280,14 +1280,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev)
+ pr_info("found Abit uGuru\n");
+
+ /* Register sysfs hooks */
+- for (i = 0; i < sysfs_attr_i; i++)
+- if (device_create_file(&pdev->dev,
+- &data->sysfs_attr[i].dev_attr))
++ for (i = 0; i < sysfs_attr_i; i++) {
++ res = device_create_file(&pdev->dev,
++ &data->sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
+- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
+- if (device_create_file(&pdev->dev,
+- &abituguru_sysfs_attr[i].dev_attr))
++ }
++ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
++ res = device_create_file(&pdev->dev,
++ &abituguru_sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
++ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (!IS_ERR(data->hwmon_dev))
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 6193349..3c2812f 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -349,7 +349,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+
+- /* Enable interrupts */
++ /* Clear and enable interrupts */
++ i2c_dw_clear_int(dev);
+ dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
+ }
+
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 0a6806f..a5dfcc0 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -322,6 +322,9 @@ static void __cache_size_refresh(void)
+ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ enum data_mode *data_mode)
+ {
++ unsigned noio_flag;
++ void *ptr;
++
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+ *data_mode = DATA_MODE_SLAB;
+ return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+@@ -335,7 +338,28 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ }
+
+ *data_mode = DATA_MODE_VMALLOC;
+- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ /*
++ * __vmalloc allocates the data pages and auxiliary structures with
++ * gfp_flags that were specified, but pagetables are always allocated
++ * with GFP_KERNEL, no matter what was specified as gfp_mask.
++ *
++ * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
++ * all allocations done by this process (including pagetables) are done
++ * as if GFP_NOIO was specified.
++ */
++
++ if (gfp_mask & __GFP_NORETRY) {
++ noio_flag = current->flags & PF_MEMALLOC;
++ current->flags |= PF_MEMALLOC;
++ }
++
++ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ if (gfp_mask & __GFP_NORETRY)
++ current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;
++
++ return ptr;
+ }
+
+ /*
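
The subtlety the dm-bufio comment describes: __vmalloc() honours
gfp_mask for the data pages and auxiliary structures, but the page
tables backing the vmalloc area are always GFP_KERNEL allocations,
which can recurse into the block layer and deadlock a storage driver
that is itself under memory pressure. This 3.2 backport piggybacks on
PF_MEMALLOC; later kernels express the same idea with a dedicated
PF_MEMALLOC_NOIO flag and helpers, roughly (not available in 3.2):

	unsigned int noio = memalloc_noio_save();
	ptr = __vmalloc(size, GFP_NOIO | __GFP_NORETRY, PAGE_KERNEL);
	memalloc_noio_restore(noio);
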
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 34ec2b5..b4aaa7b 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1117,6 +1117,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
++ r = -ENOMEM;
+ goto bad_pending_pool;
+ }
+
+diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
+index e5180e4..5d15c6b 100644
+--- a/drivers/media/dvb/mantis/mantis_dvb.c
++++ b/drivers/media/dvb/mantis/mantis_dvb.c
+@@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+ err5:
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+- dvb_unregister_frontend(mantis->fe);
+- dvb_frontend_detach(mantis->fe);
++ if (mantis->fe) {
++ dvb_unregister_frontend(mantis->fe);
++ dvb_frontend_detach(mantis->fe);
++ }
+ err4:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+
+diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
+index 8d816cc..105f820 100644
+--- a/drivers/mfd/adp5520.c
++++ b/drivers/mfd/adp5520.c
+@@ -36,6 +36,7 @@ struct adp5520_chip {
+ struct blocking_notifier_head notifier_list;
+ int irq;
+ unsigned long id;
++ uint8_t mode;
+ };
+
+ static int __adp5520_read(struct i2c_client *client,
+@@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode);
++ /* All other bits are W1C */
++ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY;
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
+ return 0;
+ }
+
+@@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode);
+ return 0;
+ }
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index fb7c27f..c1aec06 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -363,13 +363,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
++ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if (card->ext_csd.rev >= 4) {
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+- card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ hc_erase_grp_sz =
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index cf444b0..90233ad 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -297,16 +297,6 @@ config MMC_ATMELMCI
+
+ endchoice
+
+-config MMC_ATMELMCI_DMA
+- bool "Atmel MCI DMA support"
+- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
+- help
+- Say Y here to have the Atmel MCI driver use a DMA engine to
+- do data transfers and thus increase the throughput and
+- reduce the CPU utilization.
+-
+- If unsure, say N.
+-
+ config MMC_IMX
+ tristate "Motorola i.MX Multimedia Card Interface support"
+ depends on ARCH_MX1
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 0932024..83790f2 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -163,6 +163,7 @@ struct atmel_mci {
+ void __iomem *regs;
+
+ struct scatterlist *sg;
++ unsigned int sg_len;
+ unsigned int pio_offset;
+
+ struct atmel_mci_slot *cur_slot;
+@@ -751,6 +752,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+ data->error = -EINPROGRESS;
+
+ host->sg = data->sg;
++ host->sg_len = data->sg_len;
+ host->data = data;
+ host->data_chan = NULL;
+
+@@ -1573,7 +1575,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1586,7 +1589,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 4 - remaining;
+@@ -1640,7 +1644,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += 4;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1654,7 +1659,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+- if (!sg) {
++ host->sg_len--;
++ if (!sg || !host->sg_len) {
+ atmci_writel(host, ATMCI_TDR, value);
+ goto done;
+ }
+@@ -2167,10 +2173,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
+ atmci_readl(host, ATMCI_SR);
+ clk_disable(host->mck);
+
+-#ifdef CONFIG_MMC_ATMELMCI_DMA
+ if (host->dma.chan)
+ dma_release_channel(host->dma.chan);
+-#endif
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ iounmap(host->regs);
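+
+With the new sg_len counter the PIO paths above stop either when the chain
+ends or when the request's entry count is exhausted, i.e. the walk now has
+the shape (sketch, simplified from the three loops above):
+
+	while (sg && sg_len) {
+		/* transfer bytes from/to this sg entry */
+		sg = sg_next(sg);
+		sg_len--;
+	}
+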
+diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
+index 92053e6..c15d6ce 100644
+--- a/drivers/net/ethernet/3com/3c509.c
++++ b/drivers/net/ethernet/3com/3c509.c
+@@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev,
+ if (!dev)
+ return -ENOMEM;
+
++ SET_NETDEV_DEV(dev, pdev);
+ netdev_boot_setup_check(dev);
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
+@@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device)
+ return -ENOMEM;
+ }
+
++ SET_NETDEV_DEV(dev, device);
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index e0c5529..efc9dee 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -632,7 +632,6 @@ struct vortex_private {
+ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
+ open:1,
+ medialock:1,
+- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+@@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device)
+
+ unregister_netdev(dev);
+ iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
++ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+ free_netdev(dev);
+ return 0;
+@@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (rc < 0)
+ goto out;
+
++ rc = pci_request_regions(pdev, DRV_NAME);
++ if (rc < 0) {
++ pci_disable_device(pdev);
++ goto out;
++ }
++
+ unit = vortex_cards_found;
+
+ if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
+@@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ rc = -ENOMEM;
+ goto out;
+@@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ ent->driver_data, unit);
+ if (rc < 0) {
+ pci_iounmap(pdev, ioaddr);
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ goto out;
+ }
+@@ -1180,11 +1187,6 @@ static int __devinit vortex_probe1(struct device *gendev,
+
+ /* PCI-only startup logic */
+ if (pdev) {
+- /* EISA resources already marked, so only PCI needs to do this here */
+- /* Ignore return value, because Cardbus drivers already allocate for us */
+- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
+- vp->must_free_region = 1;
+-
+ /* enable bus-mastering if necessary */
+ if (vci->flags & PCI_USES_MASTER)
+ pci_set_master(pdev);
+@@ -1222,7 +1224,7 @@ static int __devinit vortex_probe1(struct device *gendev,
+ &vp->rx_ring_dma);
+ retval = -ENOMEM;
+ if (!vp->rx_ring)
+- goto free_region;
++ goto free_device;
+
+ vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+ vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+@@ -1487,9 +1489,7 @@ free_ring:
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+-free_region:
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vci->io_size);
++free_device:
+ free_netdev(dev);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
+ out:
+@@ -3254,8 +3254,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vp->io_size);
++
++ pci_release_regions(pdev);
++
+ free_netdev(dev);
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c6b9903..ec13a59 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -2752,6 +2752,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+ static int tg3_setup_phy(struct tg3 *, int);
+ static int tg3_halt_cpu(struct tg3 *, u32);
+
++static bool tg3_phy_power_bug(struct tg3 *tp)
++{
++ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
++ case ASIC_REV_5700:
++ case ASIC_REV_5704:
++ return true;
++ case ASIC_REV_5780:
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ return true;
++ return false;
++ case ASIC_REV_5717:
++ if (!tp->pci_fn)
++ return true;
++ return false;
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !tp->pci_fn)
++ return true;
++ return false;
++ }
++
++ return false;
++}
++
+ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ {
+ u32 val;
+@@ -2808,12 +2833,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+- !tp->pci_fn))
++ if (tg3_phy_power_bug(tp))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 021463b..57e2da0 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1325,7 +1325,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
+ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+ {
+- int rc, i;
++ int rc, i, mac_len;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+@@ -1335,11 +1335,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+- NULL);
++ &mac_len);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
++ /* Workaround for old/broken pHyp */
++ if (mac_len == 8)
++ mac_addr_p += 2;
++ else if (mac_len != 6) {
++ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
++ mac_len);
++ return -EINVAL;
++ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+@@ -1364,17 +1372,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+- /*
+- * Some older boxes running PHYP non-natively have an OF that returns
+- * a 8-byte local-mac-address field (and the first 2 bytes have to be
+- * ignored) while newer boxes' OF return a 6-byte field. Note that
+- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+- * The RPA doc specifies that the first byte must be 10b, so we'll
+- * just look for it to solve this 8 vs. 6 byte field issue
+- */
+- if ((*mac_addr_p & 0x3) != 0x02)
+- mac_addr_p += 2;
+-
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 544ac06..301b39e 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -204,7 +204,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ }
+
+ if (port->passthru)
+- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
++ vlan = list_first_or_null_rcu(&port->vlans,
++ struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (vlan == NULL)
+@@ -725,7 +726,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ if (err < 0)
+ goto destroy_port;
+
+- list_add_tail(&vlan->list, &port->vlans);
++ list_add_tail_rcu(&vlan->list, &port->vlans);
+ netif_stacked_transfer_operstate(lowerdev, dev);
+
+ return 0;
+@@ -751,7 +752,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+- list_del(&vlan->list);
++ list_del_rcu(&vlan->list);
+ unregister_netdevice_queue(dev, head);
+ }
+ EXPORT_SYMBOL_GPL(macvlan_dellink);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 95437fc..df3e27c 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1793,6 +1793,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { };
++ int key;
+
+ ath_node_attach(sc, sta);
+
+@@ -1800,7 +1801,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ vif->type != NL80211_IFTYPE_AP_VLAN)
+ return 0;
+
+- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
++ key = ath_key_config(common, vif, sta, &ps_key);
++ if (key > 0)
++ an->ps_key = key;
+
+ return 0;
+ }
+@@ -1817,6 +1820,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
+ return;
+
+ ath_key_delete(common, &ps_key);
++ an->ps_key = 0;
+ }
+
+ static int ath9k_sta_remove(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 12975ad..ca70267 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1719,6 +1719,25 @@ drop_recycle_buffer:
+ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
+ }
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
++{
++ int current_slot, previous_slot;
++
++ B43_WARN_ON(ring->tx);
++
++ /* Device has filled all buffers, drop all packets and let TCP
++ * decrease speed.
++	 * Decrementing the RX index by one lets the device see all slots
++	 * as free again.
++ */
++ /*
++	 * TODO: How to increase rx_drop in mac80211?
++ */
++ current_slot = ring->ops->get_current_rxslot(ring);
++ previous_slot = prev_slot(ring, current_slot);
++ ring->ops->set_current_rxslot(ring, previous_slot);
++}
++
+ void b43_dma_rx(struct b43_dmaring *ring)
+ {
+ const struct b43_dma_ops *ops = ring->ops;
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 9fdd198..df8c8cd 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -9,7 +9,7 @@
+ /* DMA-Interrupt reasons. */
+ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
+ | (1 << 14) | (1 << 15))
+-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
++#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
+ #define B43_DMAIRQ_RX_DONE (1 << 16)
+
+ /*** 32-bit DMA Engine. ***/
+@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
+ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_txstatus *status);
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
++
+ void b43_dma_rx(struct b43_dmaring *ring);
+
+ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 680709c..c0f2041 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -1901,30 +1901,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ }
+ }
+
+- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
+- B43_DMAIRQ_NONFATALMASK))) {
+- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
+- b43err(dev->wl, "Fatal DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- b43err(dev->wl, "This device does not support DMA "
++ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
++ b43err(dev->wl,
++ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
++ dma_reason[0], dma_reason[1],
++ dma_reason[2], dma_reason[3],
++ dma_reason[4], dma_reason[5]);
++ b43err(dev->wl, "This device does not support DMA "
+ "on your system. It will now be switched to PIO.\n");
+- /* Fall back to PIO transfers if we get fatal DMA errors! */
+- dev->use_pio = 1;
+- b43_controller_restart(dev, "DMA error");
+- return;
+- }
+- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+- b43err(dev->wl, "DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- }
++ /* Fall back to PIO transfers if we get fatal DMA errors! */
++ dev->use_pio = true;
++ b43_controller_restart(dev, "DMA error");
++ return;
+ }
+
+ if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
+@@ -1943,6 +1931,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ handle_irq_noise(dev);
+
+ /* Check the DMA reason registers for received data. */
++ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
++ if (B43_DEBUG)
++ b43warn(dev->wl, "RX descriptor underrun\n");
++ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
++ }
+ if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
+ if (b43_using_pio_transfers(dev))
+ b43_pio_rx(dev->pio.rx_queue);
+@@ -2000,7 +1993,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
+ return IRQ_NONE;
+
+ dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
+- & 0x0001DC00;
++ & 0x0001FC00;
+ dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
+ & 0x0000DC00;
+ dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
+@@ -3103,7 +3096,7 @@ static int b43_chip_init(struct b43_wldev *dev)
+ b43_write32(dev, 0x018C, 0x02000000);
+ }
+ b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
+- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
++ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
+ b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
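+
+For reference, the mask change from 0x0001DC00 to 0x0001FC00 differs only in
+bit 13 (0x2000), i.e. B43_DMAIRQ_RDESC_UFLOW: the RX descriptor underrun
+interrupt is now unmasked on DMA0, so the overflow handler above runs instead
+of the old non-fatal error path.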
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 727c129..45ac407 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1281,9 +1281,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+
+- if (dev->reg_state == NETREG_UNREGISTERED)
+- free_netdev(dev);
+-
+ /* Clear the priv in adapter */
+ priv->netdev = NULL;
+
+diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
+index 5a25dd2..90ffc76 100644
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -1083,6 +1083,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
+ adapter->if_ops.wakeup(adapter);
+ adapter->hs_activated = false;
+ adapter->is_hs_configured = false;
++ adapter->is_suspended = false;
+ mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY), false);
+ }
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index 67e6db7..5baa12a 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -581,6 +581,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev)
+ {
+ dev->netdev_ops = &mwifiex_netdev_ops;
++ dev->destructor = free_netdev;
+ /* Initialize private structure */
+ priv->current_key_index = 0;
+ priv->media_connected = false;
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index 56e1c4a..5c3c62d 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -105,7 +105,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ } else {
+ /* Multicast */
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
+- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
++ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
+ dev_dbg(priv->adapter->dev,
+ "info: Enabling All Multicast!\n");
+ priv->curr_pkt_filter |=
+@@ -117,20 +117,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ dev_dbg(priv->adapter->dev,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
+- /* Set multicast addresses to firmware */
+- if (old_pkt_filter == priv->curr_pkt_filter) {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- } else {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- }
++ /* Send multicast addresses to firmware */
++ ret = mwifiex_send_cmd_async(priv,
++ HostCmd_CMD_MAC_MULTICAST_ADR,
++ HostCmd_ACT_GEN_SET, 0,
++ mcast_list);
+ }
+ }
+ }
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 22b2dfa..fdacfce 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+
+ static int lis3lv02d_resume(struct acpi_device *device)
+ {
+- return lis3lv02d_poweron(&lis3_dev);
++ lis3lv02d_poweron(&lis3_dev);
++ return 0;
+ }
+ #else
+ #define lis3lv02d_suspend NULL
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 33471e1..23ef16c 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -475,6 +475,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ u32 intval;
+ u32 ch_inte;
+
++ /* For MSI mode disable all device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI)
++ iowrite32(0, priv->regs + TSI721_DEV_INTE);
++
+ dev_int = ioread32(priv->regs + TSI721_DEV_INT);
+ if (!dev_int)
+ return IRQ_NONE;
+@@ -548,6 +552,13 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ tsi721_pw_handler(mport);
+ }
+
++ /* For MSI mode re-enable device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI) {
++ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
++ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
++ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
+index 2ee3bbf..62e1b2c 100644
+--- a/drivers/rtc/rtc-pcf2123.c
++++ b/drivers/rtc/rtc-pcf2123.c
+@@ -264,6 +264,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
+
+ if (!(rxbuf[0] & 0x20)) {
+ dev_err(&spi->dev, "chip not found\n");
++ ret = -ENODEV;
+ goto kfree_exit;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index a023f52..fe4dbf3 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -143,6 +143,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
++ if (rc == 0)
++ /* Evade comedi_auto_unconfig(). */
++ dev_file_info->hardware_device = NULL;
+ goto done;
+ }
+
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index 51b5adf..df8ea25 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->apdev->name);
+ }
+- kfree(pDevice->apdev);
++ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
+ pDevice->bEnable8021x = FALSE;
+ pDevice->bEnableHostWEP = FALSE;
+diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
+index 101b1be..d86791e 100644
+--- a/drivers/target/iscsi/iscsi_target_erl1.c
++++ b/drivers/target/iscsi/iscsi_target_erl1.c
+@@ -824,7 +824,7 @@ static int iscsit_attach_ooo_cmdsn(
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+@@ -834,11 +834,12 @@ static int iscsit_attach_ooo_cmdsn(
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
+ continue;
+
++ /* Insert before this entry */
+ list_add(&ooo_cmdsn->ooo_list,
+- &ooo_tmp->ooo_list);
++ ooo_tmp->ooo_list.prev);
+ break;
+ }
+ }
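+
+iscsi_sna_lt() does a serial-number comparison so CmdSN ordering survives
+32-bit wraparound, where a plain '<' inverts (e.g. 0xfffffffe should sort
+before 0x00000001). The idea, not necessarily the tree's exact helper, is:
+
+	static inline bool sna_lt(u32 a, u32 b)
+	{
+		/* true when a precedes b in serial-number order */
+		return a != b && (s32)(a - b) < 0;
+	}
+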
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 8481aae..0f8a785 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1530,6 +1530,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ tty->real_raw = 0;
+ }
+ n_tty_set_room(tty);
++ /*
++ * Fix tty hang when I_IXON(tty) is cleared, but the tty
++ * been stopped by STOP_CHAR(tty) before it.
++	 * was stopped by STOP_CHAR(tty) before it.
++ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
++ start_tty(tty);
++ }
++
+ /* The termios change make the tty ready for I/O */
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index a845f8b..9497171 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
+ {
+ int ret, len;
+ __le32 *buf;
+- int offb, offd;
++ int offb;
++ unsigned int offd;
+ const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
+ int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 0aaa4f1..2fbcb75 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Edirol SD-20 */
+ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Alcor Micro Corp. Hub */
++ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 045cde4..850723f 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -221,7 +221,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ /* auto-stop if nothing connected for 1 second */
+ if (any_ports_active(uhci))
+ uhci->rh_state = UHCI_RH_RUNNING;
+- else if (time_after_eq(jiffies, uhci->auto_stop_time))
++ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
++ !uhci->wait_for_hp)
+ suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
+ break;
+
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index ee5ec11..430c1d5 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1353,15 +1353,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
+
+ /* Set the max packet size and max burst */
++ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
++ max_burst = 0;
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+- max_packet = usb_endpoint_maxp(&ep->desc);
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ /* dig out max burst from ep companion desc */
+- max_packet = ep->ss_ep_comp.bMaxBurst;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
++ max_burst = ep->ss_ep_comp.bMaxBurst;
+ break;
+ case USB_SPEED_HIGH:
++ /* Some devices get this wrong */
++ if (usb_endpoint_xfer_bulk(&ep->desc))
++ max_packet = 512;
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+@@ -1369,17 +1371,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (usb_endpoint_maxp(&ep->desc)
+ & 0x1800) >> 11;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
+ }
+- /* Fall through */
++ break;
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ break;
+ default:
+ BUG();
+ }
++ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
++ MAX_BURST(max_burst));
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 37b2a89..d08a804 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2376,14 +2376,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * TD list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+- "with no TDs queued?\n",
+- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+- ep_index);
+- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+- (le32_to_cpu(event->flags) &
+- TRB_TYPE_BITMASK)>>10);
+- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ /*
++ * A stopped endpoint may generate an extra completion
++ * event if the device was suspended. Don't print
++ * warnings.
++ */
++ if (!(trb_comp_code == COMP_STOP ||
++ trb_comp_code == COMP_STOP_INVAL)) {
++ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
++ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
++ ep_index);
++ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
++ (le32_to_cpu(event->flags) &
++ TRB_TYPE_BITMASK)>>10);
++ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ }
+ if (ep->skip) {
+ ep->skip = false;
+ xhci_dbg(xhci, "td_list is empty while skip "
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 51d1712..918ec98 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -197,6 +197,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2f86008..5d25e26 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -772,6 +772,8 @@
+ */
+ #define NEWPORT_VID 0x104D
+ #define NEWPORT_AGILIS_PID 0x3000
++#define NEWPORT_CONEX_CC_PID 0x3002
++#define NEWPORT_CONEX_AGP_PID 0x3006
+
+ /* Interbiometrics USB I/O Board */
+ /* Developed for Interbiometrics by Rudolf Gugler */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8513f51..59c4997 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
+
+ #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
+ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
++#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
+-#define CINTERION_PRODUCT_AH6 0x0055
+-#define CINTERION_PRODUCT_PLS8 0x0060
++#define CINTERION_PRODUCT_AHXX 0x0055
++#define CINTERION_PRODUCT_PLXX 0x0060
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+@@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 450f529..2c69d12 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
+ /* This is an autofs submount, we can't expire it */
+ if (autofs_type_indirect(sbi->type))
+ goto done;
+-
+- /*
+- * Otherwise it's an offset mount and we need to check
+- * if we can umount its mount, if there is one.
+- */
+- if (!d_mountpoint(path.dentry)) {
+- status = 0;
+- goto done;
+- }
+ }
+
+ /* Update the expiry counter if fs is busy */
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c04f02c..618ae6f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1571,7 +1571,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ item_off = btrfs_item_ptr_offset(leaf, i);
+ item_len = btrfs_item_size_nr(leaf, i);
+
+- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
++ btrfs_item_key_to_cpu(leaf, key, i);
++ if (!key_in_sk(key, sk))
++ continue;
++
++ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ item_len = 0;
+
+ if (sizeof(sh) + item_len + *sk_offset >
+@@ -1580,10 +1584,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ goto overflow;
+ }
+
+- btrfs_item_key_to_cpu(leaf, key, i);
+- if (!key_in_sk(key, sk))
+- continue;
+-
+ sh.objectid = key->objectid;
+ sh.offset = key->offset;
+ sh.type = key->type;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index e851d5b..20431b4 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -173,7 +173,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+
+ if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+ inode->i_flags |= S_AUTOMOUNT;
+- cifs_set_ops(inode);
++ if (inode->i_state & I_NEW)
++ cifs_set_ops(inode);
+ }
+
+ void
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7b18563..9243103 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2027,7 +2027,11 @@ repeat:
+ group = ac->ac_g_ex.fe_group;
+
+ for (i = 0; i < ngroups; group++, i++) {
+- if (group == ngroups)
++ /*
++ * Artificially restricted ngroups for non-extent
++ * files makes group > ngroups possible on first loop.
++ */
++ if (group >= ngroups)
+ group = 0;
+
+ /* This now checks without needing the buddy page */
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 808cac7..fc33ca1 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1238,6 +1238,19 @@ static int fat_read_root(struct inode *inode)
+ return 0;
+ }
+
++static unsigned long calc_fat_clusters(struct super_block *sb)
++{
++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
++
++ /* Divide first to avoid overflow */
++ if (sbi->fat_bits != 12) {
++ unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
++ return ent_per_sec * sbi->fat_length;
++ }
++
++ return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++}
++
+ /*
+ * Read the super block of an MS-DOS FS.
+ */
+@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
+
+ /* check that FAT table does not overflow */
+- fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++ fat_clusters = calc_fat_clusters(sb);
+ total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
+ if (total_clusters > MAX_FAT(sb)) {
+ if (!silent)
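+
+The divide-first ordering in calc_fat_clusters() is what prevents the
+overflow: on a 32-bit host with 4096-byte blocks, the old expression
+fat_length * s_blocksize * 8 multiplies by 32768 first and wraps once
+fat_length reaches 2^17 sectors, while ent_per_sec * fat_length stays in
+range for the same geometry.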
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 08921b8..e065497 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -267,6 +267,7 @@ static __be32
+ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ __be32 status;
++ int accmode = 0;
+
+ /* Only reclaims from previously confirmed clients are valid */
+ if ((status = nfs4_check_open_reclaim(&open->op_clientid)))
+@@ -284,9 +285,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
+
+ open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
+ (open->op_iattr.ia_size == 0);
++ /*
++ * In the delegation case, the client is telling us about an
++ * open that it *already* performed locally, some time ago. We
++ * should let it succeed now if possible.
++ *
++ * In the case of a CLAIM_FH open, on the other hand, the client
++ * may be counting on us to enforce permissions (the Linux 4.1
++ * client uses this for normal opens, for example).
++ */
++ if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
++ accmode = NFSD_MAY_OWNER_OVERRIDE;
+
+- status = do_open_permission(rqstp, current_fh, open,
+- NFSD_MAY_OWNER_OVERRIDE);
++ status = do_open_permission(rqstp, current_fh, open, accmode);
+
+ return status;
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index b50ffb7..edeb239 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -195,13 +195,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
+
+ static int nilfs_set_page_dirty(struct page *page)
+ {
+- int ret = __set_page_dirty_buffers(page);
++ int ret = __set_page_dirty_nobuffers(page);
+
+- if (ret) {
++ if (page_has_buffers(page)) {
+ struct inode *inode = page->mapping->host;
+- unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
++ unsigned nr_dirty = 0;
++ struct buffer_head *bh, *head;
+
+- nilfs_set_file_dirty(inode, nr_dirty);
++ /*
++ * This page is locked by callers, and no other thread
++ * concurrently marks its buffers dirty since they are
++ * only dirtied through routines in fs/buffer.c in
++ * which call sites of mark_buffer_dirty are protected
++ * by page lock.
++ */
++ bh = head = page_buffers(page);
++ do {
++ /* Do not mark hole blocks dirty */
++ if (buffer_dirty(bh) || !buffer_mapped(bh))
++ continue;
++
++ set_buffer_dirty(bh);
++ nr_dirty++;
++ } while (bh = bh->b_this_page, bh != head);
++
++ if (nr_dirty)
++ nilfs_set_file_dirty(inode, nr_dirty);
+ }
+ return ret;
+ }
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 2f5b92e..7eb1c0c 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -791,7 +791,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ &hole_size, &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+- goto out;
++ goto out_unlock;
+ }
+
+ if (rec.e_blkno == 0ULL) {
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 8469f3f..88c953d 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -204,6 +204,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type);
++typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+
+ /*
+ * EFI Configuration Table and GUID definitions
+@@ -331,6 +332,14 @@ extern void efi_map_pal_code (void);
+ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+ extern void efi_gettimeofday (struct timespec *ts);
+ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
++#ifdef CONFIG_X86
++extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
++#else
++static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ return EFI_SUCCESS;
++}
++#endif
+ extern u64 efi_get_iobase (void);
+ extern u32 efi_mem_type (unsigned long phys_addr);
+ extern u64 efi_mem_attributes (unsigned long phys_addr);
+@@ -475,7 +484,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+- efi_query_variable_info_t *query_variable_info;
++ efi_query_variable_store_t *query_variable_store;
+ };
+
+ struct efivars {
+diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h
+index 9ca1007..ee6b3c4 100644
+--- a/include/linux/if_cablemodem.h
++++ b/include/linux/if_cablemodem.h
+@@ -12,11 +12,11 @@
+ */
+
+ /* some useful defines for sb1000.c e cmconfig.c - fv */
+-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
+-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
+-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
+-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
+-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
+-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
++#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
++#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
++#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
++#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
++#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
++#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
+
+ #endif
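+
+The added parentheses matter whenever these macros appear inside a larger
+expression. Without them, for example:
+
+	cmd - SIOCGCMFREQUENCY	/* expands to: cmd - SIOCDEVPRIVATE+2 */
+
+evaluates as (cmd - SIOCDEVPRIVATE) + 2, adding 2 instead of subtracting
+the whole ioctl number.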
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index d079290..6f95e24 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -242,6 +242,23 @@ static inline void list_splice_init_rcu(struct list_head *list,
+ list_entry_rcu((ptr)->next, type, member)
+
+ /**
++ * list_first_or_null_rcu - get the first element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note that if the list is empty, it returns NULL.
++ *
++ * This primitive may safely run concurrently with the _rcu list-mutation
++ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
++ */
++#define list_first_or_null_rcu(ptr, type, member) \
++ ({struct list_head *__ptr = (ptr); \
++ struct list_head __rcu *__next = list_next_rcu(__ptr); \
++ likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
++ })
++
++/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
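+
+Reader-side usage follows the usual RCU pattern; the macvlan hunk earlier in
+this patch is the in-tree user, roughly:
+
+	rcu_read_lock();
+	vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list);
+	if (vlan)
+		/* deliver to the passthru device */;
+	rcu_read_unlock();
+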
+diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
+index bdf4b00..82e12ad 100644
+--- a/include/linux/virtio_console.h
++++ b/include/linux/virtio_console.h
+@@ -39,7 +39,7 @@
+ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+ #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+
+-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
++#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
+
+ struct virtio_console_config {
+ /* colums of the screens */
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 3efc9f3..bea7ad5 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -233,6 +233,8 @@ do { \
+ if (!ret) \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -249,8 +251,9 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, and the remaining
+- * jiffies if the condition evaluated to true before the timeout elapsed.
++ * The function returns 0 if the @timeout elapsed, or the remaining
++ * jiffies (at least 1) if the @condition evaluated to %true before
++ * the @timeout elapsed.
+ */
+ #define wait_event_timeout(wq, condition, timeout) \
+ ({ \
+@@ -318,6 +321,8 @@ do { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -334,9 +339,10 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+- * was interrupted by a signal, and the remaining jiffies otherwise
+- * if the condition evaluated to true before the timeout elapsed.
++ * Returns:
++ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
++ * a signal, or the remaining jiffies (at least 1) if the @condition
++ * evaluated to %true before the @timeout elapsed.
+ */
+ #define wait_event_interruptible_timeout(wq, condition, timeout) \
+ ({ \
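+
+With the "at least 1" guarantee a caller can now distinguish the two
+outcomes reliably (sketch):
+
+	long left = wait_event_timeout(wq, done, msecs_to_jiffies(100));
+	if (!left)
+		return -ETIMEDOUT;	/* timed out, condition still false */
+	/* left >= 1: condition became true before the timeout */
+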
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ddf523c..e6454b6 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -734,6 +734,18 @@ struct inet_hashinfo;
+ struct raw_hashinfo;
+ struct module;
+
++/*
++ * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
++ * nodes unmodified. Special care is taken when initializing the object to zero.
++ */
++static inline void sk_prot_clear_nulls(struct sock *sk, int size)
++{
++ if (offsetof(struct sock, sk_node.next) != 0)
++ memset(sk, 0, offsetof(struct sock, sk_node.next));
++ memset(&sk->sk_node.pprev, 0,
++ size - offsetof(struct sock, sk_node.pprev));
++}
++
+ /* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ * transport -> network interface is defined by struct inet_proto
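+
+The two-memset split in sk_prot_clear_nulls() zeroes everything except the
+sk_node.next word, i.e. the struct is treated as:
+
+	[0, offsetof(sk_node.next))		zeroed
+	sk_node.next				preserved for RCU/nulls readers
+	[offsetof(sk_node.pprev), size)		zeroed
+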
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0768715..fe46019 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -931,6 +931,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return 0;
+
++ skb_dst_force(skb);
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index d6fe08a..a16dac1 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -467,6 +467,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ int retval = 0;
+
+ helper_lock();
++ if (!sub_info->path) {
++ retval = -EINVAL;
++ goto out;
++ }
++
+ if (sub_info->path[0] == '\0')
+ goto out;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d08c9f4..d93369a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6672,16 +6672,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+ *tablep = NULL;
+ }
+
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+- mode_t mode, proc_handler *proc_handler)
++ mode_t mode, proc_handler *proc_handler,
++ bool load_idx)
+ {
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
+ }
+
+ static struct ctl_table *
+@@ -6693,30 +6702,30 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ return NULL;
+
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[10], "flags", &sd->flags,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[11], "name", sd->name,
+- CORENAME_MAX_SIZE, 0444, proc_dostring);
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ /* &table[12] is terminator */
+
+ return table;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 793548c..e9a45f1 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -833,7 +833,7 @@ void tick_cancel_sched_timer(int cpu)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
+- ts->nohz_mode = NOHZ_MODE_INACTIVE;
++ memset(ts, 0, sizeof(*ts));
+ }
+ #endif
+
+diff --git a/kernel/timer.c b/kernel/timer.c
+index c219db6..f2f71d7 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1630,12 +1630,12 @@ static int __cpuinit init_timers_cpu(int cpu)
+ boot_done = 1;
+ base = &boot_tvec_bases;
+ }
++ spin_lock_init(&base->lock);
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
+ }
+
+- spin_lock_init(&base->lock);
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 95dc31e..b0996c1 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -769,7 +769,11 @@ static int filter_set_pred(struct event_filter *filter,
+
+ static void __free_preds(struct event_filter *filter)
+ {
++ int i;
++
+ if (filter->preds) {
++ for (i = 0; i < filter->n_preds; i++)
++ kfree(filter->preds[i].ops);
+ kfree(filter->preds);
+ filter->preds = NULL;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 470cbb4..d80ac4b 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1937,7 +1937,12 @@ static void collapse_huge_page(struct mm_struct *mm,
+ pte_unmap(pte);
+ spin_lock(&mm->page_table_lock);
+ BUG_ON(!pmd_none(*pmd));
+- set_pmd_at(mm, address, pmd, _pmd);
++ /*
++ * We can only use set_pmd_at when establishing
++ * hugepmds and never for establishing regular pmds that
++ * points to regular pagetables. Use pmd_populate for that
++ */
++ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ spin_unlock(&mm->page_table_lock);
+ anon_vma_unlock(vma->anon_vma);
+ goto out;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 180d97f..e1052d1 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -147,7 +147,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ if (PageHuge(new))
+ pte = pte_mkhuge(pte);
+ #endif
+- flush_cache_page(vma, addr, pte_pfn(pte));
++ flush_dcache_page(new);
+ set_pte_at(mm, addr, ptep, pte);
+
+ if (PageHuge(new)) {
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 8d1ca2d..a160ec8 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -37,51 +37,48 @@ static struct srcu_struct srcu;
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
+ int id;
+
+ /*
+- * srcu_read_lock() here will block synchronize_srcu() in
+- * mmu_notifier_unregister() until all registered
+- * ->release() callouts this function makes have
+- * returned.
++ * SRCU here will block mmu_notifier_unregister until
++ * ->release returns.
+ */
+ id = srcu_read_lock(&srcu);
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * If ->release runs before mmu_notifier_unregister it must be
++ * handled, as it's the only way for the driver to flush all
++ * existing sptes and stop the driver from establishing any more
++ * sptes before all the pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
++
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
+-
+ /*
+- * Unlink. This will prevent mmu_notifier_unregister()
+- * from also making the ->release() callout.
++ * We arrived before mmu_notifier_unregister so
++ * mmu_notifier_unregister will do nothing other than to wait
++ * for ->release to finish and for mmu_notifier_unregister to
++ * return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+- /*
+- * Clear sptes. (see 'release' description in mmu_notifier.h)
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+-
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * All callouts to ->release() which we have done are complete.
+- * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
+- */
+- srcu_read_unlock(&srcu, id);
+-
+- /*
+- * mmu_notifier_unregister() may have unlinked a notifier and may
+- * still be calling out to it. Additionally, other notifiers
+- * may have been active via vmtruncate() et. al. Block here
+- * to ensure that all notifier callouts for this mm have been
+- * completed and the sptes are really cleaned up before returning
+- * to exit_mmap().
++ * synchronize_srcu here prevents mmu_notifier_release from returning to
++ * exit_mmap (which would proceed with freeing all pages in the mm)
++ * until the ->release method returns, if it was invoked by
++ * mmu_notifier_unregister.
++ *
++ * The mmu_notifier_mm can't go away from under us because one mm_count
++ * is held by exit_mmap.
+ */
+ synchronize_srcu(&srcu);
+ }
+@@ -302,31 +299,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
++ /*
++ * SRCU here will force exit_mmap to wait for ->release to
++ * finish before freeing the pages.
++ */
+ int id;
+
++ id = srcu_read_lock(&srcu);
+ /*
+- * Ensure we synchronize up with __mmu_notifier_release().
++ * exit_mmap will block in mmu_notifier_release to guarantee
++ * that ->release is called before freeing the pages.
+ */
+- id = srcu_read_lock(&srcu);
+-
+- hlist_del_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ /*
+- * Allow __mmu_notifier_release() to complete.
++ * Can not use list_del_rcu() since __mmu_notifier_release
++ * can delete it before we hold the lock.
+ */
+- srcu_read_unlock(&srcu, id);
+- } else
++ hlist_del_init_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+- * Wait for any running method to finish, including ->release() if it
+- * was run by __mmu_notifier_release() instead of us.
++ * Wait for any running method to finish, of course including
++ * ->release if it was run by mmu_notifier_release instead of us.
+ */
+ synchronize_srcu(&srcu);
+
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index aa9701e..1090e77 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ return 0;
+ }
+
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- struct vm_area_struct *vma;
+-
+- /* We don't need vma lookup at all. */
+- if (!walk->hugetlb_entry)
+- return NULL;
+-
+- VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+- vma = find_vma(walk->mm, addr);
+- if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+- return vma;
+-
+- return NULL;
+-}
+-
+ #else /* CONFIG_HUGETLB_PAGE */
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- return NULL;
+-}
+-
+ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+@@ -199,30 +178,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
+ if (!walk->mm)
+ return -EINVAL;
+
++ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
++
+ pgd = pgd_offset(walk->mm, addr);
+ do {
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma = NULL;
+
+ next = pgd_addr_end(addr, end);
+
+ /*
+- * handle hugetlb vma individually because pagetable walk for
+- * the hugetlb page is dependent on the architecture and
+- * we can't handled it in the same manner as non-huge pages.
++ * This function was not intended to be vma based.
++ * But there are vma special cases to be handled:
++ * - hugetlb vma's
++ * - VM_PFNMAP vma's
+ */
+- vma = hugetlb_vma(addr, walk);
++ vma = find_vma(walk->mm, addr);
+ if (vma) {
+- if (vma->vm_end < next)
++ /*
++ * There are no page structures backing a VM_PFNMAP
++ * range, so do not allow split_huge_page_pmd().
++ */
++ if ((vma->vm_start <= addr) &&
++ (vma->vm_flags & VM_PFNMAP)) {
+ next = vma->vm_end;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ /*
+- * Hugepage is very tightly coupled with vma, so
+- * walk through hugetlb entries within a given vma.
++ * Handle hugetlb vma individually because pagetable
++ * walk for the hugetlb page is dependent on the
++ * architecture and we can't handle it in the same
++ * manner as non-huge pages.
+ */
+- err = walk_hugetlb_range(vma, addr, next, walk);
+- if (err)
+- break;
+- pgd = pgd_offset(walk->mm, next);
+- continue;
++ if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
++ is_vm_hugetlb_page(vma)) {
++ if (vma->vm_end < next)
++ next = vma->vm_end;
++ /*
++ * Hugepage is very tightly coupled with vma,
++ * so walk through hugetlb entries within a
++ * given vma.
++ */
++ err = walk_hugetlb_range(vma, addr, next, walk);
++ if (err)
++ break;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ }
+
+ if (pgd_none_or_clear_bad(pgd)) {
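[Note: the rewritten walk_page_range() loop above folds the vma lookup into the walk itself so that VM_PFNMAP ranges can be skipped wholesale (no page structures back them, so split_huge_page_pmd() must never touch them) and hugetlb ranges can be routed to walk_hugetlb_range(). A simplified standalone model of that control flow, with a toy vma table in place of find_vma():]

#include <stdio.h>

#define VM_PFNMAP 0x1u

struct vma { unsigned long start, end; unsigned int flags; };

static const struct vma vmas[] = {
	{ 0x1000, 0x3000, 0 },
	{ 0x3000, 0x5000, VM_PFNMAP },	/* must be skipped wholesale */
	{ 0x5000, 0x6000, 0 },
};

/* like find_vma(): first vma whose end is above addr, if any */
static const struct vma *find_vma(unsigned long addr)
{
	for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
		if (addr < vmas[i].end)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	unsigned long addr = 0x1000, end = 0x6000;

	while (addr < end) {
		unsigned long next = addr + 0x1000;	/* one "pgd" step */
		const struct vma *vma = find_vma(addr);

		if (vma && vma->start <= addr && (vma->flags & VM_PFNMAP)) {
			addr = vma->end;	/* jump past the mapping */
			continue;
		}
		printf("walk %#lx..%#lx\n", addr, next);
		addr = next;
	}
	return 0;
}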
+diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
+index 58de2a0..c83ee79 100644
+--- a/net/bridge/br_stp_timer.c
++++ b/net/bridge/br_stp_timer.c
+@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
+
+ br_debug(br, "tcn timer expired\n");
+ spin_lock(&br->lock);
+- if (br->dev->flags & IFF_UP) {
++ if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
+ br_transmit_tcn(br);
+
+ mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2c73adf..8a2c2dd 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1021,18 +1021,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
+ #endif
+ }
+
+-/*
+- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+- * un-modified. Special care is taken when initializing object to zero.
+- */
+-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+-{
+- if (offsetof(struct sock, sk_node.next) != 0)
+- memset(sk, 0, offsetof(struct sock, sk_node.next));
+- memset(&sk->sk_node.pprev, 0,
+- size - offsetof(struct sock, sk_node.pprev));
+-}
+-
+ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+ {
+ unsigned long nulls1, nulls2;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index db10805..c69358c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2196,6 +2196,17 @@ void tcp6_proc_exit(struct net *net)
+ }
+ #endif
+
++static void tcp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ struct proto tcpv6_prot = {
+ .name = "TCPv6",
+ .owner = THIS_MODULE,
+@@ -2235,6 +2246,7 @@ struct proto tcpv6_prot = {
+ .compat_setsockopt = compat_tcp_setsockopt,
+ .compat_getsockopt = compat_tcp_getsockopt,
+ #endif
++ .clear_sk = tcp_v6_clear_sk,
+ };
+
+ static const struct inet6_protocol tcpv6_protocol = {
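[Note: tcp_v6_clear_sk() above (and udp_v6_clear_sk() below) exist so that recycling a socket never wipes pinet6, which lockless RCU lookups may still dereference: the structure is zeroed in two memsets placed around the preserved field. The same split-memset idiom on a toy struct, with lookup_key playing the part of pinet6:]

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sock_like {
	int refcount;
	void *lookup_key;	/* read locklessly; must survive reuse */
	char scratch[64];
};

static void clear_preserving_key(struct sock_like *sk)
{
	/* zero everything before the preserved field... */
	memset(sk, 0, offsetof(struct sock_like, lookup_key));
	/* ...and everything after it, leaving lookup_key intact */
	memset(&sk->lookup_key + 1, 0,
	       sizeof(*sk) - offsetof(struct sock_like, lookup_key)
			   - sizeof(sk->lookup_key));
}

int main(void)
{
	struct sock_like s = { .refcount = 3, .scratch = "dirty" };
	s.lookup_key = &s;
	clear_preserving_key(&s);
	printf("refcount=%d key=%p scratch[0]=%d\n",
	       s.refcount, s.lookup_key, s.scratch[0]);
	return 0;
}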
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8c25419..20f0812 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1453,6 +1453,17 @@ void udp6_proc_exit(struct net *net) {
+ }
+ #endif /* CONFIG_PROC_FS */
+
++void udp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ /* ------------------------------------------------------------------------ */
+
+ struct proto udpv6_prot = {
+@@ -1483,7 +1494,7 @@ struct proto udpv6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udpv6_protosw = {
+diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
+index d757104..4691ed5 100644
+--- a/net/ipv6/udp_impl.h
++++ b/net/ipv6/udp_impl.h
+@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+ extern void udpv6_destroy_sock(struct sock *sk);
+
++extern void udp_v6_clear_sk(struct sock *sk, int size);
++
+ #ifdef CONFIG_PROC_FS
+ extern int udp6_seq_show(struct seq_file *seq, void *v);
+ #endif
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 1d08e21..dfcc4be 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udplite6_protosw = {
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index d879f7e..db78e7d 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -96,8 +96,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ dev_hold(dev);
+
+ xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
+- if (!xdst->u.rt6.rt6i_idev)
++ if (!xdst->u.rt6.rt6i_idev) {
++ dev_put(dev);
+ return -ENODEV;
++ }
+
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 13d607a..87ecf75 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
+ if (ret > 0)
+ break;
+ if (!ret)
+- return 0;
++ return -EINVAL;
+ dataoff += *matchoff;
+ }
+
+- /* Empty callid is useless */
+- if (!*matchlen)
+- return -EINVAL;
+-
+ /* Too large is useless */
+ if (*matchlen > IP_VS_PEDATA_MAXLEN)
+ return -EINVAL;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 835fcea..5a70215 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -813,37 +813,27 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
+
+ smp_rmb();
+
+- if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+-
+- /* We could have just memset this but we will lose the
+- * flexibility of making the priv area sticky
+- */
+- BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+- BLOCK_NUM_PKTS(pbd1) = 0;
+- BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- getnstimeofday(&ts);
+- h1->ts_first_pkt.ts_sec = ts.tv_sec;
+- h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+- pkc1->pkblk_start = (char *)pbd1;
+- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
+- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+- BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+- pbd1->version = pkc1->version;
+- pkc1->prev = pkc1->nxt_offset;
+- pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+- prb_thaw_queue(pkc1);
+- _prb_refresh_rx_retire_blk_timer(pkc1);
+-
+- smp_wmb();
+-
+- return;
+- }
++ /* We could have just memset this but we will lose the
++ * flexibility of making the priv area sticky
++ */
++ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
++ BLOCK_NUM_PKTS(pbd1) = 0;
++ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ getnstimeofday(&ts);
++ h1->ts_first_pkt.ts_sec = ts.tv_sec;
++ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
++ pkc1->pkblk_start = (char *)pbd1;
++ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
++ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
++ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
++ pbd1->version = pkc1->version;
++ pkc1->prev = pkc1->nxt_offset;
++ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
++ prb_thaw_queue(pkc1);
++ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+- WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
+- pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
+- dump_stack();
+- BUG();
++ smp_wmb();
+ }
+
+ /*
+@@ -934,10 +924,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ prb_close_block(pkc, pbd, po, status);
+ return;
+ }
+-
+- WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
+- dump_stack();
+- BUG();
+ }
+
+ static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 60f8f61..57827bf 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -8,7 +8,7 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+- * Copyright: Jamal Hadi Salim (2002-4)
++ * Copyright: Jamal Hadi Salim (2002-13)
+ */
+
+ #include <linux/types.h>
+@@ -299,17 +299,44 @@ static struct tc_action_ops act_ipt_ops = {
+ .walk = tcf_generic_walker
+ };
+
+-MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
++static struct tc_action_ops act_xt_ops = {
++ .kind = "xt",
++ .hinfo = &ipt_hash_info,
++ .type = TCA_ACT_IPT,
++ .capab = TCA_CAP_NONE,
++ .owner = THIS_MODULE,
++ .act = tcf_ipt,
++ .dump = tcf_ipt_dump,
++ .cleanup = tcf_ipt_cleanup,
++ .lookup = tcf_hash_search,
++ .init = tcf_ipt_init,
++ .walk = tcf_generic_walker
++};
++
++MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
+ MODULE_DESCRIPTION("Iptables target actions");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("act_xt");
+
+ static int __init ipt_init_module(void)
+ {
+- return tcf_register_action(&act_ipt_ops);
++ int ret1, ret2;
++ ret1 = tcf_register_action(&act_xt_ops);
++ if (ret1 < 0)
++ printk("Failed to load xt action\n");
++ ret2 = tcf_register_action(&act_ipt_ops);
++ if (ret2 < 0)
++ printk("Failed to load ipt action\n");
++
++ if (ret1 < 0 && ret2 < 0)
++ return ret1;
++ else
++ return 0;
+ }
+
+ static void __exit ipt_cleanup_module(void)
+ {
++ tcf_unregister_action(&act_xt_ops);
+ tcf_unregister_action(&act_ipt_ops);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index dc6af27..206c61e 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -296,13 +296,20 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
+ /*
+ * Make an RPC task runnable.
+ *
+- * Note: If the task is ASYNC, this must be called with
+- * the spinlock held to protect the wait queue operation.
++ * Note: If the task is ASYNC, and is being made runnable after sitting on an
++ * rpc_wait_queue, this must be called with the queue spinlock held to protect
++ * the wait queue operation.
++ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
++ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
++ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
++ * the RPC_TASK_RUNNING flag.
+ */
+ static void rpc_make_runnable(struct rpc_task *task)
+ {
++ bool need_wakeup = !rpc_test_and_set_running(task);
++
+ rpc_clear_queued(task);
+- if (rpc_test_and_set_running(task))
++ if (!need_wakeup)
+ return;
+ if (RPC_IS_ASYNC(task)) {
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
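[Note: the reordering above is the whole sunrpc fix: RPC_TASK_RUNNING must be set, and the race winner remembered, before RPC_TASK_QUEUED is cleared, or the lockless RPC_IS_QUEUED() test in __rpc_execute() can observe neither flag and spin the task a second time. The same two-flag handshake, modelled with C11 atomics; struct task and make_runnable() are illustrative names, not the kernel API:]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	atomic_bool queued;	/* ~ RPC_TASK_QUEUED */
	atomic_bool running;	/* ~ RPC_TASK_RUNNING */
};

static bool make_runnable(struct task *t)
{
	/* set RUNNING first and remember whether we won the race... */
	bool need_wakeup = !atomic_exchange(&t->running, true);

	/* ...then clear QUEUED: any observer seeing !queued is now
	 * guaranteed to also see running == true */
	atomic_store(&t->queued, false);
	return need_wakeup;	/* caller wakes the task only if true */
}

int main(void)
{
	struct task t = { ATOMIC_VAR_INIT(true), ATOMIC_VAR_INIT(false) };
	printf("wakeup=%d\n", make_runnable(&t));	/* wakeup=1 */
	return 0;
}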
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 4707b6c..faabaa5 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -615,6 +615,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
+ struct hda_bus_unsolicited *unsol;
+ unsigned int wp;
+
++ if (!bus || !bus->workq)
++ return 0;
++
+ trace_hda_unsol_event(bus, res, res_ex);
+ unsol = bus->unsol;
+ if (!unsol)
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 98c5774..b73f226 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2636,6 +2636,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+ default:
+ return 0;
+ }
++ break;
+ default:
+ return 0;
+ }
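[Note: the one-line wm8994 fix above is the classic nested-switch pitfall: when the inner switch falls off its end, control continues straight into the outer switch's next label unless a break intervenes. Distilled into a standalone example:]

#include <stdio.h>

static int classify(int outer, int inner)
{
	switch (outer) {
	case 1:
		switch (inner) {
		case 0:
			return 10;
		default:
			break;	/* leaves only the inner switch */
		}
		break;		/* without this, control falls into
				 * the outer "default:" below */
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", classify(1, 5));	/* 0 with the break, -1 without */
	return 0;
}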
+diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
+index a4ffc95..4c11605 100755
+--- a/tools/perf/scripts/python/net_dropmonitor.py
++++ b/tools/perf/scripts/python/net_dropmonitor.py
+@@ -40,9 +40,9 @@ def get_kallsyms_table():
+
+ def get_sym(sloc):
+ loc = int(sloc)
+- for i in kallsyms:
+- if (i['loc'] >= loc):
+- return (i['name'], i['loc']-loc)
++ for i in kallsyms[::-1]:
++ if loc >= i['loc']:
++ return (i['name'], loc - i['loc'])
+ return (None, 0)
+
+ def print_drop_table():
+@@ -64,7 +64,7 @@ def trace_end():
+
+ # called from perf, when it finds a corresponding event
+ def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+- skbaddr, protocol, location):
++ skbaddr, location, protocol):
+ slocation = str(location)
+ try:
+ drop_log[slocation] = drop_log[slocation] + 1
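[Note: the net_dropmonitor.py change fixes two things: get_sym() scanned the sorted kallsyms list forwards and returned the first symbol at or above loc (the wrong symbol, with a negative offset), and skb__kfree_skb() had its last two arguments swapped relative to the tracepoint. The corrected lookup, "last symbol whose address is <= loc", transcribed into C with an invented table:]

#include <stdio.h>

struct sym { unsigned long loc; const char *name; };

/* sorted by ascending address, as the script's kallsyms table is */
static const struct sym table[] = {
	{ 0x1000, "foo" }, { 0x2000, "bar" }, { 0x3000, "baz" },
};

static const struct sym *get_sym(unsigned long loc, unsigned long *off)
{
	/* scan backwards for the greatest symbol address <= loc */
	for (int i = (int)(sizeof(table) / sizeof(table[0])) - 1; i >= 0; i--)
		if (loc >= table[i].loc) {
			*off = loc - table[i].loc;
			return &table[i];
		}
	return NULL;
}

int main(void)
{
	unsigned long off;
	const struct sym *s = get_sym(0x2010, &off);

	if (s)
		printf("%s+0x%lx\n", s->name, off);	/* bar+0x10 */
	return 0;
}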
diff --git a/3.2.54/1046_linux-3.2.47.patch b/3.2.54/1046_linux-3.2.47.patch
new file mode 100644
index 0000000..b74563c
--- /dev/null
+++ b/3.2.54/1046_linux-3.2.47.patch
@@ -0,0 +1,3314 @@
+diff --git a/Makefile b/Makefile
+index f600582..40e2a11 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index 21f56ff..5954a1a 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -123,7 +123,6 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+ endif
+
+ ccflags-y := -fpic -fno-builtin -I$(obj)
+-asflags-y := -Wa,-march=all
+
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+ KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
+diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
+index 6179d94..3115e31 100644
+--- a/arch/arm/boot/compressed/head-sa1100.S
++++ b/arch/arm/boot/compressed/head-sa1100.S
+@@ -11,6 +11,7 @@
+ #include <asm/mach-types.h>
+
+ .section ".start", "ax"
++ .arch armv4
+
+ __SA1100_start:
+
+diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
+index 089c560..92b5689 100644
+--- a/arch/arm/boot/compressed/head-shark.S
++++ b/arch/arm/boot/compressed/head-shark.S
+@@ -18,6 +18,7 @@
+
+ .section ".start", "ax"
+
++ .arch armv4
+ b __beginning
+
+ __ofw_data: .long 0 @ the number of memory blocks
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index d63632f..8c57359 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -10,6 +10,7 @@
+ */
+ #include <linux/linkage.h>
+
++ .arch armv7-a
+ /*
+ * Debugging stuff
+ *
+diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
+index 8200dea..140c817 100644
+--- a/arch/arm/kernel/topology.c
++++ b/arch/arm/kernel/topology.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/cpu.h>
+ #include <linux/cpumask.h>
++#include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/percpu.h>
+ #include <linux/node.h>
+@@ -42,6 +43,7 @@
+ #define MPIDR_LEVEL2_SHIFT 16
+
+ struct cputopo_arm cpu_topology[NR_CPUS];
++EXPORT_SYMBOL_GPL(cpu_topology);
+
+ const struct cpumask *cpu_coregroup_mask(int cpu)
+ {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index cf9c69b..8c3baa0 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -463,7 +463,7 @@ machine_check_common:
+ STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+ STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
++ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
+ STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
+ STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 82dcd4d..9844662 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1036,6 +1036,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ }
+
++/*
++ * This occurs when running in hypervisor mode on POWER6 or later
++ * and an illegal instruction is encountered.
++ */
++void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
++{
++ regs->msr |= REASON_ILLEGAL;
++ program_check_exception(regs);
++}
++
+ void alignment_exception(struct pt_regs *regs)
+ {
+ int sig, code, fixed = 0;
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index 7a6f3b3..f2bb9c9 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -160,7 +160,7 @@ identity_mapped:
+ xorq %rbp, %rbp
+ xorq %r8, %r8
+ xorq %r9, %r9
+- xorq %r10, %r9
++ xorq %r10, %r10
+ xorq %r11, %r11
+ xorq %r12, %r12
+ xorq %r13, %r13
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 0e47949..e19898d 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -447,6 +447,38 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
+ },
+ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion dm4",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion g6 Notebook PC",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP 1000 Notebook PC",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion m4",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index ddfc1c1..0e92326 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -151,6 +151,7 @@ enum piix_controller_ids {
+ piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+ ich8_sata_snb,
+ ich8_2port_sata_snb,
++ ich8_2port_sata_byt,
+ };
+
+ struct piix_map_db {
+@@ -356,6 +357,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (BayTrail) */
++ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
++ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+
+ { } /* terminate list */
+ };
+@@ -521,6 +525,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
+ [tolapai_sata] = &tolapai_map_db,
+ [ich8_sata_snb] = &ich8_map_db,
+ [ich8_2port_sata_snb] = &ich8_2port_map_db,
++ [ich8_2port_sata_byt] = &ich8_2port_map_db,
+ };
+
+ static struct ata_port_info piix_port_info[] = {
+@@ -672,6 +677,15 @@ static struct ata_port_info piix_port_info[] = {
+ .port_ops = &piix_sata_ops,
+ },
+
++ [ich8_2port_sata_byt] =
++ {
++ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &piix_sata_ops,
++ },
++
+ };
+
+ static struct pci_bits piix_enable_bits[] = {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 288b635..d54b7d6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1598,6 +1598,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
+ qc->tf = *tf;
+ if (cdb)
+ memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
++
++ /* some SATA bridges need us to indicate data xfer direction */
++ if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
++ dma_dir == DMA_FROM_DEVICE)
++ qc->tf.feature |= ATAPI_DMADIR;
++
+ qc->flags |= ATA_QCFLAG_RESULT_TF;
+ qc->dma_dir = dma_dir;
+ if (dma_dir != DMA_NONE) {
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index b0f553b..d3446f6 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -161,8 +161,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
+ static int cciss_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_release(struct gendisk *disk, fmode_t mode);
+-static int do_ioctl(struct block_device *bdev, fmode_t mode,
+- unsigned int cmd, unsigned long arg);
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+@@ -229,7 +227,7 @@ static const struct block_device_operations cciss_fops = {
+ .owner = THIS_MODULE,
+ .open = cciss_unlocked_open,
+ .release = cciss_release,
+- .ioctl = do_ioctl,
++ .ioctl = cciss_ioctl,
+ .getgeo = cciss_getgeo,
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = cciss_compat_ioctl,
+@@ -1140,16 +1138,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode)
+ return 0;
+ }
+
+-static int do_ioctl(struct block_device *bdev, fmode_t mode,
+- unsigned cmd, unsigned long arg)
+-{
+- int ret;
+- mutex_lock(&cciss_mutex);
+- ret = cciss_ioctl(bdev, mode, cmd, arg);
+- mutex_unlock(&cciss_mutex);
+- return ret;
+-}
+-
+ #ifdef CONFIG_COMPAT
+
+ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1176,7 +1164,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+- return do_ioctl(bdev, mode, cmd, arg);
++ return cciss_ioctl(bdev, mode, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
+@@ -1216,7 +1204,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ if (err)
+ return -EFAULT;
+
+- err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
++ err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+ if (err)
+ return err;
+ err |=
+@@ -1258,7 +1246,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+ if (err)
+ return -EFAULT;
+
+- err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
++ err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+ if (err)
+ return err;
+ err |=
+@@ -1308,11 +1296,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
+ static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
+ {
+ cciss_coalint_struct intinfo;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
+ intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user
+ (argp, &intinfo, sizeof(cciss_coalint_struct)))
+ return -EFAULT;
+@@ -1353,12 +1344,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
+ static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
+ {
+ NodeName_type NodeName;
++ unsigned long flags;
+ int i;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < 16; i++)
+ NodeName[i] = readb(&h->cfgtable->ServerName[i]);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
+ return -EFAULT;
+ return 0;
+@@ -1395,10 +1389,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
+ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
+ {
+ Heartbeat_type heartbeat;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
+ return -EFAULT;
+ return 0;
+@@ -1407,10 +1404,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
+ static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
+ {
+ BusTypes_type BusTypes;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ BusTypes = readl(&h->cfgtable->BusTypes);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
+ return -EFAULT;
+ return 0;
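[Note: the cciss change drops the do_ioctl() wrapper, which took one driver-wide cciss_mutex around every ioctl, and instead takes the per-controller h->lock only around each group of register reads, so every ioctl gets a consistent snapshot without serializing all controllers. The locking shape, in a userspace sketch (struct ctlr and get_intinfo are illustrative):]

#include <pthread.h>
#include <stdio.h>

struct ctlr {
	pthread_spinlock_t lock;	/* ~ h->lock */
	unsigned int delay, count;	/* ~ cfgtable registers */
};

static void get_intinfo(struct ctlr *h, unsigned int *delay,
			unsigned int *count)
{
	/* hold the per-device lock just long enough that both reads
	 * observe the same update, instead of one big global mutex */
	pthread_spin_lock(&h->lock);
	*delay = h->delay;
	*count = h->count;
	pthread_spin_unlock(&h->lock);
}

int main(void)
{
	struct ctlr h = { .delay = 8, .count = 16 };
	unsigned int d, c;

	pthread_spin_init(&h.lock, PTHREAD_PROCESS_PRIVATE);
	get_intinfo(&h, &d, &c);
	printf("delay=%u count=%u\n", d, c);
	pthread_spin_destroy(&h.lock);
	return 0;
}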
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 44a5d0a..73af885 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -981,7 +981,7 @@ EXPORT_SYMBOL(drm_vblank_off);
+ */
+ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+ {
+- /* vblank is not initialized (IRQ not installed ?) */
++ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+ /*
+@@ -1003,6 +1003,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+ {
+ unsigned long irqflags;
+
++ /* vblank is not initialized (IRQ not installed ?), or has been freed */
++ if (!dev->num_crtcs)
++ return;
++
+ if (dev->vblank_inmodeset[crtc]) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = 1;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 876bac0..2ffa740 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -740,10 +740,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+- .ident = "Hewlett-Packard HP t5740e Thin Client",
++ .ident = "Hewlett-Packard HP t5740",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
++ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+ },
+ },
+ {
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 9e24670..00ec0dd 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1582,11 +1582,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+- intel_ddc_get_modes(connector, intel_sdvo->i2c);
+- if (list_empty(&connector->probed_modes) == false)
+- goto end;
++ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+- /* Fetch modes from VBT */
++ /*
++ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
++ * SDVO->LVDS transcoders can't cope with the EDID mode. Since
++ * drm_mode_probed_add adds the mode at the head of the list we add it
++ * last.
++ */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+@@ -1598,7 +1601,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ }
+ }
+
+-end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 0495a50..9bea4a6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3086,6 +3086,12 @@ static int evergreen_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -3218,10 +3224,6 @@ int evergreen_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 636255b..3f9705b 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1389,6 +1389,12 @@ static int cayman_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -1506,10 +1512,6 @@ int cayman_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index fad7cd1..76c1290 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -3905,6 +3905,12 @@ static int r100_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -4050,9 +4056,6 @@ int r100_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index c93bc64..441570b 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -1397,6 +1397,12 @@ static int r300_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -1521,9 +1527,6 @@ int r300_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
+index 417fab8..5b219b8 100644
+--- a/drivers/gpu/drm/radeon/r420.c
++++ b/drivers/gpu/drm/radeon/r420.c
+@@ -255,6 +255,12 @@ static int r420_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -391,10 +397,6 @@ int r420_init(struct radeon_device *rdev)
+ if (r) {
+ return r;
+ }
+- r = radeon_irq_kms_init(rdev);
+- if (r) {
+- return r;
+- }
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r) {
+diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
+index 3081d07..f36a5c9 100644
+--- a/drivers/gpu/drm/radeon/r520.c
++++ b/drivers/gpu/drm/radeon/r520.c
+@@ -188,6 +188,12 @@ static int r520_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -281,9 +287,6 @@ int r520_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index bdfa82a..3d46d7d4 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2449,6 +2449,12 @@ int r600_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -2592,10 +2598,6 @@ int r600_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index bd959c1..cd94abb 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -359,18 +359,17 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ return false;
+
+ /* first check CRTCs */
+- if (ASIC_IS_DCE41(rdev)) {
++ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+- if (reg & EVERGREEN_CRTC_MASTER_EN)
+- return true;
+- } else if (ASIC_IS_DCE4(rdev)) {
+- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
++ if (rdev->num_crtc >= 4) {
++ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
++ }
++ if (rdev->num_crtc >= 6) {
++ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
++ }
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index 06b90c8..4dd9512 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -411,6 +411,12 @@ static int rs400_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -519,9 +525,6 @@ int rs400_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index ee898e9..cea482a 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -848,6 +848,12 @@ static int rs600_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -963,9 +969,6 @@ int rs600_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index a9049ed..93bce72 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -622,6 +622,12 @@ static int rs690_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -738,9 +744,6 @@ int rs690_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index d5f45b4..9103638 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -380,6 +380,12 @@ static int rv515_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -500,9 +506,6 @@ int rv515_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index cc79449..63db75d 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1092,6 +1092,12 @@ static int rv770_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -1220,10 +1226,6 @@ int rv770_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
+index 1ad0a88..8178927 100644
+--- a/drivers/hwmon/adm1021.c
++++ b/drivers/hwmon/adm1021.c
+@@ -311,26 +311,68 @@ static int adm1021_detect(struct i2c_client *client,
+ man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
+ dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
+
++ if (man_id < 0 || dev_id < 0)
++ return -ENODEV;
++
+ if (man_id == 0x4d && dev_id == 0x01)
+ type_name = "max1617a";
+ else if (man_id == 0x41) {
+ if ((dev_id & 0xF0) == 0x30)
+ type_name = "adm1023";
+- else
++ else if ((dev_id & 0xF0) == 0x00)
+ type_name = "adm1021";
++ else
++ return -ENODEV;
+ } else if (man_id == 0x49)
+ type_name = "thmc10";
+ else if (man_id == 0x23)
+ type_name = "gl523sm";
+ else if (man_id == 0x54)
+ type_name = "mc1066";
+- /* LM84 Mfr ID in a different place, and it has more unused bits */
+- else if (conv_rate == 0x00
+- && (config & 0x7F) == 0x00
+- && (status & 0xAB) == 0x00)
+- type_name = "lm84";
+- else
+- type_name = "max1617";
++ else {
++ int lte, rte, lhi, rhi, llo, rlo;
++
++ /* extra checks for LM84 and MAX1617 to avoid misdetections */
++
++ llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
++ rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
++
++ /* fail if any of the additional register reads failed */
++ if (llo < 0 || rlo < 0)
++ return -ENODEV;
++
++ lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
++ rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
++ lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
++ rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
++
++ /*
++ * Fail for negative temperatures and negative high limits.
++ * This check also catches read errors on the tested registers.
++ */
++ if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
++ return -ENODEV;
++
++ /* fail if all registers hold the same value */
++ if (lte == rte && lte == lhi && lte == rhi && lte == llo
++ && lte == rlo)
++ return -ENODEV;
++
++ /*
++ * LM84 Mfr ID is in a different place,
++ * and it has more unused bits.
++ */
++ if (conv_rate == 0x00
++ && (config & 0x7F) == 0x00
++ && (status & 0xAB) == 0x00) {
++ type_name = "lm84";
++ } else {
++ /* fail if low limits are larger than high limits */
++ if ((s8)llo > lhi || (s8)rlo > rhi)
++ return -ENODEV;
++ type_name = "max1617";
++ }
++ }
+
+ pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n",
+ type_name, i2c_adapter_id(adapter), client->addr);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 62306e5..298e02a 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -407,7 +407,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
+
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+- set_bit(R1BIO_Uptodate, &r1_bio->state);
++ /*
++ * Do not set R1BIO_Uptodate if the current device is
++ * rebuilding or Faulty. This is because we cannot use
++ * such device for properly reading the data back (we could
++	 * potentially use it, if the current write would have fallen
++	 * before rdev->recovery_offset, but for simplicity we don't
++	 * check this here).
++ */
++ if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
++ !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
++ set_bit(R1BIO_Uptodate, &r1_bio->state);
+
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[mirror].rdev,
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 8f67c4d..8bba438 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -390,7 +390,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
+ sector_t first_bad;
+ int bad_sectors;
+
+- set_bit(R10BIO_Uptodate, &r10_bio->state);
++ /*
++ * Do not set R10BIO_Uptodate if the current device is
++ * rebuilding or Faulty. This is because we cannot use
++ * such device for properly reading the data back (we could
++	 * potentially use it, if the current write would have fallen
++	 * before rdev->recovery_offset, but for simplicity we don't
++	 * check this here).
++ */
++ if (test_bit(In_sync, &conf->mirrors[dev].rdev->flags) &&
++ !test_bit(Faulty, &conf->mirrors[dev].rdev->flags))
++ set_bit(R10BIO_Uptodate, &r10_bio->state);
+
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[dev].rdev,
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index ec13a59..1bc927a 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1620,6 +1620,9 @@ static int tg3_poll_fw(struct tg3 *tp)
+ int i;
+ u32 val;
+
++ if (tg3_flag(tp, NO_FWARE_REPORTED))
++ return 0;
++
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
+@@ -8282,6 +8285,14 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+ }
+
++static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
++{
++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
++ else
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
++}
++
+ /* tp->lock is held. */
+ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ {
+@@ -8920,6 +8931,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ tw32_f(RDMAC_MODE, rdmac_mode);
+ udelay(40);
+
++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
++ for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
++ if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
++ break;
++ }
++ if (i < TG3_NUM_RDMA_CHANNELS) {
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val |= tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_set(tp, 5719_5720_RDMA_BUG);
++ }
++ }
++
+ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+@@ -9166,6 +9191,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ */
+ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+ {
++ /* Chip may have been just powered on. If so, the boot code may still
++ * be running initialization. Wait for it to finish to avoid races in
++ * accessing the hardware.
++ */
++ tg3_enable_register_access(tp);
++ tg3_poll_fw(tp);
++
+ tg3_switch_clocks(tp);
+
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+@@ -9200,6 +9232,16 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
+ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
++ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
++ (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
++ sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
++ u32 val;
++
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val &= ~tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
++ }
+
+ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 94b4bd0..da90ba5 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -1368,7 +1368,12 @@
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
+-/* 0x4914 --> 0x4c00 unused */
++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000
++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000
++/* 0x4914 --> 0x4be0 unused */
++
++#define TG3_NUM_RDMA_CHANNELS 4
++#define TG3_RDMA_LENGTH 0x00004be0
+
+ /* Write DMA control registers */
+ #define WDMAC_MODE 0x00004c00
+@@ -2921,6 +2926,7 @@ enum TG3_FLAGS {
+ TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
++ TG3_FLAG_5719_5720_RDMA_BUG,
+ TG3_FLAG_RESET_TASK_PENDING,
+
+ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
+index d9c08c6..f4ed4d8 100644
+--- a/drivers/net/wireless/ath/ath9k/Kconfig
++++ b/drivers/net/wireless/ath/ath9k/Kconfig
+@@ -50,13 +50,17 @@ config ATH9K_DEBUGFS
+
+ Also required for changing debug message flags at run time.
+
+-config ATH9K_RATE_CONTROL
++config ATH9K_LEGACY_RATE_CONTROL
+ bool "Atheros ath9k rate control"
+ depends on ATH9K
+- default y
++ default n
+ ---help---
+ Say Y, if you want to use the ath9k specific rate control
+- module instead of minstrel_ht.
++ module instead of minstrel_ht. Be warned that there are various
++ issues with the ath9k RC and minstrel is a more robust algorithm.
++ Note that even if this option is selected, "ath9k_rate_control"
++ has to be passed to mac80211 using the module parameter,
++ ieee80211_default_rc_algo.
+
+ config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
+index 36ed3c4..1cdb246 100644
+--- a/drivers/net/wireless/ath/ath9k/Makefile
++++ b/drivers/net/wireless/ath/ath9k/Makefile
+@@ -5,7 +5,7 @@ ath9k-y += beacon.o \
+ recv.o \
+ xmit.o \
+
+-ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
++ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
+ ath9k-$(CONFIG_ATH9K_PCI) += pci.o
+ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
+ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 57622e0..ba6a49c 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -691,8 +691,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+- if (AR_SREV_5416(sc->sc_ah))
+- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
++ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+@@ -714,10 +713,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ sc->ant_rx = hw->wiphy->available_antennas_rx;
+ sc->ant_tx = hw->wiphy->available_antennas_tx;
+
+-#ifdef CONFIG_ATH9K_RATE_CONTROL
+- hw->rate_control_algorithm = "ath9k_rate_control";
+-#endif
+-
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
+index b7a4bcd..e8e1901 100644
+--- a/drivers/net/wireless/ath/ath9k/rc.h
++++ b/drivers/net/wireless/ath/ath9k/rc.h
+@@ -221,7 +221,7 @@ struct ath_rate_priv {
+ struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
+ };
+
+-#ifdef CONFIG_ATH9K_RATE_CONTROL
++#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
+ int ath_rate_control_register(void);
+ void ath_rate_control_unregister(void);
+ #else
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index c0f2041..b0c2801 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -2421,7 +2421,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
+ for (i = 0; i < B43_NR_FWTYPES; i++) {
+ errmsg = ctx->errors[i];
+ if (strlen(errmsg))
+- b43err(dev->wl, errmsg);
++ b43err(dev->wl, "%s", errmsg);
+ }
+ b43_print_fw_helptext(dev->wl, 1);
+ err = -ENOENT;
+diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
+index c5ce163..71195cb 100644
+--- a/drivers/net/wireless/b43legacy/main.c
++++ b/drivers/net/wireless/b43legacy/main.c
+@@ -3837,6 +3837,8 @@ static void b43legacy_remove(struct ssb_device *dev)
+ cancel_work_sync(&wldev->restart_work);
+
+ B43legacy_WARN_ON(!wl);
++ if (!wldev->fw.ucode)
++ return; /* NULL if fw never loaded */
+ if (wl->current_dev == wldev)
+ ieee80211_unregister_hw(wl->hw);
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index 3935994..bc30a5f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -604,7 +604,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+
+- if (!memcmp(&lq, &zero_lq, sizeof(lq)))
++ if (memcmp(&lq, &zero_lq, sizeof(lq)))
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
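[Note: the iwlwifi fix is one dropped '!': memcmp() returns 0 on equality, so "!memcmp(&lq, &zero_lq, ...)" meant "send the link-quality command only when it is all zeroes", the exact inverse of the intent. The pitfall in isolation:]

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char zero_lq[8] = { 0 };
	unsigned char lq[8] = { 1 };	/* a real, non-zero command */

	/* memcmp() == 0 means "equal", so the negation inverts the test */
	int buggy = !memcmp(&lq, &zero_lq, sizeof(lq));	/* 0: never sent */
	int fixed = memcmp(&lq, &zero_lq, sizeof(lq));	/* != 0: sent */

	printf("buggy send_lq=%d, fixed send_lq=%d\n", buggy, !!fixed);
	return 0;
}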
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 185a0eb..fd2b92d 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -46,11 +46,33 @@
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/page.h>
+
++/*
++ * This is the maximum slots a skb can have. If a guest sends a skb
++ * which exceeds this limit it is considered malicious.
++ */
++#define FATAL_SKB_SLOTS_DEFAULT 20
++static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
++module_param(fatal_skb_slots, uint, 0444);
++
++/*
++ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
++ * the maximum slots a valid packet can use. Now this value is defined
++ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
++ * all backends.
++ */
++#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
++
++typedef unsigned int pending_ring_idx_t;
++#define INVALID_PENDING_RING_IDX (~0U)
++
+ struct pending_tx_info {
+- struct xen_netif_tx_request req;
++ struct xen_netif_tx_request req; /* coalesced tx request */
+ struct xenvif *vif;
++ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
++ * if it is head of one or more tx
++ * reqs
++ */
+ };
+-typedef unsigned int pending_ring_idx_t;
+
+ struct netbk_rx_meta {
+ int id;
+@@ -101,7 +123,11 @@ struct xen_netbk {
+ atomic_t netfront_count;
+
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
++ /* Coalescing tx requests before copying makes number of grant
++ * copy ops greater or equal to number of slots required. In
++ * worst case a tx request consumes 2 gnttab_copy.
++ */
++ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
+
+ u16 pending_ring[MAX_PENDING_REQS];
+
+@@ -117,6 +143,16 @@ struct xen_netbk {
+ static struct xen_netbk *xen_netbk;
+ static int xen_netbk_group_nr;
+
++/*
++ * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
++ * one or more merged tx requests, otherwise it is the continuation of
++ * previous tx request.
++ */
++static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
++{
++ return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
++}
++
+ void xen_netbk_add_xenvif(struct xenvif *vif)
+ {
+ int i;
+@@ -249,6 +285,7 @@ static int max_required_rx_slots(struct xenvif *vif)
+ {
+ int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (vif->can_sg || vif->gso || vif->gso_prefix)
+ max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+
+@@ -627,6 +664,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+ break;
+ }
+@@ -874,47 +912,99 @@ static int netbk_count_requests(struct xenvif *vif,
+ int work_to_do)
+ {
+ RING_IDX cons = vif->tx.req_cons;
+- int frags = 0;
++ int slots = 0;
++ int drop_err = 0;
++ int more_data;
+
+ if (!(first->flags & XEN_NETTXF_more_data))
+ return 0;
+
+ do {
+- if (frags >= work_to_do) {
+- netdev_err(vif->dev, "Need more frags\n");
++ struct xen_netif_tx_request dropped_tx = { 0 };
++
++ if (slots >= work_to_do) {
++ netdev_err(vif->dev,
++ "Asked for %d slots but exceeds this limit\n",
++ work_to_do);
+ netbk_fatal_tx_err(vif);
+ return -ENODATA;
+ }
+
+- if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_err(vif->dev, "Too many frags\n");
++ /* This guest is really using too many slots and
++ * considered malicious.
++ */
++ if (unlikely(slots >= fatal_skb_slots)) {
++ netdev_err(vif->dev,
++ "Malicious frontend using %d slots, threshold %u\n",
++ slots, fatal_skb_slots);
+ netbk_fatal_tx_err(vif);
+ return -E2BIG;
+ }
+
+- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
++		/* The Xen network protocol had an implicit dependency on
++ * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
++ * the historical MAX_SKB_FRAGS value 18 to honor the
++ * same behavior as before. Any packet using more than
++ * 18 slots but less than fatal_skb_slots slots is
++ * dropped
++		 * dropped.
++ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Too many slots (%d) exceeding limit (%d), dropping packet\n",
++ slots, XEN_NETBK_LEGACY_SLOTS_MAX);
++ drop_err = -E2BIG;
++ }
++
++ if (drop_err)
++ txp = &dropped_tx;
++
++ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ sizeof(*txp));
+- if (txp->size > first->size) {
+- netdev_err(vif->dev, "Frag is bigger than frame.\n");
+- netbk_fatal_tx_err(vif);
+- return -EIO;
++
++ /* If the guest submitted a frame >= 64 KiB then
++ * first->size overflowed and following slots will
++ * appear to be larger than the frame.
++ *
++		 * This cannot be a fatal error, as there are buggy
++ * frontends that do this.
++ *
++ * Consume all slots and drop the packet.
++ */
++ if (!drop_err && txp->size > first->size) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Invalid tx request, slot size %u > remaining size %u\n",
++ txp->size, first->size);
++ drop_err = -EIO;
+ }
+
+ first->size -= txp->size;
+- frags++;
++ slots++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+- } while ((txp++)->flags & XEN_NETTXF_more_data);
+- return frags;
++
++ more_data = txp->flags & XEN_NETTXF_more_data;
++
++ if (!drop_err)
++ txp++;
++
++ } while (more_data);
++
++ if (drop_err) {
++ netbk_tx_err(vif, first, cons + slots);
++ return drop_err;
++ }
++
++ return slots;
+ }
+
+ static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
+- struct sk_buff *skb,
+ u16 pending_idx)
+ {
+ struct page *page;
+@@ -935,50 +1025,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+ u16 pending_idx = *((u16 *)skb->data);
+- int i, start;
++ u16 head_idx = 0;
++ int slot, start;
++ struct page *page;
++ pending_ring_idx_t index, start_idx = 0;
++ uint16_t dst_offset;
++ unsigned int nr_slots;
++ struct pending_tx_info *first = NULL;
++
++ /* At this point shinfo->nr_frags is in fact the number of
++ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
++ */
++ nr_slots = shinfo->nr_frags;
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+- for (i = start; i < shinfo->nr_frags; i++, txp++) {
+- struct page *page;
+- pending_ring_idx_t index;
++	/* Coalesce tx requests; at this point the packet passed in
++ * should be <= 64K. Any packets larger than 64K have been
++ * handled in netbk_count_requests().
++ */
++ for (shinfo->nr_frags = slot = start; slot < nr_slots;
++ shinfo->nr_frags++) {
+ struct pending_tx_info *pending_tx_info =
+ netbk->pending_tx_info;
+
+- index = pending_index(netbk->pending_cons++);
+- pending_idx = netbk->pending_ring[index];
+- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
++ page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ if (!page)
+ goto err;
+
+- netbk->mmap_pages[pending_idx] = page;
+-
+- gop->source.u.ref = txp->gref;
+- gop->source.domid = vif->domid;
+- gop->source.offset = txp->offset;
+-
+- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+- gop->dest.domid = DOMID_SELF;
+- gop->dest.offset = txp->offset;
+-
+- gop->len = txp->size;
+- gop->flags = GNTCOPY_source_gref;
++ dst_offset = 0;
++ first = NULL;
++ while (dst_offset < PAGE_SIZE && slot < nr_slots) {
++ gop->flags = GNTCOPY_source_gref;
++
++ gop->source.u.ref = txp->gref;
++ gop->source.domid = vif->domid;
++ gop->source.offset = txp->offset;
++
++ gop->dest.domid = DOMID_SELF;
++
++ gop->dest.offset = dst_offset;
++ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
++
++ if (dst_offset + txp->size > PAGE_SIZE) {
++ /* This page can only merge a portion
++				 * of the tx request. Do not increment any
++ * pointer / counter here. The txp
++ * will be dealt with in future
++ * rounds, eventually hitting the
++ * `else` branch.
++ */
++ gop->len = PAGE_SIZE - dst_offset;
++ txp->offset += gop->len;
++ txp->size -= gop->len;
++ dst_offset += gop->len; /* quit loop */
++ } else {
++ /* This tx request can be merged in the page */
++ gop->len = txp->size;
++ dst_offset += gop->len;
++
++ index = pending_index(netbk->pending_cons++);
++
++ pending_idx = netbk->pending_ring[index];
++
++ memcpy(&pending_tx_info[pending_idx].req, txp,
++ sizeof(*txp));
++ xenvif_get(vif);
++
++ pending_tx_info[pending_idx].vif = vif;
++
++				/* Poison these fields; the corresponding
++				 * fields for the head tx req will be set
++ * to correct values after the loop.
++ */
++ netbk->mmap_pages[pending_idx] = (void *)(~0UL);
++ pending_tx_info[pending_idx].head =
++ INVALID_PENDING_RING_IDX;
++
++ if (!first) {
++ first = &pending_tx_info[pending_idx];
++ start_idx = index;
++ head_idx = pending_idx;
++ }
++
++ txp++;
++ slot++;
++ }
+
+- gop++;
++ gop++;
++ }
+
+- memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+- xenvif_get(vif);
+- pending_tx_info[pending_idx].vif = vif;
+- frag_set_pending_idx(&frags[i], pending_idx);
++ first->req.offset = 0;
++ first->req.size = dst_offset;
++ first->head = start_idx;
++ set_page_ext(page, netbk, head_idx);
++ netbk->mmap_pages[head_idx] = page;
++ frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ }
+
++ BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
++
+ return gop;
+ err:
+ /* Unwind, freeing all pages and sending error responses. */
+- while (i-- > start) {
+- xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+- XEN_NETIF_RSP_ERROR);
++ while (shinfo->nr_frags-- > start) {
++ xen_netbk_idx_release(netbk,
++ frag_get_pending_idx(&frags[shinfo->nr_frags]),
++ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+@@ -994,8 +1148,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ struct pending_tx_info *tx_info;
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
++ u16 peek; /* peek into next tx request */
+
+ /* Check status of header. */
+ err = gop->status;
+@@ -1007,11 +1163,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
++ pending_ring_idx_t head;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
++ tx_info = &netbk->pending_tx_info[pending_idx];
++ head = tx_info->head;
+
+ /* Check error status: if okay then remember grant handle. */
+- newerr = (++gop)->status;
++ do {
++ newerr = (++gop)->status;
++ if (newerr)
++ break;
++ peek = netbk->pending_ring[pending_index(++head)];
++ } while (!pending_tx_is_head(netbk, peek));
++
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+@@ -1236,11 +1401,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ struct sk_buff *skb;
+ int ret;
+
+- while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
++ < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xenvif *vif;
+ struct xen_netif_tx_request txreq;
+- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
+ struct page *page;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
+@@ -1328,7 +1494,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ pending_idx = netbk->pending_ring[index];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+- ret < MAX_SKB_FRAGS) ?
++ ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
+ PKT_PROT_LEN : txreq.size;
+
+ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+@@ -1355,15 +1521,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ }
+
+ /* XXX could copy straight to head */
+- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
++ page = xen_netbk_alloc_page(netbk, pending_idx);
+ if (!page) {
+ kfree_skb(skb);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+
+- netbk->mmap_pages[pending_idx] = page;
+-
+ gop->source.u.ref = txreq.gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txreq.offset;
+@@ -1380,6 +1544,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ memcpy(&netbk->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ netbk->pending_tx_info[pending_idx].vif = vif;
++ netbk->pending_tx_info[pending_idx].head = index;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+@@ -1510,7 +1675,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+- pending_ring_idx_t index;
++ pending_ring_idx_t head;
++ u16 peek; /* peek into next tx request */
++
++ BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+
+ /* Already complete? */
+ if (netbk->mmap_pages[pending_idx] == NULL)
+@@ -1519,19 +1687,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ pending_tx_info = &netbk->pending_tx_info[pending_idx];
+
+ vif = pending_tx_info->vif;
++ head = pending_tx_info->head;
++
++ BUG_ON(!pending_tx_is_head(netbk, head));
++ BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+
+- make_tx_response(vif, &pending_tx_info->req, status);
++ do {
++ pending_ring_idx_t index;
++ pending_ring_idx_t idx = pending_index(head);
++ u16 info_idx = netbk->pending_ring[idx];
+
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
++ pending_tx_info = &netbk->pending_tx_info[info_idx];
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+- xenvif_put(vif);
++ /* Setting any number other than
++ * INVALID_PENDING_RING_IDX indicates this slot is
++ * starting a new packet / ending a previous packet.
++ */
++ pending_tx_info->head = 0;
++
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = netbk->pending_ring[info_idx];
++
++ xenvif_put(vif);
++
++ peek = netbk->pending_ring[pending_index(++head)];
++
++ } while (!pending_tx_is_head(netbk, peek));
+
+ netbk->mmap_pages[pending_idx]->mapping = 0;
+ put_page(netbk->mmap_pages[pending_idx]);
+ netbk->mmap_pages[pending_idx] = NULL;
+ }
+
++
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st)
+@@ -1584,8 +1773,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
+ static inline int tx_work_todo(struct xen_netbk *netbk)
+ {
+
+- if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+- !list_empty(&netbk->net_schedule_list))
++ if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
++ < MAX_PENDING_REQS) &&
++ !list_empty(&netbk->net_schedule_list))
+ return 1;
+
+ return 0;
+@@ -1668,6 +1858,13 @@ static int __init netback_init(void)
+ if (!xen_pv_domain())
+ return -ENODEV;
+
++ if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
++ printk(KERN_INFO
++ "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
++ fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
++ fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
++ }
++
+ xen_netbk_group_nr = num_online_cpus();
+ xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
+ if (!xen_netbk) {
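A note on the scheme the netback hunks above implement, before the netfront changes below: coalesced packets are tracked in the pending ring by marking only their first slot with a valid head index; continuation slots carry INVALID_PENDING_RING_IDX, and the release path walks forward from a head until it meets the next head. The following is a minimal user-space sketch of that bookkeeping; all names are illustrative, not the driver's.

#include <stdio.h>

#define NSLOTS   8
#define INVALID  (~0u)

struct slot { unsigned head; };     /* ring index of the head, or INVALID */

static struct slot slots[NSLOTS];

static int is_head(unsigned idx) { return slots[idx].head != INVALID; }

static void release(unsigned head_idx)
{
    unsigned idx = head_idx;
    do {
        printf("completing slot %u\n", idx);
        idx = (idx + 1) % NSLOTS;
    } while (idx != head_idx && !is_head(idx));
}

int main(void)
{
    for (unsigned i = 0; i < NSLOTS; i++)
        slots[i].head = INVALID;
    slots[0].head = 0;              /* packet A: slots 0-2 */
    slots[3].head = 3;              /* packet B: slots 3-4 */
    slots[5].head = 5;              /* packet C: slots 5-7 */
    release(0);                     /* completes slots 0, 1 and 2 */
    return 0;
}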
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 9b9843e..0d9914b 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -36,7 +36,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_ether.h>
+-#include <linux/tcp.h>
++#include <net/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/moduleparam.h>
+ #include <linux/mm.h>
+@@ -490,6 +490,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+
++ /* If skb->len is too big for wire format, drop skb and alert
++ * user about misconfiguration.
++ */
++ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
++ net_alert_ratelimited(
++ "xennet: skb->len = %u, too big for wire format\n",
++ skb->len);
++ goto drop;
++ }
++
+ frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+@@ -1043,7 +1053,8 @@ err:
+
+ static int xennet_change_mtu(struct net_device *dev, int mtu)
+ {
+- int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++ int max = xennet_can_sg(dev) ?
++ XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+@@ -1318,6 +1329,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
++ netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
++
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 7b82868..8e6c4fa 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8665,6 +8665,13 @@ static int __must_check __init get_thinkpad_model_data(
+ tp->model_str = kstrdup(s, GFP_KERNEL);
+ if (!tp->model_str)
+ return -ENOMEM;
++ } else {
++ s = dmi_get_system_info(DMI_BIOS_VENDOR);
++ if (s && !(strnicmp(s, "Lenovo", 6))) {
++ tp->model_str = kstrdup(s, GFP_KERNEL);
++ if (!tp->model_str)
++ return -ENOMEM;
++ }
+ }
+
+ s = dmi_get_system_info(DMI_PRODUCT_NAME);
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 23ef16c..84eab3f 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -555,7 +555,7 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ /* For MSI mode re-enable device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI) {
+ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+- TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
++ TSI721_DEV_INT_SMSG_CH;
+ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+ }
+
+diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
+index a3e98f1..b37c8b0 100644
+--- a/drivers/rtc/rtc-twl.c
++++ b/drivers/rtc/rtc-twl.c
+@@ -490,6 +490,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
+ }
+
+ platform_set_drvdata(pdev, rtc);
++ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+
+ out2:
+diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
+index 7b97c60..626ae47 100644
+--- a/drivers/staging/gma500/cdv_intel_display.c
++++ b/drivers/staging/gma500/cdv_intel_display.c
+@@ -1457,6 +1457,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+ kfree(psb_intel_crtc);
+ }
+
++static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
++{
++ struct gtt_range *gt;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++ if (crtc->fb) {
++ gt = to_psb_fb(crtc->fb)->gtt;
++ psb_gtt_unpin(gt);
++ }
++}
++
+ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = cdv_intel_crtc_dpms,
+ .mode_fixup = cdv_intel_crtc_mode_fixup,
+@@ -1464,6 +1477,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .mode_set_base = cdv_intel_pipe_set_base,
+ .prepare = cdv_intel_crtc_prepare,
+ .commit = cdv_intel_crtc_commit,
++ .disable = cdv_intel_crtc_disable,
+ };
+
+ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
+index 3f39a37..d28fdc2 100644
+--- a/drivers/staging/gma500/framebuffer.c
++++ b/drivers/staging/gma500/framebuffer.c
+@@ -831,8 +831,8 @@ void psb_modeset_init(struct drm_device *dev)
+ for (i = 0; i < dev_priv->num_pipe; i++)
+ psb_intel_crtc_init(dev, i, mode_dev);
+
+- dev->mode_config.max_width = 2048;
+- dev->mode_config.max_height = 2048;
++ dev->mode_config.max_width = 4096;
++ dev->mode_config.max_height = 4096;
+
+ psb_setup_outputs(dev);
+ }
+diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
+index caa9d86..0d872e9 100644
+--- a/drivers/staging/gma500/psb_intel_display.c
++++ b/drivers/staging/gma500/psb_intel_display.c
+@@ -1255,6 +1255,19 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+ kfree(psb_intel_crtc);
+ }
+
++static void psb_intel_crtc_disable(struct drm_crtc *crtc)
++{
++ struct gtt_range *gt;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++ if (crtc->fb) {
++ gt = to_psb_fb(crtc->fb)->gtt;
++ psb_gtt_unpin(gt);
++ }
++}
++
+ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .dpms = psb_intel_crtc_dpms,
+ .mode_fixup = psb_intel_crtc_mode_fixup,
+@@ -1262,6 +1275,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .mode_set_base = psb_intel_pipe_set_base,
+ .prepare = psb_intel_crtc_prepare,
+ .commit = psb_intel_crtc_commit,
++ .disable = psb_intel_crtc_disable,
+ };
+
+ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 5b77316..db313ba 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -713,9 +713,9 @@ static int iscsi_add_notunderstood_response(
+ }
+ INIT_LIST_HEAD(&extra_response->er_list);
+
+- strncpy(extra_response->key, key, strlen(key) + 1);
+- strncpy(extra_response->value, NOTUNDERSTOOD,
+- strlen(NOTUNDERSTOOD) + 1);
++ strlcpy(extra_response->key, key, sizeof(extra_response->key));
++ strlcpy(extra_response->value, NOTUNDERSTOOD,
++ sizeof(extra_response->value));
+
+ list_add_tail(&extra_response->er_list,
+ &param_list->extra_response_list);
+@@ -1572,8 +1572,6 @@ int iscsi_decode_text_input(
+
+ if (phase & PHASE_SECURITY) {
+ if (iscsi_check_for_auth_key(key) > 0) {
+- char *tmpptr = key + strlen(key);
+- *tmpptr = '=';
+ kfree(tmpbuf);
+ return 1;
+ }
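The strncpy() to strlcpy() change above fixes a bound taken from the wrong side: strlen(key) + 1 sizes the copy to the source, so an oversized key overruns the fixed-size destination, while strlcpy() bounds by the destination and always NUL-terminates. A small user-space sketch, with a local strlcpy since glibc lacks one; the semantics follow the BSD/kernel version:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len >= size ? size - 1 : len;
        memcpy(dst, src, n);
        dst[n] = '\0';              /* always terminated, always in bounds */
    }
    return len;                     /* total length we tried to create */
}

int main(void)
{
    char key[8];

    my_strlcpy(key, "MaxRecvDataSegmentLength", sizeof(key));
    printf("stored: \"%s\"\n", key);    /* truncated, but no overflow */
    return 0;
}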
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
+index 6a37fd6..83eed65 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.h
++++ b/drivers/target/iscsi/iscsi_target_parameters.h
+@@ -1,8 +1,10 @@
+ #ifndef ISCSI_PARAMETERS_H
+ #define ISCSI_PARAMETERS_H
+
++#include <scsi/iscsi_proto.h>
++
+ struct iscsi_extra_response {
+- char key[64];
++ char key[KEY_MAXLEN];
+ char value[32];
+ struct list_head er_list;
+ } ____cacheline_aligned;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index e9637f9..b368b83 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1310,10 +1310,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ dep = dwc->eps[epnum];
+- dwc3_free_trb_pool(dep);
+-
+- if (epnum != 0 && epnum != 1)
++ /*
++ * Physical endpoints 0 and 1 are special; they form the
++ * bi-directional USB endpoint 0.
++ *
++ * For those two physical endpoints, we don't allocate a TRB
++		 * pool nor do we add them to the endpoints list. Due to that, we
++ * shouldn't do these two operations otherwise we would end up
++ * with all sorts of bugs when removing dwc3.ko.
++ */
++ if (epnum != 0 && epnum != 1) {
++ dwc3_free_trb_pool(dep);
+ list_del(&dep->endpoint.ep_list);
++ }
+
+ kfree(dep);
+ }
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 08e470f..34655d0 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 430c1d5..5018e33 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1755,6 +1755,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
++ if (!xhci->rh_bw)
++ goto no_bw;
++
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+@@ -1773,6 +1776,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ }
+
++no_bw:
+ xhci->num_usb2_ports = 0;
+ xhci->num_usb3_ports = 0;
+ xhci->num_active_eps = 0;
+@@ -2184,6 +2188,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ u32 page_size;
+ int i;
+
++ INIT_LIST_HEAD(&xhci->lpm_failed_devs);
++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
++
+ page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+ xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ for (i = 0; i < 16; i++) {
+@@ -2262,7 +2269,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+- INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+@@ -2363,8 +2369,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ if (xhci_setup_port_arrays(xhci, flags))
+ goto fail;
+
+- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+-
+ return 0;
+
+ fail:
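The xhci-mem reordering above moves both INIT_LIST_HEAD() calls to the top of xhci_mem_init() so that xhci_mem_cleanup(), which runs on any failure, never walks an uninitialized list. The same init-early, single-cleanup pattern in a minimal user-space sketch (illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
    int *a, *b;
};

static void ctx_cleanup(struct ctx *c)
{
    free(c->a); c->a = NULL;    /* free(NULL) is a no-op, so cleanup  */
    free(c->b); c->b = NULL;    /* is safe however far init got       */
}

static int ctx_init(struct ctx *c)
{
    c->a = c->b = NULL;         /* init everything cleanup touches first */

    c->a = malloc(64);
    if (!c->a)
        goto fail;
    c->b = malloc(64);
    if (!c->b)
        goto fail;
    return 0;
fail:
    ctx_cleanup(c);
    return -1;
}

int main(void)
{
    struct ctx c;

    if (ctx_init(&c) == 0) {
        puts("initialized");
        ctx_cleanup(&c);
    }
    return 0;
}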
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 2c0350f..136c357 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -938,6 +938,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *secondary_hcd;
+ int retval = 0;
++ bool comp_timer_running = false;
+
+ /* Wait a bit if either of the roothubs need to settle from the
+ * transition into bus suspend.
+@@ -975,6 +976,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+
+ /* If restore operation fails, re-initialize the HC during resume */
+ if ((temp & STS_SRE) || hibernated) {
++
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ !(xhci_all_ports_seen_u0(xhci))) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
++ }
++
+ /* Let the USB core know _both_ roothubs lost power. */
+ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+@@ -1017,6 +1025,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ retval = xhci_init(hcd->primary_hcd);
+ if (retval)
+ return retval;
++ comp_timer_running = true;
++
+ xhci_dbg(xhci, "Start the primary HCD\n");
+ retval = xhci_run(hcd->primary_hcd);
+ if (!retval) {
+@@ -1058,7 +1068,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ * to suffer the Compliance Mode issue again. It doesn't matter if
+ * ports have entered previously to U0 before system's suspension.
+ */
+- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
+ compliance_mode_recovery_timer_init(xhci);
+
+ /* Re-enable port polling. */
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index 3ca6c0d..1a715f6 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -49,7 +49,7 @@ static int debug;
+ #define DRIVER_NAME "ark3116"
+
+ /* usb timeout of 1 second */
+-#define ARK_TIMEOUT (1*HZ)
++#define ARK_TIMEOUT 1000
+
+ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x6547, 0x0232) },
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 01a44d3..10c30ad 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -96,6 +96,7 @@ static const struct usb_device_id id_table_earthmate[] = {
+ static const struct usb_device_id id_table_cyphidcomrs232[] = {
+ { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
++ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
+ { } /* Terminating entry */
+ };
+
+@@ -109,6 +110,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
+ { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
++ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
+ { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
+ { } /* Terminating entry */
+ };
+@@ -267,6 +269,12 @@ static struct usb_serial_driver cypress_ca42v2_device = {
+ * Cypress serial helper functions
+ *****************************************************************************/
+
++/* FRWD Dongle hidcom needs to skip reset and speed checks */
++static inline bool is_frwd(struct usb_device *dev)
++{
++ return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
++ (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
++}
+
+ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
+ {
+@@ -276,6 +284,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
+ if (unstable_bauds)
+ return new_rate;
+
++ /* FRWD Dongle uses 115200 bps */
++ if (is_frwd(port->serial->dev))
++ return new_rate;
++
+ /*
+ * The general purpose firmware for the Cypress M8 allows for
+ * a maximum speed of 57600bps (I have no idea whether DeLorme
+@@ -488,7 +500,11 @@ static int generic_startup(struct usb_serial *serial)
+ return -ENOMEM;
+ }
+
+- usb_reset_configuration(serial->dev);
++	/* Skip reset for FRWD device. It is a workaround:
++	   the device hangs if it receives SET_CONFIGURE in Configured
++ state. */
++ if (!is_frwd(serial->dev))
++ usb_reset_configuration(serial->dev);
+
+ priv->cmd_ctrl = 0;
+ priv->line_control = 0;
+diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
+index 67cf608..b461311 100644
+--- a/drivers/usb/serial/cypress_m8.h
++++ b/drivers/usb/serial/cypress_m8.h
+@@ -24,6 +24,10 @@
+ #define VENDOR_ID_CYPRESS 0x04b4
+ #define PRODUCT_ID_CYPHIDCOM 0x5500
+
++/* FRWD Dongle - a GPS sports watch */
++#define VENDOR_ID_FRWD 0x6737
++#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
++
+ /* Powercom UPS, chip CY7C63723 */
+ #define VENDOR_ID_POWERCOM 0x0d9f
+ #define PRODUCT_ID_UPS 0x0002
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 918ec98..ce9f87f 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2169,6 +2169,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
+
+ cflag = termios->c_cflag;
+
++ if (!old_termios)
++ goto no_skip;
++
+ if (old_termios->c_cflag == termios->c_cflag
+ && old_termios->c_ispeed == termios->c_ispeed
+ && old_termios->c_ospeed == termios->c_ospeed)
+@@ -2182,6 +2185,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+ goto no_data_parity_stop_changes;
+
++no_skip:
+ /* Set number of data bits, parity, stop bits */
+
+ urb_value = 0;
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index 6aca631..cf2668e 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -327,7 +327,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
+ usb_bulk_msg(serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port->bulk_out_endpointAddress), buf,
+- count, &actual, HZ * 1);
++ count, &actual, 1000);
+
+ if (status != IUU_OPERATION_OK)
+ dbg("%s - error = %2x", __func__, status);
+@@ -350,7 +350,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
+ usb_bulk_msg(serial->dev,
+ usb_rcvbulkpipe(serial->dev,
+ port->bulk_in_endpointAddress), buf,
+- count, &actual, HZ * 1);
++ count, &actual, 1000);
+
+ if (status != IUU_OPERATION_OK)
+ dbg("%s - error = %2x", __func__, status);
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index a442352..4f415e28 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -1833,7 +1833,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
+ d_details = s_priv->device_details;
+ device_port = port->number - port->serial->minor;
+
+- outcont_urb = d_details->outcont_endpoints[port->number];
++ outcont_urb = d_details->outcont_endpoints[device_port];
+ this_urb = p_priv->outcont_urb;
+
+ dbg("%s - endpoint %d", __func__, usb_pipeendpoint(this_urb->pipe));
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 3524a10..9580679 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -44,7 +44,7 @@
+ #define DRIVER_DESC "Moschip USB Serial Driver"
+
+ /* default urb timeout */
+-#define MOS_WDR_TIMEOUT (HZ * 5)
++#define MOS_WDR_TIMEOUT 5000
+
+ #define MOS_MAX_PORT 0x02
+ #define MOS_WRITE 0x0E
+@@ -234,11 +234,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ __u8 requesttype = (__u8)0xc0;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum);
+- int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+- index, data, 1, MOS_WDR_TIMEOUT);
+- if (status < 0)
++ u8 *buf;
++ int status;
++
++ buf = kmalloc(1, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ status = usb_control_msg(usbdev, pipe, request, requesttype, value,
++ index, buf, 1, MOS_WDR_TIMEOUT);
++ if (status == 1)
++ *data = *buf;
++ else if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
++ kfree(buf);
++
+ return status;
+ }
+
+@@ -1700,7 +1711,7 @@ static void change_port_settings(struct tty_struct *tty,
+ mos7720_port->shadowMCR |= (UART_MCR_XONANY);
+ /* To set hardware flow control to the specified *
+ * serial port, in SP1/2_CONTROL_REG */
+- if (port->number)
++ if (port_number)
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ else
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+@@ -2112,7 +2123,7 @@ static int mos7720_startup(struct usb_serial *serial)
+
+ /* setting configuration feature to one */
+ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
++ (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
+
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+@@ -2157,7 +2168,7 @@ static void mos7720_release(struct usb_serial *serial)
+ /* wait for synchronous usb calls to return */
+ if (mos_parport->msg_pending)
+ wait_for_completion_timeout(&mos_parport->syncmsg_compl,
+- MOS_WDR_TIMEOUT);
++ msecs_to_jiffies(MOS_WDR_TIMEOUT));
+
+ parport_remove_port(mos_parport->pp);
+ usb_set_serial_data(serial, NULL);
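Two rules drive the mos7720 changes above: usb_control_msg() and friends take timeouts in milliseconds, not jiffies (hence HZ * 5 becoming 5000), and USB transfer buffers are DMA-mapped, so they must come from the heap rather than the caller's stack. A kernel-context sketch of the bounce-buffer shape; it assumes <linux/usb.h>, read_one_reg and the request constants are illustrative, and it builds only inside a driver:

static int read_one_reg(struct usb_device *udev, u16 value, u16 index, u8 *data)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* DMA-safe bounce buffer */
	int status;

	if (!buf)
		return -ENOMEM;

	status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 0x0e, 0xc0,	/* request, type (dir: in) */
				 value, index, buf, 1,
				 5000 /* milliseconds, not jiffies */);
	if (status == 1)
		*data = *buf;			/* copy out of the bounce buffer */
	kfree(buf);
	return status < 0 ? status : 0;
}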
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 59c4997..8ea37bc 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -593,6 +593,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index fd86e0e..317e503 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -270,7 +270,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ serial settings even to the same values as before. Thus
+ we actually need to filter in this specific case */
+
+- if (!tty_termios_hw_change(tty->termios, old_termios))
++ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
+
+ cflag = tty->termios->c_cflag;
+@@ -279,7 +279,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ if (!buf) {
+ dev_err(&port->dev, "%s - out of memory.\n", __func__);
+ /* Report back no change occurred */
+- *tty->termios = *old_termios;
++ if (old_termios)
++ *tty->termios = *old_termios;
+ return;
+ }
+
+@@ -419,7 +420,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ control = priv->line_control;
+ if ((cflag & CBAUD) == B0)
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+- else if ((old_termios->c_cflag & CBAUD) == B0)
++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ if (control != priv->line_control) {
+ control = priv->line_control;
+@@ -480,7 +481,6 @@ static void pl2303_close(struct usb_serial_port *port)
+
+ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+- struct ktermios tmp_termios;
+ struct usb_serial *serial = port->serial;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ int result;
+@@ -498,7 +498,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* Setup termios */
+ if (tty)
+- pl2303_set_termios(tty, port, &tmp_termios);
++ pl2303_set_termios(tty, port, NULL);
+
+ dbg("%s - submitting read urb", __func__);
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 14c4a82..5535c3a 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -115,6 +115,7 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
+ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
++ {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index ba6b438..f3179b0 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -338,7 +338,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int cflag = tty->termios->c_cflag;
+- unsigned int old_cflag = old_termios->c_cflag;
+ unsigned short uartdata;
+ unsigned char buf[2] = {0, 0};
+ int baud;
+@@ -347,15 +346,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+
+
+ /* check that they really want us to change something */
+- if (!tty_termios_hw_change(tty->termios, old_termios))
++ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
+
+ /* set DTR/RTS active */
+ spin_lock_irqsave(&priv->lock, flags);
+ control = priv->line_control;
+- if ((old_cflag & CBAUD) == B0) {
++ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
+ priv->line_control |= MCR_DTR;
+- if (!(old_cflag & CRTSCTS))
++ if (!(old_termios->c_cflag & CRTSCTS))
+ priv->line_control |= MCR_RTS;
+ }
+ if (control != priv->line_control) {
+@@ -445,7 +444,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ * status of the device. */
+ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+- struct ktermios tmp_termios;
+ struct usb_serial *serial = port->serial;
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ int ret;
+@@ -468,7 +466,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* Setup termios */
+ if (tty)
+- spcp8x5_set_termios(tty, port, &tmp_termios);
++ spcp8x5_set_termios(tty, port, NULL);
+
+ spcp8x5_get_msr(serial->dev, &status, priv->type);
+
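The pl2303 and spcp8x5 hunks above converge on one convention: a NULL old_termios passed from open() means "no previous state, program everything", so set_termios must guard every old_termios dereference. A minimal user-space sketch of that shape (illustrative types):

#include <stdio.h>
#include <stddef.h>

struct termios_like { unsigned cflag; };

static void set_termios(const struct termios_like *new,
                        const struct termios_like *old)
{
    /* Nothing changed and we have a previous state: skip the work. */
    if (old && old->cflag == new->cflag)
        return;
    printf("programming hardware, cflag=%u\n", new->cflag);
}

int main(void)
{
    struct termios_like t = { .cflag = 42 };

    set_termios(&t, NULL);      /* open path: full setup, no old state */
    set_termios(&t, &t);        /* unchanged: no-op                    */
    return 0;
}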
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 1c11959..80a6ff6 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -599,7 +599,9 @@ static int treo_attach(struct usb_serial *serial)
+ dest->read_urb = src->read_urb; \
+ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
+ dest->bulk_in_buffer = src->bulk_in_buffer; \
++ dest->bulk_in_size = src->bulk_in_size; \
+ dest->interrupt_in_urb = src->interrupt_in_urb; \
++ dest->interrupt_in_urb->context = dest; \
+ dest->interrupt_in_endpointAddress = \
+ src->interrupt_in_endpointAddress;\
+ dest->interrupt_in_buffer = src->interrupt_in_buffer; \
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 59d646d..0ec60cd 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -1209,7 +1209,7 @@ static void firm_setup_port(struct tty_struct *tty)
+ struct whiteheat_port_settings port_settings;
+ unsigned int cflag = tty->termios->c_cflag;
+
+- port_settings.port = port->number + 1;
++ port_settings.port = port->number - port->serial->minor + 1;
+
+ /* get the byte size */
+ switch (cflag & CSIZE) {
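The keyspan and whiteheat one-liners above fix the same off-by-base bug: port->number is a global tty minor, while the hardware wants an index local to this adapter, so the port index must be computed as port->number - port->serial->minor. A tiny sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
    int serial_minor = 16;              /* first minor of this adapter */

    for (int number = 16; number < 20; number++)
        printf("tty minor %d -> device port %d\n",
               number, number - serial_minor);
    return 0;
}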
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index fec1204..11d7b64 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -1176,7 +1176,7 @@ static void __xen_evtchn_do_upcall(void)
+ {
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+- int i;
++ int i, irq;
+ int cpu = get_cpu();
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+@@ -1184,6 +1184,8 @@ static void __xen_evtchn_do_upcall(void)
+
+ do {
+ unsigned long pending_words;
++ unsigned long pending_bits;
++ struct irq_desc *desc;
+
+ vcpu_info->evtchn_upcall_pending = 0;
+
+@@ -1194,6 +1196,17 @@ static void __xen_evtchn_do_upcall(void)
+ /* Clear master flag /before/ clearing selector flag. */
+ wmb();
+ #endif
++ if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
++ int evtchn = evtchn_from_irq(irq);
++ word_idx = evtchn / BITS_PER_LONG;
++ pending_bits = evtchn % BITS_PER_LONG;
++ if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
++ desc = irq_to_desc(irq);
++ if (desc)
++ generic_handle_irq_desc(irq, desc);
++ }
++ }
++
+ pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+@@ -1202,7 +1215,6 @@ static void __xen_evtchn_do_upcall(void)
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
+- unsigned long pending_bits;
+ unsigned long words;
+
+ words = MASK_LSBS(pending_words, word_idx);
+@@ -1231,8 +1243,7 @@ static void __xen_evtchn_do_upcall(void)
+
+ do {
+ unsigned long bits;
+- int port, irq;
+- struct irq_desc *desc;
++ int port;
+
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 2263144..d0e5fc5 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -18,6 +18,7 @@
+ #include <linux/slab.h>
+ #include <linux/vfs.h>
+ #include <linux/fs.h>
++#include <linux/inet.h>
+ #include "cifsglob.h"
+ #include "cifsproto.h"
+ #include "cifsfs.h"
+@@ -150,7 +151,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ * assuming that we have 'unc=' and 'ip=' in
+ * the original sb_mountdata
+ */
+- md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
++ md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 +
++ INET6_ADDRSTRLEN;
+ mountdata = kzalloc(md_len+1, GFP_KERNEL);
+ if (mountdata == NULL) {
+ rc = -ENOMEM;
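The cifs fix above grows the mount-data allocation by INET6_ADDRSTRLEN: sizing the buffer from the old option string alone leaves no room when the substituted address turns out to be IPv6. A minimal user-space sketch of worst-case sizing (the option string is made up):

#include <arpa/inet.h>   /* INET6_ADDRSTRLEN */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *opts = "unc=\\\\server\\share";
    /* old length + ",ip=" + worst-case textual address + NUL */
    size_t len = strlen(opts) + sizeof(",ip=") + INET6_ADDRSTRLEN;
    char *buf = malloc(len);

    if (!buf)
        return 1;
    snprintf(buf, len, "%s,ip=%s", opts, "2001:db8::1");
    puts(buf);
    free(buf);
    return 0;
}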
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index cc386b2..259e950 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2260,7 +2260,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ __func__, inode->i_ino, inode->i_size);
+ jbd_debug(2, "truncating inode %lu to %lld bytes\n",
+ inode->i_ino, inode->i_size);
++ mutex_lock(&inode->i_mutex);
+ ext4_truncate(inode);
++ mutex_unlock(&inode->i_mutex);
+ nr_truncates++;
+ } else {
+ ext4_msg(sb, KERN_DEBUG,
+diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
+index 77b69b2..13fc885 100644
+--- a/fs/jfs/inode.c
++++ b/fs/jfs/inode.c
+@@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ {
+ int wait = wbc->sync_mode == WB_SYNC_ALL;
+
+- if (test_cflag(COMMIT_Nolink, inode))
++ if (inode->i_nlink == 0)
+ return 0;
+ /*
+ * If COMMIT_DIRTY is not set, the inode isn't really dirty.
+diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
+index cc5f811..bfb2a91 100644
+--- a/fs/jfs/jfs_logmgr.c
++++ b/fs/jfs/jfs_logmgr.c
+@@ -1058,7 +1058,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
+ */
+ void jfs_syncpt(struct jfs_log *log, int hard_sync)
+ { LOG_LOCK(log);
+- lmLogSync(log, hard_sync);
++ if (!test_bit(log_QUIESCE, &log->flag))
++ lmLogSync(log, hard_sync);
+ LOG_UNLOCK(log);
+ }
+
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 23ce927..bd2fb43 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -507,6 +507,28 @@ xfs_vn_getattr(
+ return 0;
+ }
+
++static void
++xfs_setattr_mode(
++ struct xfs_trans *tp,
++ struct xfs_inode *ip,
++ struct iattr *iattr)
++{
++ struct inode *inode = VFS_I(ip);
++ umode_t mode = iattr->ia_mode;
++
++ ASSERT(tp);
++ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
++
++ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++ mode &= ~S_ISGID;
++
++ ip->i_d.di_mode &= S_IFMT;
++ ip->i_d.di_mode |= mode & ~S_IFMT;
++
++ inode->i_mode &= S_IFMT;
++ inode->i_mode |= mode & ~S_IFMT;
++}
++
+ int
+ xfs_setattr_nonsize(
+ struct xfs_inode *ip,
+@@ -658,18 +680,8 @@ xfs_setattr_nonsize(
+ /*
+ * Change file access modes.
+ */
+- if (mask & ATTR_MODE) {
+- umode_t mode = iattr->ia_mode;
+-
+- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+- mode &= ~S_ISGID;
+-
+- ip->i_d.di_mode &= S_IFMT;
+- ip->i_d.di_mode |= mode & ~S_IFMT;
+-
+- inode->i_mode &= S_IFMT;
+- inode->i_mode |= mode & ~S_IFMT;
+- }
++ if (mask & ATTR_MODE)
++ xfs_setattr_mode(tp, ip, iattr);
+
+ /*
+ * Change file access or modified times.
+@@ -768,9 +780,8 @@ xfs_setattr_size(
+ return XFS_ERROR(error);
+
+ ASSERT(S_ISREG(ip->i_d.di_mode));
+- ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+- ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
+- ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
++ ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
++ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+
+ lock_flags = XFS_ILOCK_EXCL;
+ if (!(flags & XFS_ATTR_NOLOCK))
+@@ -902,6 +913,12 @@ xfs_setattr_size(
+ xfs_iflags_set(ip, XFS_ITRUNCATED);
+ }
+
++ /*
++ * Change file access modes.
++ */
++ if (mask & ATTR_MODE)
++ xfs_setattr_mode(tp, ip, iattr);
++
+ if (mask & ATTR_CTIME) {
+ inode->i_ctime = iattr->ia_ctime;
+ ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index c692acc..9c3e071 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -168,6 +168,8 @@ extern struct sysdev_class cpu_sysdev_class;
+
+ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
++extern void cpu_hotplug_disable(void);
++extern void cpu_hotplug_enable(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+ #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+@@ -190,6 +192,8 @@ static inline void cpu_hotplug_driver_unlock(void)
+
+ #define get_online_cpus() do { } while (0)
+ #define put_online_cpus() do { } while (0)
++#define cpu_hotplug_disable() do { } while (0)
++#define cpu_hotplug_enable() do { } while (0)
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+diff --git a/include/linux/net.h b/include/linux/net.h
+index b299230..b7ca08e 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -249,6 +249,29 @@ extern struct socket *sockfd_lookup(int fd, int *err);
+ #define sockfd_put(sock) fput(sock->file)
+ extern int net_ratelimit(void);
+
++#define net_ratelimited_function(function, ...) \
++do { \
++ if (net_ratelimit()) \
++ function(__VA_ARGS__); \
++} while (0)
++
++#define net_emerg_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__)
++#define net_alert_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__)
++#define net_crit_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
++#define net_err_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
++#define net_notice_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
++#define net_warn_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
++#define net_info_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
++#define net_dbg_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
++
+ #define net_random() random32()
+ #define net_srandom(seed) srandom32((__force u32)seed)
+
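Callers use the macros above as, e.g., net_err_ratelimited("...\n", ...), exactly as the netfront hunk earlier uses net_alert_ratelimited(). A user-space analog of the idea, much cruder than the kernel's __ratelimit() (at most one message per wall-clock second; illustrative only):

#include <stdio.h>
#include <time.h>

static int my_ratelimit(void)
{
    static time_t last;
    time_t now = time(NULL);

    if (now == last)
        return 0;               /* already printed this second */
    last = now;
    return 1;
}

#define err_ratelimited(...)                  \
    do {                                      \
        if (my_ratelimit())                   \
            fprintf(stderr, __VA_ARGS__);     \
    } while (0)

int main(void)
{
    for (int i = 0; i < 1000000; i++)
        err_ratelimited("dropping packet %d\n", i);  /* prints once or twice */
    return 0;
}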
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index d6955607..7f62faf 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -136,6 +136,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
+
+ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
++extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+ #else
+
+ #define make_migration_entry(page, write) swp_entry(0, 0)
+@@ -147,6 +148,8 @@ static inline int is_migration_entry(swp_entry_t swp)
+ static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
++static inline void migration_entry_wait_huge(struct mm_struct *mm,
++ pte_t *pte) { }
+ static inline int is_write_migration_entry(swp_entry_t entry)
+ {
+ return 0;
+diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
+index cb94668..d4635cd 100644
+--- a/include/xen/interface/io/netif.h
++++ b/include/xen/interface/io/netif.h
+@@ -13,6 +13,24 @@
+ #include "../grant_table.h"
+
+ /*
++ * Older implementations of the Xen network frontend / backend have an
++ * implicit dependency on MAX_SKB_FRAGS as the maximum number of
++ * ring slots an skb can use. Netfront / netback may not work as
++ * expected when frontend and backend have different MAX_SKB_FRAGS.
++ *
++ * A better approach is to add a mechanism for netfront / netback to
++ * negotiate this value. However, we cannot fix all possible
++ * frontends, so we need to define a value which states the minimum
++ * number of slots a backend must support.
++ *
++ * The minimum value derives from the older Linux kernel's MAX_SKB_FRAGS
++ * (18), which has been proven to work with most frontends. Any new
++ * backend which doesn't negotiate with the frontend should expect the
++ * frontend to send a valid packet using slots up to this value.
++ */
++#define XEN_NETIF_NR_SLOTS_MIN 18
++
++/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+@@ -47,6 +65,7 @@
+ #define _XEN_NETTXF_extra_info (3)
+ #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+
++#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
+ struct xen_netif_tx_request {
+ grant_ref_t gref; /* Reference to buffer page */
+ uint16_t offset; /* Offset within buffer page */
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 09fae26..d4bc594 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1167,7 +1167,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
+
+ /* Wait for auditd to drain the queue a little */
+ DECLARE_WAITQUEUE(wait, current);
+- set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&audit_backlog_wait, &wait);
+
+ if (audit_backlog_limit &&
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 563f136..82c91f1 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -124,6 +124,27 @@ static void cpu_hotplug_done(void)
+ mutex_unlock(&cpu_hotplug.lock);
+ }
+
++/*
++ * Wait for currently running CPU hotplug operations to complete (if any) and
++ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
++ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
++ * hotplug path before performing hotplug operations. So acquiring that lock
++ * guarantees mutual exclusion from any currently running hotplug operations.
++ */
++void cpu_hotplug_disable(void)
++{
++ cpu_maps_update_begin();
++ cpu_hotplug_disabled = 1;
++ cpu_maps_update_done();
++}
++
++void cpu_hotplug_enable(void)
++{
++ cpu_maps_update_begin();
++ cpu_hotplug_disabled = 0;
++ cpu_maps_update_done();
++}
++
+ #else /* #if CONFIG_HOTPLUG_CPU */
+ static void cpu_hotplug_begin(void) {}
+ static void cpu_hotplug_done(void) {}
+@@ -479,36 +500,6 @@ static int alloc_frozen_cpus(void)
+ core_initcall(alloc_frozen_cpus);
+
+ /*
+- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+- * to continue until any currently running CPU hotplug operation gets
+- * completed.
+- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+- * CPU hotplug path and released only after it is complete. Thus, we
+- * (and hence the freezer) will block here until any currently running CPU
+- * hotplug operation gets completed.
+- */
+-void cpu_hotplug_disable_before_freeze(void)
+-{
+- cpu_maps_update_begin();
+- cpu_hotplug_disabled = 1;
+- cpu_maps_update_done();
+-}
+-
+-
+-/*
+- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+- * disabled while beginning to freeze tasks).
+- */
+-void cpu_hotplug_enable_after_thaw(void)
+-{
+- cpu_maps_update_begin();
+- cpu_hotplug_disabled = 0;
+- cpu_maps_update_done();
+-}
+-
+-/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+@@ -527,12 +518,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
+
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+- cpu_hotplug_disable_before_freeze();
++ cpu_hotplug_disable();
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+- cpu_hotplug_enable_after_thaw();
++ cpu_hotplug_enable();
+ break;
+
+ default:
+diff --git a/kernel/sys.c b/kernel/sys.c
+index be5fa8b..9d557df 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -353,6 +353,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL(unregister_reboot_notifier);
+
++/* Add backwards compatibility for stable trees. */
++#ifndef PF_NO_SETAFFINITY
++#define PF_NO_SETAFFINITY PF_THREAD_BOUND
++#endif
++
++static void migrate_to_reboot_cpu(void)
++{
++ /* The boot cpu is always logical cpu 0 */
++ int cpu = 0;
++
++ cpu_hotplug_disable();
++
++ /* Make certain the cpu I'm about to reboot on is online */
++ if (!cpu_online(cpu))
++ cpu = cpumask_first(cpu_online_mask);
++
++ /* Prevent races with other tasks migrating this task */
++ current->flags |= PF_NO_SETAFFINITY;
++
++ /* Make certain I only run on the appropriate processor */
++ set_cpus_allowed_ptr(current, cpumask_of(cpu));
++}
++
+ /**
+ * kernel_restart - reboot the system
+ * @cmd: pointer to buffer containing command to execute for restart
+@@ -364,7 +387,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+@@ -391,7 +414,7 @@ static void kernel_shutdown_prepare(enum system_states state)
+ void kernel_halt(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_HALT);
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ kmsg_dump(KMSG_DUMP_HALT);
+@@ -410,7 +433,7 @@ void kernel_power_off(void)
+ kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+ if (pm_power_off_prepare)
+ pm_power_off_prepare();
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ printk(KERN_EMERG "Power down.\n");
+ kmsg_dump(KMSG_DUMP_POWEROFF);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 24b3759..226776b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -929,6 +929,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+
+ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
++static loff_t
++ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
++{
++ loff_t ret;
++
++ if (file->f_mode & FMODE_READ)
++ ret = seq_lseek(file, offset, whence);
++ else
++ file->f_pos = ret = 1;
++
++ return ret;
++}
++
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+ #ifndef CONFIG_FTRACE_MCOUNT_RECORD
+@@ -2315,19 +2328,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
+ inode, file);
+ }
+
+-static loff_t
+-ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+-{
+- loff_t ret;
+-
+- if (file->f_mode & FMODE_READ)
+- ret = seq_lseek(file, offset, origin);
+- else
+- file->f_pos = ret = 1;
+-
+- return ret;
+-}
+-
+ static int ftrace_match(char *str, char *regex, int len, int type)
+ {
+ int matched = 0;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 70b4733..2dcd716 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2751,7 +2751,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (ptep) {
+ entry = huge_ptep_get(ptep);
+ if (unlikely(is_hugetlb_entry_migration(entry))) {
+- migration_entry_wait(mm, (pmd_t *)ptep, address);
++ migration_entry_wait_huge(mm, ptep);
+ return 0;
+ } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+ return VM_FAULT_HWPOISON_LARGE |
+diff --git a/mm/migrate.c b/mm/migrate.c
+index e1052d1..09d6a9d 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -184,15 +184,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
+ *
+ * This function is called from do_swap_page().
+ */
+-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+- unsigned long address)
++static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++ spinlock_t *ptl)
+ {
+- pte_t *ptep, pte;
+- spinlock_t *ptl;
++ pte_t pte;
+ swp_entry_t entry;
+ struct page *page;
+
+- ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
++ spin_lock(ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
+@@ -220,6 +219,20 @@ out:
+ pte_unmap_unlock(ptep, ptl);
+ }
+
++void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long address)
++{
++ spinlock_t *ptl = pte_lockptr(mm, pmd);
++ pte_t *ptep = pte_offset_map(pmd, address);
++ __migration_entry_wait(mm, ptep, ptl);
++}
++
++void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
++{
++ spinlock_t *ptl = &(mm)->page_table_lock;
++ __migration_entry_wait(mm, pte, ptl);
++}
++
+ #ifdef CONFIG_BLOCK
+ /* Returns true if all buffers are successfully locked */
+ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
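The migrate.c refactor above hoists the lock choice to the caller: __migration_entry_wait() now takes a spinlock_t *, so regular pages (the per-PTE split lock) and huge pages (the mm-wide page_table_lock) share one body. The shape of that refactor in a small pthreads sketch (illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t per_entry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t global_lock    = PTHREAD_MUTEX_INITIALIZER;

static void wait_on_entry_locked(int entry, pthread_mutex_t *lock)
{
    pthread_mutex_lock(lock);
    printf("inspecting entry %d under the caller's lock\n", entry);
    pthread_mutex_unlock(lock);
}

static void wait_on_entry(int entry)        /* normal pages */
{
    wait_on_entry_locked(entry, &per_entry_lock);
}

static void wait_on_entry_huge(int entry)   /* huge pages */
{
    wait_on_entry_locked(entry, &global_lock);
}

int main(void)
{
    wait_on_entry(1);
    wait_on_entry_huge(2);
    return 0;
}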
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 7704d9c..7b3dadd 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -314,8 +314,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ * Swap entry may have been freed since our caller observed it.
+ */
+ err = swapcache_prepare(entry);
+- if (err == -EEXIST) { /* seems racy */
++ if (err == -EEXIST) {
+ radix_tree_preload_end();
++ /*
++ * We might race against get_swap_page() and stumble
++ * across a SWAP_HAS_CACHE swap_map entry whose page
++ * has not been brought into the swapcache yet, while
++ * the other end is scheduled away waiting on discard
++ * I/O completion at scan_swap_map().
++ *
++ * In order to avoid turning this transitory state
++ * into a permanent loop around this -EEXIST case
++ * if !CONFIG_PREEMPT and the I/O completion happens
++ * to be waiting on the CPU waitqueue where we are now
++ * busy looping, we just conditionally invoke the
++ * scheduler here, if there are some more important
++ * tasks to run.
++ */
++ cond_resched();
+ continue;
+ }
+ if (err) { /* swp entry is obsolete ? */
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 04175d9..a0b6c50 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2297,10 +2297,15 @@ done:
+ }
+ }
+
+-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_command_rej(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+
++ if (cmd_len < sizeof(*rej))
++ return -EPROTO;
++
+ if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
+ return 0;
+
+@@ -2317,7 +2322,8 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ return 0;
+ }
+
+-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static int l2cap_connect_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ {
+ struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ struct l2cap_conn_rsp rsp;
+@@ -2325,8 +2331,14 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ struct sock *parent, *sk = NULL;
+ int result, status = L2CAP_CS_NO_INFO;
+
+- u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+- __le16 psm = req->psm;
++ u16 dcid = 0, scid;
++ __le16 psm;
++
++ if (cmd_len < sizeof(struct l2cap_conn_req))
++ return -EPROTO;
++
++ scid = __le16_to_cpu(req->scid);
++ psm = req->psm;
+
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+
+@@ -2451,7 +2463,9 @@ sendresp:
+ return 0;
+ }
+
+-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static int l2cap_connect_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
+ u16 scid, dcid, result, status;
+@@ -2459,6 +2473,9 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ struct sock *sk;
+ u8 req[128];
+
++ if (cmd_len < sizeof(*rsp))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+ result = __le16_to_cpu(rsp->result);
+@@ -2534,6 +2551,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
+ struct sock *sk;
+ int len;
+
++ if (cmd_len < sizeof(*req))
++ return -EPROTO;
++
+ dcid = __le16_to_cpu(req->dcid);
+ flags = __le16_to_cpu(req->flags);
+
+@@ -2559,7 +2579,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
+
+ /* Reject if config buffer is too small. */
+ len = cmd_len - sizeof(*req);
+- if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
++ if (chan->conf_len + len > sizeof(chan->conf_req)) {
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_REJECT, flags), rsp);
+@@ -2621,13 +2641,18 @@ unlock:
+ return 0;
+ }
+
+-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_config_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
+ u16 scid, flags, result;
+ struct l2cap_chan *chan;
+ struct sock *sk;
+- int len = cmd->len - sizeof(*rsp);
++ int len = cmd_len - sizeof(*rsp);
++
++ if (cmd_len < sizeof(*rsp))
++ return -EPROTO;
+
+ scid = __le16_to_cpu(rsp->scid);
+ flags = __le16_to_cpu(rsp->flags);
+@@ -2703,7 +2728,9 @@ done:
+ return 0;
+ }
+
+-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
+ struct l2cap_disconn_rsp rsp;
+@@ -2711,6 +2738,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
+ struct l2cap_chan *chan;
+ struct sock *sk;
+
++ if (cmd_len != sizeof(*req))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(req->scid);
+ dcid = __le16_to_cpu(req->dcid);
+
+@@ -2744,13 +2774,18 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
+ return 0;
+ }
+
+-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
+ u16 dcid, scid;
+ struct l2cap_chan *chan;
+ struct sock *sk;
+
++ if (cmd_len != sizeof(*rsp))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+
+@@ -2778,11 +2813,16 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
+ return 0;
+ }
+
+-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_information_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_info_req *req = (struct l2cap_info_req *) data;
+ u16 type;
+
++ if (cmd_len != sizeof(*req))
++ return -EPROTO;
++
+ type = __le16_to_cpu(req->type);
+
+ BT_DBG("type 0x%4.4x", type);
+@@ -2818,11 +2858,16 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
+ return 0;
+ }
+
+-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_information_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
++ if (cmd_len != sizeof(*rsp))
++ return -EPROTO;
++
+ type = __le16_to_cpu(rsp->type);
+ result = __le16_to_cpu(rsp->result);
+
+@@ -2941,15 +2986,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+- l2cap_command_rej(conn, cmd, data);
++ l2cap_command_rej(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_REQ:
+- err = l2cap_connect_req(conn, cmd, data);
++ err = l2cap_connect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_RSP:
+- err = l2cap_connect_rsp(conn, cmd, data);
++ err = l2cap_connect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_REQ:
+@@ -2957,15 +3002,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ break;
+
+ case L2CAP_CONF_RSP:
+- err = l2cap_config_rsp(conn, cmd, data);
++ err = l2cap_config_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+- err = l2cap_disconnect_req(conn, cmd, data);
++ err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+- err = l2cap_disconnect_rsp(conn, cmd, data);
++ err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_REQ:
+@@ -2976,11 +3021,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ break;
+
+ case L2CAP_INFO_REQ:
+- err = l2cap_information_req(conn, cmd, data);
++ err = l2cap_information_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_INFO_RSP:
+- err = l2cap_information_rsp(conn, cmd, data);
++ err = l2cap_information_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ default:
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index f4ddf34..8260cd5 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1242,6 +1242,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
+
+ ASSERT_RTNL();
+
++ /*
++ * Close all AP_VLAN interfaces first, as otherwise they
++ * might be closed while the AP interface they belong to
++ * is closed, causing unregister_netdevice_many() to crash.
++ */
++ list_for_each_entry(sdata, &local->interfaces, list)
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ dev_close(sdata->dev);
++
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+ list_del(&sdata->list);
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 0acfdc9..c1c6e6d 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -220,6 +220,9 @@ void cfg80211_conn_work(struct work_struct *work)
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
++ if (!wdev->netdev)
++ continue;
++
+ wdev_lock(wdev);
+ if (!netif_running(wdev->netdev)) {
+ wdev_unlock(wdev);
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 2b7559c..0a7ca6c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -1,6 +1,7 @@
+ #ifndef __USBAUDIO_CARD_H
+ #define __USBAUDIO_CARD_H
+
++#define MAX_NR_RATES 1024
+ #define MAX_PACKS 20
+ #define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
+ #define MAX_URBS 8
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 89421d1..ddfef57 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -226,7 +226,7 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets,
+ int min = combine_quad(&data[2 + 12 * i]);
+ int max = combine_quad(&data[6 + 12 * i]);
+ int res = combine_quad(&data[10 + 12 * i]);
+- int rate;
++ unsigned int rate;
+
+ if ((max < 0) || (min < 0) || (res < 0) || (max < min))
+ continue;
+@@ -253,6 +253,10 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets,
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+
+ nr_rates++;
++ if (nr_rates >= MAX_NR_RATES) {
++ snd_printk(KERN_ERR "invalid uac2 rates\n");
++ break;
++ }
+
+ /* avoid endless loop */
+ if (res == 0)
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index f4540bf..97ec155 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -822,6 +822,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
++ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0991):
+ /* Most audio usb devices lie about volume resolution.
+ * Most Logitech webcams have res = 384.
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 4e25148..e467a58 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -157,7 +157,13 @@
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
+ },
+ {
+- USB_DEVICE(0x046d, 0x0990),
++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++ USB_DEVICE_ID_MATCH_INT_CLASS |
++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++ .idVendor = 0x046d,
++ .idProduct = 0x0990,
++ .bInterfaceClass = USB_CLASS_AUDIO,
++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Logitech, Inc.",
+ .product_name = "QuickCam Pro 9000",
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 42eeee8..9c82f8b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -132,10 +132,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ unsigned *rate_table = NULL;
+
+ fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
+- if (! fp) {
++ if (!fp) {
+ snd_printk(KERN_ERR "cannot memdup\n");
+ return -ENOMEM;
+ }
++ if (fp->nr_rates > MAX_NR_RATES) {
++ kfree(fp);
++ return -EINVAL;
++ }
+ if (fp->nr_rates > 0) {
+ rate_table = kmemdup(fp->rate_table,
+ sizeof(int) * fp->nr_rates, GFP_KERNEL);
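The L2CAP hunks in the patch above all apply one pattern: each signalling handler gains a cmd_len parameter and rejects truncated PDUs with -EPROTO before dereferencing fields of the remote-supplied payload. Below is a minimal sketch of that guard, illustrative only; the struct and parameter names are taken from the hunks above, but the handler itself is hypothetical, not the verbatim kernel code.

    /* Hypothetical handler shape: validate the advertised command
     * length against the structure we are about to read, so a short
     * PDU from the peer can never cause an out-of-bounds read. */
    static int l2cap_handle_example(struct l2cap_conn *conn,
                                    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
                                    u8 *data)
    {
            struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;

            if (cmd_len < sizeof(*req))     /* truncated signalling PDU */
                    return -EPROTO;

            /* only now is it safe to read req->scid, req->psm, ... */
            return 0;
    }

Note the two variants in the hunks: fixed-size commands (disconnect and information req/rsp) use an exact check, cmd_len != sizeof(*req), while variable-length ones such as the config request keep a minimum-size check and then bound the trailing option data against the channel's config buffer separately.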
diff --git a/3.2.54/1047_linux-3.2.48.patch b/3.2.54/1047_linux-3.2.48.patch
new file mode 100644
index 0000000..6d55b1f
--- /dev/null
+++ b/3.2.54/1047_linux-3.2.48.patch
@@ -0,0 +1,952 @@
+diff --git a/Makefile b/Makefile
+index 40e2a11..299e2eb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 1252a26..1397408 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -301,9 +301,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ }
+
+ #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+-static inline void flush_kernel_dcache_page(struct page *page)
+-{
+-}
++extern void flush_kernel_dcache_page(struct page *);
+
+ #define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index 8fda9f7..fe61cab 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -304,6 +304,39 @@ void flush_dcache_page(struct page *page)
+ EXPORT_SYMBOL(flush_dcache_page);
+
+ /*
++ * Ensure cache coherency for the kernel mapping of this page. We can
++ * assume that the page is pinned via kmap.
++ *
++ * If the page only exists in the page cache and there are no user
++ * space mappings, this is a no-op since the page was already marked
++ * dirty at creation. Otherwise, we need to flush the dirty kernel
++ * cache lines directly.
++ */
++void flush_kernel_dcache_page(struct page *page)
++{
++ if (cache_is_vivt() || cache_is_vipt_aliasing()) {
++ struct address_space *mapping;
++
++ mapping = page_mapping(page);
++
++ if (!mapping || mapping_mapped(mapping)) {
++ void *addr;
++
++ addr = page_address(page);
++ /*
++ * kmap_atomic() doesn't set the page virtual
++ * address for highmem pages, and
++ * kunmap_atomic() takes care of cache
++ * flushing already.
++ */
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
++ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
++ }
++ }
++}
++EXPORT_SYMBOL(flush_kernel_dcache_page);
++
++/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data. The expected sequence is:
+ *
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index 941a98c..a5018fb 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -53,6 +53,12 @@ void flush_dcache_page(struct page *page)
+ }
+ EXPORT_SYMBOL(flush_dcache_page);
+
++void flush_kernel_dcache_page(struct page *page)
++{
++ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
++}
++EXPORT_SYMBOL(flush_kernel_dcache_page);
++
+ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
+index 2a81d32..e51e5cd 100644
+--- a/arch/tile/lib/exports.c
++++ b/arch/tile/lib/exports.c
+@@ -90,4 +90,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
+ EXPORT_SYMBOL(__ashrdi3);
+ uint64_t __ashldi3(uint64_t, unsigned int);
+ EXPORT_SYMBOL(__ashldi3);
++int __ffsdi2(uint64_t);
++EXPORT_SYMBOL(__ffsdi2);
+ #endif
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9a42703..fb2e69d 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2120,6 +2120,7 @@ source "fs/Kconfig.binfmt"
+ config IA32_EMULATION
+ bool "IA32 Emulation"
+ depends on X86_64
++ select BINFMT_ELF
+ select COMPAT_BINFMT_ELF
+ ---help---
+ Include code to run 32-bit programs under a 64-bit kernel. You should
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e82a53a..57867e4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -551,8 +551,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ if (index != XCR_XFEATURE_ENABLED_MASK)
+ return 1;
+ xcr0 = xcr;
+- if (kvm_x86_ops->get_cpl(vcpu) != 0)
+- return 1;
+ if (!(xcr0 & XSTATE_FP))
+ return 1;
+ if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+@@ -566,7 +564,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+
+ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ {
+- if (__kvm_set_xcr(vcpu, index, xcr)) {
++ if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
++ __kvm_set_xcr(vcpu, index, xcr)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 07ef7e8..f9537e3 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -49,6 +49,13 @@
+ #define EFI_DEBUG 1
+ #define PFX "EFI: "
+
++#define EFI_MIN_RESERVE 5120
++
++#define EFI_DUMMY_GUID \
++ EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
++
++static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
++
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+@@ -787,6 +794,13 @@ void __init efi_enter_virtual_mode(void)
+ early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
+ memmap.map = NULL;
+ kfree(new_memmap);
++
++ /* clean DUMMY object */
++ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ 0, NULL);
+ }
+
+ /*
+@@ -838,22 +852,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
++ if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
++ return 0;
++
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+- if (!max_size && remaining_size > size)
+- printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+- " is returning MaxVariableSize=0\n");
++ /*
++ * Some firmware implementations refuse to boot if there's insufficient
++ * space in the variable store. We account for that by refusing the
++ * write if permitting it would reduce the available space to under
++ * 5KB. This figure was provided by Samsung, so should be safe.
++ */
++ if ((remaining_size - size < EFI_MIN_RESERVE) &&
++ !efi_no_storage_paranoia) {
++
++ /*
++ * Triggering garbage collection may require that the firmware
++ * generate a real EFI_OUT_OF_RESOURCES error. We can force
++ * that by attempting to use more space than is available.
++ */
++ unsigned long dummy_size = remaining_size + 1024;
++ void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
++
++ if (!dummy)
++ return EFI_OUT_OF_RESOURCES;
+
+- if (!storage_size || size > remaining_size ||
+- (max_size && size > max_size))
+- return EFI_OUT_OF_RESOURCES;
++ status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ dummy_size, dummy);
+
+- if (!efi_no_storage_paranoia &&
+- (remaining_size - size) < (storage_size / 2))
+- return EFI_OUT_OF_RESOURCES;
++ if (status == EFI_SUCCESS) {
++ /*
++ * This should have failed, so if it didn't make sure
++ * that we delete it...
++ */
++ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ 0, dummy);
++ }
++
++ kfree(dummy);
++
++ /*
++ * The runtime code may now have triggered a garbage collection
++ * run, so check the variable info again
++ */
++ status = efi.query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++
++ if (status != EFI_SUCCESS)
++ return status;
++
++ /*
++ * There still isn't enough room, so return an error
++ */
++ if (remaining_size - size < EFI_MIN_RESERVE)
++ return EFI_OUT_OF_RESOURCES;
++ }
+
+ return EFI_SUCCESS;
+ }
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 166cb36..c5f7b2c 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -343,6 +343,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
++ revalidate_disk(vblk->disk);
+ done:
+ mutex_unlock(&vblk->config_lock);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 4fddd21..38a7793 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -408,11 +408,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+- /* Required for the hardware to program scanline values for waiting */
+- if (INTEL_INFO(dev)->gen == 6)
+- I915_WRITE(GFX_MODE,
+- GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+-
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index 69c3adf..c2ab21c 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -520,6 +520,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
+ return 0;
+
+ no_clock:
++ iounmap(etsects->regs);
+ no_ioremap:
+ release_resource(etsects->rsrc);
+ no_resource:
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index f698183..ed7a5a6 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5524,7 +5524,20 @@ err_out:
+ return -EIO;
+ }
+
+-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
++static bool rtl_skb_pad(struct sk_buff *skb)
++{
++ if (skb_padto(skb, ETH_ZLEN))
++ return false;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ return true;
++}
++
++static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
++{
++ return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
++}
++
++static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
+ {
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+@@ -5537,13 +5550,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
++
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
++ } else {
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return rtl_skb_pad(skb);
+ }
++ return true;
+ }
+
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+@@ -5575,6 +5595,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
++ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
++ opts[0] = DescOwn;
++
++ if (!rtl8169_tso_csum(tp, skb, opts))
++ goto err_update_stats;
++
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+@@ -5586,11 +5612,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+
+- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+- opts[0] = DescOwn;
+-
+- rtl8169_tso_csum(tp, skb, opts);
+-
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 4b805be..9d3b39e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -178,7 +178,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+ };
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index b353e7e..4a2423e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -52,7 +52,9 @@
+
+ /* Abbott Diabetics vendor and product ids */
+ #define ABBOTT_VENDOR_ID 0x1a61
+-#define ABBOTT_PRODUCT_ID 0x3410
++#define ABBOTT_STEREO_PLUG_ID 0x3410
++#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
++#define ABBOTT_STRIP_PORT_ID 0x3420
+
+ /* Commands */
+ #define TI_GET_VERSION 0x01
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index 9c51f62..844bd64 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -1033,15 +1033,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
+ DPRINTK("ncp_rmdir: removing %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+- /*
+- * fail with EBUSY if there are still references to this
+- * directory.
+- */
+- dentry_unhash(dentry);
+- error = -EBUSY;
+- if (!d_unhashed(dentry))
+- goto out;
+-
+ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+ dentry->d_name.len, !ncp_preserve_case(dir));
+diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
+index 2ae1371..1c33dd7 100644
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
++ * The barrier() is needed to make sure compiler doesn't cache first element [1],
++ * as this loop can be restarted [2]
++ * [1] Documentation/atomic_ops.txt around line 114
++ * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ */
+ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+- for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
++ for (({barrier();}), \
++ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 2acd2e2..7e9f2d3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -336,6 +336,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+
+ struct timespec;
+
++/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
++extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
++extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ unsigned int flags, struct timespec *timeout);
+ extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
+diff --git a/net/compat.c b/net/compat.c
+index 6def90e..8c979cc 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -733,19 +733,25 @@ static unsigned char nas[21] = {
+
+ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+ {
+- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+ {
+- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+@@ -767,6 +773,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ int datagrams;
+ struct timespec ktspec;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (timeout == NULL)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index d55110e..5f28fab 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -716,6 +716,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
+ tiph = &tunnel->parms.iph;
+ }
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if ((dst = tiph->daddr) == 0) {
+ /* NBMA tunnel */
+
+@@ -851,7 +852,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
+ skb_reset_transport_header(skb);
+ skb_push(skb, gre_hlen);
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 17ad951..5dc5137 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (tos & 1)
+ tos = old_iph->tos;
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if (!dst) {
+ /* NBMA tunnel */
+ if ((rt = skb_rtable(skb)) == NULL) {
+@@ -531,7 +532,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb->transport_header = skb->network_header;
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index fe381c2..ec8b4b7e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3037,8 +3037,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
+
+ for (i = 0; i < shi->nr_frags; ++i) {
+ const struct skb_frag_struct *f = &shi->frags[i];
+- struct page *page = skb_frag_page(f);
+- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
++ unsigned int offset = f->page_offset;
++ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
++
++ sg_set_page(&sg, page, skb_frag_size(f),
++ offset_in_page(offset));
+ if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
+ return 1;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5c1807c..3add486 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -835,11 +835,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ &md5);
+ tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
+
+- if (tcp_packets_in_flight(tp) == 0) {
++ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
+- skb->ooo_okay = 1;
+- } else
+- skb->ooo_okay = 0;
++
++ /* if no packet is in qdisc/device queue, then allow XPS to select
++ * another queue.
++ */
++ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
+
+ skb_push(skb, tcp_header_size);
+ skb_reset_transport_header(skb);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d84033b..d603caa 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2437,8 +2437,10 @@ static void init_loopback(struct net_device *dev)
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+- if (!IS_ERR(sp_rt))
++ if (!IS_ERR(sp_rt)) {
++ sp_ifa->rt = sp_rt;
+ ip6_ins_rt(sp_rt);
++ }
+ }
+ read_unlock_bh(&idev->lock);
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3ccd9b2..6aadaa8 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1233,7 +1233,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ if (WARN_ON(np->cork.opt))
+ return -EINVAL;
+
+- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
++ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ if (unlikely(np->cork.opt == NULL))
+ return -ENOBUFS;
+
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 6f60175..74410e6 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -350,19 +350,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
++ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
++ total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess_tun;
+ }
+- skb_put(skb, total_len);
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+
+- return error;
++ return total_len;
+
+ error_put_sess_tun:
+ sock_put(ps->tunnel_sock);
+diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
+index e5330ed..bf99567 100644
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
+ }
+ }
+
++/**
++ * netlbl_domhsh_validate - Validate a new domain mapping entry
++ * @entry: the entry to validate
++ *
++ * This function validates the new domain mapping entry to ensure that it is
++ * a valid entry. Returns zero on success, negative values on failure.
++ *
++ */
++static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
++{
++ struct netlbl_af4list *iter4;
++ struct netlbl_domaddr4_map *map4;
++#if IS_ENABLED(CONFIG_IPV6)
++ struct netlbl_af6list *iter6;
++ struct netlbl_domaddr6_map *map6;
++#endif /* IPv6 */
++
++ if (entry == NULL)
++ return -EINVAL;
++
++ switch (entry->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (entry->type_def.cipsov4 != NULL ||
++ entry->type_def.addrsel != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (entry->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_ADDRSELECT:
++ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
++ map4 = netlbl_domhsh_addr4_entry(iter4);
++ switch (map4->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (map4->type_def.cipsov4 != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (map4->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
++ map6 = netlbl_domhsh_addr6_entry(iter6);
++ switch (map6->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#endif /* IPv6 */
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ /*
+ * Domain Hash Table Functions
+ */
+@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
+ struct netlbl_af6list *tmp6;
+ #endif /* IPv6 */
+
++ ret_val = netlbl_domhsh_validate(entry);
++ if (ret_val != 0)
++ return ret_val;
++
+ /* XXX - we can remove this RCU read lock as the spinlock protects the
+ * entire function, but before we do we need to fixup the
+ * netlbl_af[4,6]list RCU functions to do "the right thing" with
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 5a70215..a2ac2c3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2820,12 +2820,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
+ return -EOPNOTSUPP;
+
+ uaddr->sa_family = AF_PACKET;
++ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ if (dev)
+- strncpy(uaddr->sa_data, dev->name, 14);
+- else
+- memset(uaddr->sa_data, 0, 14);
++ strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ rcu_read_unlock();
+ *uaddr_len = sizeof(*uaddr);
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 5e0d86e..ba0108f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3929,6 +3929,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
+
+ /* Release our hold on the endpoint. */
+ sp = sctp_sk(sk);
++ /* This could happen during socket init, thus we bail out
++ * early, since the rest of the below is not setup either.
++ */
++ if (sp->ep == NULL)
++ return;
++
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+diff --git a/net/socket.c b/net/socket.c
+index 68879db..cf546a3 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1876,9 +1876,9 @@ struct used_address {
+ unsigned int name_len;
+ };
+
+-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags,
+- struct used_address *used_address)
++static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags,
++ struct used_address *used_address)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -1998,22 +1998,30 @@ out:
+ * BSD sendmsg interface
+ */
+
+-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
++long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
++ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux sendmmsg interface
+ */
+@@ -2044,15 +2052,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+
+ while (datagrams < vlen) {
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2076,11 +2085,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, mmsg, vlen, flags);
+ }
+
+-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags, int nosec)
++static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags, int nosec)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -2177,23 +2188,31 @@ out:
+ * BSD recvmsg interface
+ */
+
+-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+- unsigned int, flags)
++long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
++ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
++ unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux recvmmsg interface
+ */
+@@ -2231,17 +2250,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ * No need to ask LSM for more than the first datagram.
+ */
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2308,6 +2328,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
+ int datagrams;
+ struct timespec timeout_sys;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (!timeout)
+ return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
+
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index acb7fac..3b79a4a 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -149,14 +149,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ return -EINVAL;
+ }
+
++ alts = &iface->altsetting[0];
++ altsd = get_iface_desc(alts);
++
++ /*
++ * Android with both accessory and audio interfaces enabled gets the
++ * interface numbers wrong.
++ */
++ if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
++ chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
++ interface == 0 &&
++ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
++ altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
++ interface = 2;
++ iface = usb_ifnum_to_if(dev, interface);
++ if (!iface)
++ return -EINVAL;
++ alts = &iface->altsetting[0];
++ altsd = get_iface_desc(alts);
++ }
++
+ if (usb_interface_claimed(iface)) {
+ snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
+ dev->devnum, ctrlif, interface);
+ return -EINVAL;
+ }
+
+- alts = &iface->altsetting[0];
+- altsd = get_iface_desc(alts);
+ if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
+ altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 97ec155..aeb26eb 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -821,6 +821,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
++ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0991):
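A recurring theme in the 3.2.48 patch above is the MSG_CMSG_COMPAT gatekeeping in net/socket.c and net/compat.c: the native sendmsg/recvmsg family now returns -EINVAL if userspace sets MSG_CMSG_COMPAT, and only the in-kernel compat wrappers add the flag before calling the new __sys_*msg helpers. The flag tells the kernel to parse the control buffer with 32-bit layouts, so letting a 64-bit caller set it directly would misparse cmsg headers. A rough userspace illustration of the layout mismatch follows; the struct shapes are assumptions modeled on the generic ABI, and the sizes shown are for x86-64.

    #include <stdio.h>
    #include <stdint.h>

    /* assumed native 64-bit cmsg header: size_t-wide length field */
    struct cmsghdr64 { uint64_t cmsg_len; int cmsg_level; int cmsg_type; };
    /* assumed compat (32-bit) cmsg header: 32-bit length field */
    struct cmsghdr32 { uint32_t cmsg_len; int cmsg_level; int cmsg_type; };

    int main(void)
    {
            /* 16 vs 12 bytes on x86-64: parsing one layout as the other
             * shifts every following header and length in the buffer. */
            printf("native %zu bytes, compat %zu bytes\n",
                   sizeof(struct cmsghdr64), sizeof(struct cmsghdr32));
            return 0;
    }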
diff --git a/3.2.54/1048_linux-3.2.49.patch b/3.2.54/1048_linux-3.2.49.patch
new file mode 100644
index 0000000..2dab0cf
--- /dev/null
+++ b/3.2.54/1048_linux-3.2.49.patch
@@ -0,0 +1,2970 @@
+diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
+index 475bb4a..65da157 100644
+--- a/Documentation/i2c/busses/i2c-piix4
++++ b/Documentation/i2c/busses/i2c-piix4
+@@ -8,7 +8,7 @@ Supported adapters:
+ Datasheet: Only available via NDA from ServerWorks
+ * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
+ Datasheet: Not publicly available
+- * AMD Hudson-2
++ * AMD Hudson-2, CZ
+ Datasheet: Not publicly available
+ * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
+ Datasheet: Publicly available at the SMSC website http://www.smsc.com
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 83f156e..8659eba 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -159,7 +159,7 @@ S: Maintained
+ F: drivers/net/ethernet/realtek/r8169.c
+
+ 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-serial@vger.kernel.org
+ W: http://serial.sourceforge.net
+ S: Maintained
+@@ -1781,9 +1781,9 @@ X: net/wireless/wext*
+
+ CHAR and MISC DRIVERS
+ M: Arnd Bergmann <arnd@arndb.de>
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+-S: Maintained
++S: Supported
+ F: drivers/char/*
+ F: drivers/misc/*
+
+@@ -2315,7 +2315,7 @@ F: lib/lru_cache.c
+ F: Documentation/blockdev/drbd/
+
+ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
+ S: Supported
+ F: Documentation/kobject.txt
+@@ -6257,15 +6257,16 @@ S: Maintained
+ F: arch/alpha/kernel/srm_env.c
+
+ STABLE BRANCH
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: stable@vger.kernel.org
+-S: Maintained
++S: Supported
++F: Documentation/stable_kernel_rules.txt
+
+ STAGING SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+ L: devel@driverdev.osuosl.org
+-S: Maintained
++S: Supported
+ F: drivers/staging/
+
+ STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
+@@ -6654,8 +6655,8 @@ S: Maintained
+ K: ^Subject:.*(?i)trivial
+
+ TTY LAYER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
+-S: Maintained
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++S: Supported
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
+ F: drivers/tty/*
+ F: drivers/tty/serial/serial_core.c
+@@ -6943,7 +6944,7 @@ S: Maintained
+ F: drivers/usb/serial/digi_acceleport.c
+
+ USB SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ S: Supported
+ F: Documentation/usb/usb-serial.txt
+@@ -6958,9 +6959,8 @@ S: Maintained
+ F: drivers/usb/serial/empeg.c
+
+ USB SERIAL KEYSPAN DRIVER
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+-W: http://www.kroah.com/linux/
+ S: Maintained
+ F: drivers/usb/serial/*keyspan*
+
+@@ -6988,7 +6988,7 @@ F: Documentation/video4linux/sn9c102.txt
+ F: drivers/media/video/sn9c102/
+
+ USB SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ W: http://www.linux-usb.org
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
+@@ -7075,7 +7075,7 @@ F: fs/hppfs/
+
+ USERSPACE I/O (UIO)
+ M: "Hans J. Koch" <hjk@hansjkoch.de>
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ S: Maintained
+ F: Documentation/DocBook/uio-howto.tmpl
+ F: drivers/uio/
+diff --git a/Makefile b/Makefile
+index 299e2eb..2e3d791 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index a559ee7..778d248 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -795,6 +795,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ struct frame_tail __user *tail;
+
+
++ perf_callchain_store(entry, regs->ARM_pc);
+ tail = (struct frame_tail __user *)regs->ARM_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index fb9bb46..2c8890a 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -74,7 +74,7 @@
+ #endif
+
+ int boot_cpuid = 0;
+-int __initdata spinning_secondaries;
++int spinning_secondaries;
+ u64 ppc64_pft_size;
+
+ /* Pick defaults since we might want to patch instructions
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 054cc01..d50a821 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+ /* snapshots of runstate info */
+ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
+
+-/* unused ns of stolen and blocked time */
++/* unused ns of stolen time */
+ static DEFINE_PER_CPU(u64, xen_residual_stolen);
+-static DEFINE_PER_CPU(u64, xen_residual_blocked);
+
+ /* return an consistent snapshot of 64-bit time/counter value */
+ static u64 get64(const u64 *p)
+@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
+ {
+ struct vcpu_runstate_info state;
+ struct vcpu_runstate_info *snap;
+- s64 blocked, runnable, offline, stolen;
++ s64 runnable, offline, stolen;
+ cputime_t ticks;
+
+ get_runstate_snapshot(&state);
+@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
+ snap = &__get_cpu_var(xen_runstate_snapshot);
+
+ /* work out how much time the VCPU has not been runn*ing* */
+- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
+ runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
+ offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
+
+@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
+ ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
+ __this_cpu_write(xen_residual_stolen, stolen);
+ account_steal_ticks(ticks);
+-
+- /* Add the appropriate number of ticks of blocked time,
+- including any left-overs from last time. */
+- blocked += __this_cpu_read(xen_residual_blocked);
+-
+- if (blocked < 0)
+- blocked = 0;
+-
+- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
+- __this_cpu_write(xen_residual_blocked, blocked);
+- account_idle_ticks(ticks);
+ }
+
+ /* Get the TSC speed from Xen */
+diff --git a/block/genhd.c b/block/genhd.c
+index 6edf228..8bd4ef2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -519,7 +519,7 @@ void register_disk(struct gendisk *disk)
+
+ ddev->parent = disk->driverfs_dev;
+
+- dev_set_name(ddev, disk->disk_name);
++ dev_set_name(ddev, "%s", disk->disk_name);
+
+ /* delay uevents, until we scanned partition table */
+ dev_set_uevent_suppress(ddev, 1);
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 54dd4e3..dc9991f 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -477,7 +477,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
+
+ struct crypto_template *crypto_lookup_template(const char *name)
+ {
+- return try_then_request_module(__crypto_lookup_template(name), name);
++ return try_then_request_module(__crypto_lookup_template(name), "%s",
++ name);
+ }
+ EXPORT_SYMBOL_GPL(crypto_lookup_template);
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 87acc23..0445f52 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -302,6 +302,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -318,6 +319,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+
+ /* AMD */
+ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
++ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+ /* AMD is using RAID class only for ahci controllers */
+ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 0e92326..7a949af 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -360,6 +360,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
++ /* SATA Controller IDE (Coleto Creek) */
++ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
+ { } /* terminate list */
+ };
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 3c92dbd..60def03 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1541,8 +1541,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+- ata_link_online(&ap->pmp_link[pmp])) {
++ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 21b80c5..f63a588 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -389,9 +389,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+
+- /* Class code report is unreliable. */
++ /*
++ * Class code report is unreliable and SRST times
++ * out under certain configurations.
++ */
+ if (link->pmp < 5)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
++ link->flags |= ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+
+ /* port 5 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 5)
+@@ -399,20 +403,17 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ ATA_LFLAG_ASSUME_SEMB;
+ }
+ } else if (vendor == 0x1095 && devid == 0x4723) {
+- /* sil4723 quirks */
+- ata_for_each_link(link, ap, EDGE) {
+- /* link reports offline after LPM */
+- link->flags |= ATA_LFLAG_NO_LPM;
+-
+- /* class code report is unreliable */
+- if (link->pmp < 2)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
+-
+- /* the config device at port 2 locks up on SRST */
+- if (link->pmp == 2)
+- link->flags |= ATA_LFLAG_NO_SRST |
+- ATA_LFLAG_ASSUME_ATA;
+- }
++ /*
++ * sil4723 quirks
++ *
++ * Link reports offline after LPM. Class code report is
++ * unreliable. SIMG PMPs never got SRST reliable and the
++ * config device at port 2 locks up on SRST.
++ */
++ ata_for_each_link(link, ap, EDGE)
++ link->flags |= ATA_LFLAG_NO_LPM |
++ ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+ } else if (vendor == 0x1095 && devid == 0x4726) {
+ /* sil4726 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 40a0fcb..5fb6885 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -598,8 +598,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ if (!lo->sock)
+ return -EINVAL;
+
++ lo->disconnect = 1;
++
+ nbd_send_req(lo, &sreq);
+- return 0;
++ return 0;
+ }
+
+ case NBD_CLEAR_SOCK: {
+@@ -629,6 +631,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->sock = SOCKET_I(inode);
+ if (max_part > 0)
+ bdev->bd_invalidated = 1;
++ lo->disconnect = 0; /* we're connected now */
+ return 0;
+ } else {
+ fput(file);
+@@ -675,7 +678,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+
+ mutex_unlock(&lo->tx_lock);
+
+- thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
++ thread = kthread_create(nbd_thread, lo, "%s",
++ lo->disk->disk_name);
+ if (IS_ERR(thread)) {
+ mutex_lock(&lo->tx_lock);
+ return PTR_ERR(thread);
+@@ -700,6 +704,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ set_capacity(lo->disk, 0);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
++ if (lo->disconnect) /* user requested, ignore socket errors */
++ return 0;
+ return lo->harderror;
+ }
+
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 2678b6f..1331740 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2885,7 +2885,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+ if (lba < 0)
+ return -EINVAL;
+
+- cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
++ cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
+ if (cgc->buffer == NULL)
+ return -ENOMEM;
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index e8eedb7..720cace 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -349,10 +349,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pch->lock, flags);
+-
+ tasklet_kill(&pch->task);
+
++ spin_lock_irqsave(&pch->lock, flags);
++
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 8af25a0..810658e 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -383,7 +383,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ sizeof(u64));
+
+ /* Make sure we flush all writes before updating the writeIndex */
+- smp_wmb();
++ wmb();
+
+ /* Now, update the write location */
+ hv_set_next_write_location(outring_info, next_write_location);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index d2d0a2a..44442d5 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -466,7 +466,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+- smp_mb();
++ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
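
Both Hyper-V hunks replace smp_* barriers with full barriers for the same
reason: on a uniprocessor kernel build the smp_* variants compile down to a
bare compiler barrier, but the host reads the ring buffer and the message
page concurrently no matter how many virtual CPUs the guest has. A simplified
sketch of why that matters (the UP definition shown is an approximation of
the kernel's asm/barrier.h, not a verbatim quote):

    #ifndef CONFIG_SMP
    #define smp_wmb() barrier()   /* no CPU fence emitted on UP kernels */
    #endif

    /* The hypervisor is still a concurrent observer, so a real fence
     * is needed even on UP guests: */
    wmb();                        /* always emits a store fence */
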
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 60f593c..dbd4fa5 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -137,6 +137,7 @@ config I2C_PIIX4
+ ATI SB700
+ ATI SB800
+ AMD Hudson-2
++ AMD CZ
+ Serverworks OSB4
+ Serverworks CSB5
+ Serverworks CSB6
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 6d14ac2..14b588c 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -22,7 +22,7 @@
+ Intel PIIX4, 440MX
+ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
+ ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+- AMD Hudson-2
++ AMD Hudson-2, CZ
+ SMSC Victory66
+
+ Note: we assume there can only be one device, with one SMBus interface.
+@@ -481,6 +481,7 @@ static const struct pci_device_id piix4_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 07cb1a6..6cc8e67 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1076,6 +1076,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+
+ /* Large PTE found which maps this address */
+ unmap_size = PTE_PAGE_SIZE(*pte);
++
++ /* Only unmap from the first pte in the page */
++ if ((unmap_size - 1) & bus_addr)
++ break;
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+@@ -1085,7 +1089,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ unmapped += unmap_size;
+ }
+
+- BUG_ON(!is_power_of_2(unmapped));
++ BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+ return unmapped;
+ }
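
The new check relies on a standard power-of-two alignment test: for a
power-of-two size, (size - 1) & addr is non-zero exactly when addr is not
size-aligned, so the loop only clears the large PTE when asked to unmap from
its first page. A small self-contained demonstration with hypothetical
values, compiled as ordinary userspace C:

    #include <stdio.h>
    #include <stdint.h>

    /* Same test as the fix: non-zero means addr is not size-aligned. */
    static int unaligned(uint64_t addr, uint64_t size)
    {
            return ((size - 1) & addr) != 0;
    }

    int main(void)
    {
            uint64_t two_mb = 2ULL << 20;

            printf("%d\n", unaligned(0x200000, two_mb)); /* 0: first pte */
            printf("%d\n", unaligned(0x201000, two_mb)); /* 1: mid-page, so
                                                            skip the unmap */
            return 0;
    }
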
+diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
+index e4b5c03..4f8c3f7 100644
+--- a/drivers/media/dvb/dvb-core/dmxdev.c
++++ b/drivers/media/dvb/dvb-core/dmxdev.c
+@@ -380,10 +380,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
+ buffer2_len);
+ }
+- if (ret < 0) {
+- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
++ if (ret < 0)
+ dmxdevfilter->buffer.error = ret;
+- }
+ if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
+ dmxdevfilter->state = DMXDEV_STATE_DONE;
+ spin_unlock(&dmxdevfilter->dev->lock);
+@@ -419,10 +417,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ if (ret == buffer1_len)
+ ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+- if (ret < 0) {
+- dvb_ringbuffer_flush(buffer);
++ if (ret < 0)
+ buffer->error = ret;
+- }
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up(&buffer->queue);
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed7a5a6..a3bd0ba 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5584,14 +5584,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
+- /* 8168evl does not automatically pad to minimum length. */
+- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+- skb->len < ETH_ZLEN)) {
+- if (skb_padto(skb, ETH_ZLEN))
+- goto err_update_stats;
+- skb_put(skb, ETH_ZLEN - skb->len);
+- }
+-
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
+index 48ab38a..02c939e 100644
+--- a/drivers/net/wan/dlci.c
++++ b/drivers/net/wan/dlci.c
+@@ -385,21 +385,37 @@ static int dlci_del(struct dlci_add *dlci)
+ struct frad_local *flp;
+ struct net_device *master, *slave;
+ int err;
++ bool found = false;
++
++ rtnl_lock();
+
+ /* validate slave device */
+ master = __dev_get_by_name(&init_net, dlci->devname);
+- if (!master)
+- return -ENODEV;
++ if (!master) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ list_for_each_entry(dlp, &dlci_devs, list) {
++ if (dlp->master == master) {
++ found = true;
++ break;
++ }
++ }
++ if (!found) {
++ err = -ENODEV;
++ goto out;
++ }
+
+ if (netif_running(master)) {
+- return -EBUSY;
++ err = -EBUSY;
++ goto out;
+ }
+
+ dlp = netdev_priv(master);
+ slave = dlp->slave;
+ flp = netdev_priv(slave);
+
+- rtnl_lock();
+ err = (*flp->deassoc)(slave, master);
+ if (!err) {
+ list_del(&dlp->list);
+@@ -408,8 +424,8 @@ static int dlci_del(struct dlci_add *dlci)
+
+ dev_put(slave);
+ }
++out:
+ rtnl_unlock();
+-
+ return err;
+ }
+
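
The dlci_del() fix is an instance of a general pattern: take rtnl_lock()
before the device lookup so the device cannot be unregistered underneath
you, and verify the device really belongs to this driver before interpreting
netdev_priv() as your private structure. Sketched with placeholder helpers
(is_dlci_dev() and use() stand in for the list walk and the deassoc call in
the hunk):

    rtnl_lock();                                   /* pin the device list */
    master = __dev_get_by_name(&init_net, name);
    if (master && is_dlci_dev(master))             /* validate ownership   */
            use(netdev_priv(master));              /* before trusting priv */
    rtnl_unlock();
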
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index 3b262ba..c41eb9d 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -3625,7 +3625,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ {
+ int chain;
+- u32 regval;
++ u32 regval, value;
+ u32 ant_div_ctl1;
+ static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
+ AR_PHY_SWITCH_CHAIN_0,
+@@ -3633,7 +3633,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ AR_PHY_SWITCH_CHAIN_2,
+ };
+
+- u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
++ if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
++ ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
++ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
++
++ value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+
+ if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462_10(ah)) {
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+index 4114fe7..4e9b71b 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+@@ -334,6 +334,8 @@
+
+ #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+
++#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
++
+ /*
+ * AGC Field Definitions
+ */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index bcabfbf..5e522e4 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -391,7 +391,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (!caldata) {
+ chan->noisefloor = nf;
+- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ return false;
+ }
+
+@@ -413,6 +412,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
++ ah->caldata->chanmode = chan->chanmode;
+ h = ah->caldata->nfCalHist;
+ default_nf = ath9k_hw_get_default_nf(ah, chan);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 2b8e957..c623527 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1540,7 +1540,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ if (caldata &&
+ (chan->channel != caldata->channel ||
+ (chan->channelFlags & ~CHANNEL_CW_INT) !=
+- (caldata->channelFlags & ~CHANNEL_CW_INT))) {
++ (caldata->channelFlags & ~CHANNEL_CW_INT) ||
++ chan->chanmode != caldata->chanmode)) {
+ /* Operating channel changed, reset channel calibration data */
+ memset(caldata, 0, sizeof(*caldata));
+ ath9k_init_nfcal_hist_buffer(ah, chan);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 0c65a09..dc774cd 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -352,6 +352,7 @@ struct ath9k_rtt_hist {
+ struct ath9k_hw_cal_data {
+ u16 channel;
+ u32 channelFlags;
++ u32 chanmode;
+ int32_t CalValid;
+ int8_t iCoff;
+ int8_t qCoff;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index df3e27c..a59267a 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1688,13 +1688,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+- /*
+- * Preserve the current channel values, before updating
+- * the same channel
+- */
+- if (ah->curchan && (old_pos == pos))
+- ath9k_hw_getnf(ah, ah->curchan);
+-
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
+
+diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
+index b97a40e..36a7ce3 100644
+--- a/drivers/net/wireless/b43/Kconfig
++++ b/drivers/net/wireless/b43/Kconfig
+@@ -28,12 +28,12 @@ config B43
+
+ config B43_BCMA
+ bool "Support for BCMA bus"
+- depends on B43 && BCMA
++ depends on B43 && (BCMA = y || BCMA = B43)
+ default y
+
+ config B43_SSB
+ bool
+- depends on B43 && SSB
++ depends on B43 && (SSB = y || SSB = B43)
+ default y
+
+ # Auto-select SSB PCI-HOST support, if possible
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+index 1e851aa..17a8e96 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ tx_agc[RF90_PATH_A] = 0x10101010;
+ tx_agc[RF90_PATH_B] = 0x10101010;
+ } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+- TXHIGHPWRLEVEL_LEVEL1) {
++ TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+ } else{
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 0984dcf..016ef86 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -367,6 +367,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
++ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+ };
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cab24f7..f0c8c5d 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1123,6 +1123,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+
+ /*
+ * Serverworks CSB5 IDE does not fully support native mode
+diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
+index ea09ff2..5317d94 100644
+--- a/drivers/rtc/rtc-rv3029c2.c
++++ b/drivers/rtc/rtc-rv3029c2.c
+@@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
+ dev_dbg(&client->dev, "alarm IRQ armed\n");
+ } else {
+ /* disable AIE irq */
+- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
++ ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 4f1b10b..3743ac9 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -3,7 +3,7 @@
+ *
+ * Module interface and handling of zfcp data structures.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ /*
+@@ -23,6 +23,7 @@
+ * Christof Schmitt
+ * Martin Petermann
+ * Sven Schuetz
++ * Steffen Maier
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -415,6 +416,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
++ adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
++
+ if (!zfcp_scsi_adapter_register(adapter))
+ return adapter;
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 8c849f0..8bfd579 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+ *
+ * Implementation of FSF commands.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -455,11 +455,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+
+ fc_host_port_name(shost) = nsp->fl_wwpn;
+ fc_host_node_name(shost) = nsp->fl_wwnn;
+- fc_host_port_id(shost) = ntoh24(bottom->s_id);
+- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+- adapter->hydra_version = bottom->adapter_type;
+ adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+ (u16)FSF_STATUS_READS_RECOM);
+@@ -467,6 +464,18 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ if (fc_host_permanent_port_name(shost) == -1)
+ fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
++ zfcp_scsi_set_prot(adapter);
++
++ /* no error return above here, otherwise must fix call chains */
++ /* do not evaluate invalid fields */
++ if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
++ return 0;
++
++ fc_host_port_id(shost) = ntoh24(bottom->s_id);
++ fc_host_speed(shost) = bottom->fc_link_speed;
++
++ adapter->hydra_version = bottom->adapter_type;
++
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+@@ -488,8 +497,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ return -EIO;
+ }
+
+- zfcp_scsi_set_prot(adapter);
+-
+ return 0;
+ }
+
+@@ -534,8 +541,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ adapter->hydra_version = 0;
+
++		/* avoid adapter shutdown so that we can still recognize
++		 * events such as LINK UP */
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
++ &adapter->status);
+ zfcp_fsf_link_down_info_eval(req,
+ &qtcb->header.fsf_status_qual.link_down_info);
++ if (zfcp_fsf_exchange_config_evaluate(req))
++ return;
+ break;
+ default:
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index b79576b..7b35364 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -311,8 +311,12 @@ static struct scsi_host_template zfcp_scsi_host_template = {
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+- .sg_tablesize = 1, /* adjusted later */
+- .max_sectors = 8, /* adjusted later */
++ .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
++ /* GCD, adjusted later */
++ .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
++ /* GCD, adjusted later */
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7c471eb..fc5a2ef 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4886,10 +4886,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ sense, sense_handle);
+ }
+
+- for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
+- dma_free_coherent(&instance->pdev->dev,
+- kern_sge32[i].length,
+- kbuff_arr[i], kern_sge32[i].phys_addr);
++ for (i = 0; i < ioc->sge_count; i++) {
++ if (kbuff_arr[i])
++ dma_free_coherent(&instance->pdev->dev,
++ kern_sge32[i].length,
++ kbuff_arr[i],
++ kern_sge32[i].phys_addr);
+ }
+
+ megasas_return_cmd(instance, cmd);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 17de348..a11a909 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -79,10 +79,6 @@ static int msix_disable = -1;
+ module_param(msix_disable, int, 0);
+ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+-static int missing_delay[2] = {-1, -1};
+-module_param_array(missing_delay, int, NULL, 0);
+-MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+-
+ static int mpt2sas_fwfault_debug;
+ MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
+@@ -2104,7 +2100,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ }
+
+ /**
+- * _base_update_missing_delay - change the missing delay timers
++ * mpt2sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+@@ -2115,8 +2111,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ * delay, as well as the io missing delay. This should be called at driver
+ * load time.
+ */
+-static void
+-_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++void
++mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+ {
+ u16 dmd, dmd_new, dmd_orignal;
+@@ -4302,9 +4298,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ if (r)
+ goto out_free_resources;
+
+- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+- _base_update_missing_delay(ioc, missing_delay[0],
+- missing_delay[1]);
+
+ return 0;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 3c3babc..aa4daf6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -1029,6 +1029,9 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
+
+ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+
++void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++ u16 device_missing_delay, u8 io_missing_delay);
++
+ int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
+ /* scsih shared API */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 2824a90..987c6d6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF;
+ module_param(max_sectors, ushort, 0);
+ MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+
++static int missing_delay[2] = {-1, -1};
++module_param_array(missing_delay, int, NULL, 0);
++MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
++
+ /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+ #define MPT2SAS_MAX_LUN (16895)
+ static int max_lun = MPT2SAS_MAX_LUN;
+@@ -3930,11 +3934,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+-/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
+-/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
+- */
+- mpi_control |= (0x500);
+-
++ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ /* Make sure Device is not raid volume.
+@@ -7006,11 +7006,14 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
++ u8 retry_count;
+
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n",
++ ioc->name);
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+@@ -7019,19 +7022,39 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+- else
++ else {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
++ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n",
++ ioc->name);
++
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices phys disk start\n", ioc->name);
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+@@ -7041,6 +7064,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+@@ -7050,17 +7080,46 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+- _scsih_add_device(ioc, handle, 0, 1);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 1)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n",
++ ioc->name);
++
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name);
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+@@ -7069,6 +7128,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+@@ -7078,18 +7144,38 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n",
++ ioc->name);
++
+ skip_to_sas:
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n",
++ ioc->name);
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+@@ -7099,6 +7185,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
++ " ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+@@ -7109,12 +7202,31 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+- _scsih_add_device(ioc, handle, 0, 0);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 0)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n",
++ ioc->name);
++
+ printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
+ }
+
+@@ -7206,7 +7318,9 @@ _firmware_event_work(struct work_struct *work)
+ case MPT2SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+
+-
++ if (missing_delay[0] != -1 && missing_delay[1] != -1)
++ mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
++ missing_delay[1]);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+ "from worker thread\n", ioc->name));
+diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
+index d4ed9eb..caac1b2 100644
+--- a/drivers/scsi/osd/osd_uld.c
++++ b/drivers/scsi/osd/osd_uld.c
+@@ -465,7 +465,7 @@ static int osd_probe(struct device *dev)
+ oud->class_dev.class = &osd_uld_class;
+ oud->class_dev.parent = dev;
+ oud->class_dev.release = __remove;
+- error = dev_set_name(&oud->class_dev, disk->disk_name);
++ error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
+ if (error) {
+ OSD_ERR("dev_set_name failed => %d\n", error);
+ goto err_put_cdev;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f44d633..6dace1a 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -138,6 +138,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ char *buffer_data;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
++ static const char temp[] = "temporary ";
+ int len;
+
+ if (sdp->type != TYPE_DISK)
+@@ -146,6 +147,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ * it's not worth the risk */
+ return -EINVAL;
+
++ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
++ buf += sizeof(temp) - 1;
++ sdkp->cache_override = 1;
++ } else {
++ sdkp->cache_override = 0;
++ }
++
+ for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
+ len = strlen(sd_cache_types[i]);
+ if (strncmp(sd_cache_types[i], buf, len) == 0 &&
+@@ -158,6 +166,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ return -EINVAL;
+ rcd = ct & 0x01 ? 1 : 0;
+ wce = ct & 0x02 ? 1 : 0;
++
++ if (sdkp->cache_override) {
++ sdkp->WCE = wce;
++ sdkp->RCD = rcd;
++ return count;
++ }
++
+ if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, NULL))
+ return -EINVAL;
+@@ -2037,6 +2052,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ int old_rcd = sdkp->RCD;
+ int old_dpofua = sdkp->DPOFUA;
+
++
++ if (sdkp->cache_override)
++ return;
++
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+@@ -2518,6 +2537,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
+ sdkp->capacity = 0;
+ sdkp->media_present = 1;
+ sdkp->write_prot = 0;
++ sdkp->cache_override = 0;
+ sdkp->WCE = 0;
+ sdkp->RCD = 0;
+ sdkp->ATO = 0;
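
With this change, writing a value prefixed with "temporary " to the
cache_type attribute updates only the driver's WCE/RCD view and skips the
MODE SELECT to the device; the prefix match uses sizeof(temp) - 1 because
sizeof a string-literal array counts the trailing NUL. A tiny userspace
demonstration of that idiom (the buffer contents are hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            static const char temp[] = "temporary ";  /* sizeof counts NUL */
            const char *buf = "temporary write back";

            if (strncmp(buf, temp, sizeof(temp) - 1) == 0)
                    buf += sizeof(temp) - 1;           /* strip the prefix */

            printf("%s\n", buf);                       /* -> "write back" */
            return 0;
    }
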
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 4163f29..e3e3cd2 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -64,6 +64,7 @@ struct scsi_disk {
+ u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
+ unsigned ATO : 1; /* state of disk ATO bit */
++ unsigned cache_override : 1; /* temp override of WCE,RCD */
+ unsigned WCE : 1; /* state of disk WCE bit */
+ unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 2594a31..926d483 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -541,13 +541,20 @@ out:
+ */
+ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ {
+- if (unlikely(
+- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
+- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
+- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
++ u64 start, end, bound;
++
++ /* unaligned request */
++ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
++ return 0;
++ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
++ return 0;
+
++ start = bio->bi_sector;
++ end = start + (bio->bi_size >> SECTOR_SHIFT);
++ bound = zram->disksize >> SECTOR_SHIFT;
++	/* out of range */
++ if (unlikely(start >= bound || end >= bound || start > end))
+ return 0;
+- }
+
+ /* I/O request is valid */
+ return 1;
+@@ -702,7 +709,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
++ down_write(&zram->lock);
+ zram_free_page(zram, index);
++ up_write(&zram->lock);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+ }
+
+@@ -713,7 +722,7 @@ static const struct block_device_operations zram_devops = {
+
+ static int create_device(struct zram *zram, int device_id)
+ {
+- int ret = 0;
++ int ret = -ENOMEM;
+
+ init_rwsem(&zram->lock);
+ init_rwsem(&zram->init_lock);
+@@ -723,7 +732,6 @@ static int create_device(struct zram *zram, int device_id)
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+ goto out;
+ }
+
+@@ -733,11 +741,9 @@ static int create_device(struct zram *zram, int device_id)
+ /* gendisk structure */
+ zram->disk = alloc_disk(1);
+ if (!zram->disk) {
+- blk_cleanup_queue(zram->queue);
+ pr_warning("Error allocating disk structure for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+- goto out;
++ goto out_free_queue;
+ }
+
+ zram->disk->major = zram_major;
+@@ -766,11 +772,17 @@ static int create_device(struct zram *zram, int device_id)
+ &zram_disk_attr_group);
+ if (ret < 0) {
+ pr_warning("Error creating sysfs group");
+- goto out;
++ goto out_free_disk;
+ }
+
+ zram->init_done = 0;
++ return 0;
+
++out_free_disk:
++ del_gendisk(zram->disk);
++ put_disk(zram->disk);
++out_free_queue:
++ blk_cleanup_queue(zram->queue);
+ out:
+ return ret;
+ }
+@@ -846,9 +858,11 @@ static void __exit zram_exit(void)
+ for (i = 0; i < zram_num_devices; i++) {
+ zram = &zram_devices[i];
+
++ get_disk(zram->disk);
+ destroy_device(zram);
+ if (zram->init_done)
+ zram_reset_device(zram);
++ put_disk(zram->disk);
+ }
+
+ unregister_blkdev(zram_major, "zram");
+diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
+index e5cd246..87f2fec 100644
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -107,8 +107,9 @@ struct zram {
+ void *compress_buffer;
+ struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
+- struct rw_semaphore lock; /* protect compression buffers and table
+- * against concurrent read and writes */
++ struct rw_semaphore lock; /* protect compression buffers, table,
++ * 32bit stat counters against concurrent
++ * notifications, reads and writes */
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
+index 0ea8ed2..1fae1e9 100644
+--- a/drivers/staging/zram/zram_sysfs.c
++++ b/drivers/staging/zram/zram_sysfs.c
+@@ -186,10 +186,12 @@ static ssize_t mem_used_total_show(struct device *dev,
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
++ down_read(&zram->init_lock);
+ if (zram->init_done) {
+ val = xv_get_total_size_bytes(zram->mem_pool) +
+ ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
+ }
++ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 83dcf49..3b80285 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -419,7 +419,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+- snprintf(auth->name, PAGE_SIZE, "%s", page); \
++ snprintf(auth->name, sizeof(auth->name), "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 6986256..6c9bcdf 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -4083,10 +4083,6 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+- { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+- 0x1000, 0x0012,
+- 0, 0, pbn_b0_bt_2_115200 },
+-
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 8d70fbc..c0b4872 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -940,22 +940,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+ {
+ u8 fcr = ioread8(priv->membase + UART_FCR);
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++ char *error_msg[5] = {};
++ int i = 0;
+
+ /* Reset FIFO */
+ fcr |= UART_FCR_CLEAR_RCVR;
+ iowrite8(fcr, priv->membase + UART_FCR);
+
+ if (lsr & PCH_UART_LSR_ERR)
+- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
++ error_msg[i++] = "Error data in FIFO\n";
+
+- if (lsr & UART_LSR_FE)
+- dev_err(&priv->pdev->dev, "Framing Error\n");
++ if (lsr & UART_LSR_FE) {
++ port->icount.frame++;
++ error_msg[i++] = " Framing Error\n";
++ }
+
+- if (lsr & UART_LSR_PE)
+- dev_err(&priv->pdev->dev, "Parity Error\n");
++ if (lsr & UART_LSR_PE) {
++ port->icount.parity++;
++ error_msg[i++] = " Parity Error\n";
++ }
+
+- if (lsr & UART_LSR_OE)
+- dev_err(&priv->pdev->dev, "Overrun Error\n");
++ if (lsr & UART_LSR_OE) {
++ port->icount.overrun++;
++ error_msg[i++] = " Overrun Error\n";
++ }
++
++ if (tty == NULL) {
++ for (i = 0; error_msg[i] != NULL; i++)
++ dev_err(&priv->pdev->dev, error_msg[i]);
++ }
+ }
+
+ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
+index a5570b6..8d7fb6b 100644
+--- a/drivers/usb/gadget/f_mass_storage.c
++++ b/drivers/usb/gadget/f_mass_storage.c
+@@ -512,6 +512,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+ /* Caller must hold fsg->lock */
+ static void wakeup_thread(struct fsg_common *common)
+ {
++ smp_wmb(); /* ensure the write of bh->state is complete */
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+@@ -731,6 +732,7 @@ static int sleep_thread(struct fsg_common *common)
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
++ smp_rmb(); /* ensure the latest bh->state is visible */
+ return rc;
+ }
+
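
These two barriers pair up: the producer must make its writes to bh->state
visible before it publishes the wakeup flag, and the consumer must settle its
read of the flag before it looks at bh->state. A pattern sketch, not
compilable on its own (BH_STATE_FULL and process() are placeholders for the
driver's real state handling):

    /* Producer (wakeup_thread): publish the data, then the flag. */
    bh->state = BH_STATE_FULL;
    smp_wmb();                        /* bh->state visible before the flag */
    common->thread_wakeup_needed = 1;

    /* Consumer (sleep_thread): consume the flag, then the data. */
    common->thread_wakeup_needed = 0;
    smp_rmb();                        /* flag settled before bh->state read */
    process(bh->state);
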
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5018e33..ec73541 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -271,6 +271,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
+ ctx->size += CTX_SIZE(xhci->hcc_params);
+
+ ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
++ if (!ctx->bytes) {
++ kfree(ctx);
++ return NULL;
++ }
+ memset(ctx->bytes, 0, ctx->size);
+ return ctx;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8ea37bc..b8365a7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362 0x9010
+-#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+-#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+
+@@ -744,8 +742,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 5535c3a..e8c4f0c 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -37,7 +37,13 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
+ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
+- {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
++ {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
+ {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 5855d17..9d8feac 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -42,6 +42,7 @@
+ #include <linux/kd.h>
+ #include <linux/slab.h>
+ #include <linux/vt_kern.h>
++#include <linux/sched.h>
+ #include <linux/selection.h>
+ #include <linux/spinlock.h>
+ #include <linux/ioport.h>
+@@ -1124,11 +1125,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ if (arg) {
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+
+ /*
+ * In 512-character mode, the character map is not contiguous if
+@@ -1139,11 +1144,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ charmap += 2 * cmapsz;
+ arg += cmapsz;
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+ }
+ }
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 53ab273..c103267 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -55,17 +55,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
+ struct backing_dev_info *dst)
+ {
+ struct backing_dev_info *old = inode->i_data.backing_dev_info;
++ bool wakeup_bdi = false;
+
+ if (unlikely(dst == old)) /* deadlock avoidance */
+ return;
+ bdi_lock_two(&old->wb, &dst->wb);
+ spin_lock(&inode->i_lock);
+ inode->i_data.backing_dev_info = dst;
+- if (inode->i_state & I_DIRTY)
++ if (inode->i_state & I_DIRTY) {
++ if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
++ wakeup_bdi = true;
+ list_move(&inode->i_wb_list, &dst->wb.b_dirty);
++ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&old->wb.list_lock);
+ spin_unlock(&dst->wb.list_lock);
++
++ if (wakeup_bdi)
++ bdi_wakeup_thread_delayed(dst);
+ }
+
+ sector_t blkdev_max_block(struct block_device *bdev)
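
The wakeup_bdi flag follows a common locking idiom: decide inside the
critical section, but perform the side effect (waking the flusher thread)
only after every lock has been dropped. Distilled from the hunk
(needs_flusher() is a placeholder for the bdi_cap_writeback_dirty() and
wb_has_dirty_io() test above):

    bool wakeup_bdi = false;

    spin_lock(&lock);
    if (needs_flusher(dst))
            wakeup_bdi = true;        /* record the decision only */
    list_move(&inode->i_wb_list, &dst->wb.b_dirty);
    spin_unlock(&lock);

    if (wakeup_bdi)                   /* act outside the locks */
            bdi_wakeup_thread_delayed(dst);
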
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index b48f15f..de268a8 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -70,8 +70,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ /*
+ * express utilization in terms of large blocks to avoid
+ * overflow on 32-bit machines.
++ *
++ * NOTE: for the time being, we make bsize == frsize to humor
++ * not-yet-ancient versions of glibc that are broken.
++ * Someday, we will probably want to report a real block
++ * size... whatever that may mean for a network file system!
+ */
+ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
++ buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
+ buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+@@ -79,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_files = le64_to_cpu(st.num_objects);
+ buf->f_ffree = -1;
+ buf->f_namelen = NAME_MAX;
+- buf->f_frsize = PAGE_CACHE_SIZE;
+
+ /* leave fsid little-endian, regardless of host endianness */
+ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index edcbf37..a097817 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -21,7 +21,7 @@
+
+ /* large granularity for statfs utilization stats to facilitate
+ * large volume sizes on 32-bit machines. */
+-#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
++#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
+ #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+
+ #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index 6d02fd5..aab18fe 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -323,14 +323,14 @@ UniToupper(register wchar_t uc)
+ /*
+ * UniStrupr: Upper case a unicode string
+ */
+-static inline wchar_t *
+-UniStrupr(register wchar_t *upin)
++static inline __le16 *
++UniStrupr(register __le16 *upin)
+ {
+- register wchar_t *up;
++ register __le16 *up;
+
+ up = upin;
+ while (*up) { /* For all characters */
+- *up = UniToupper(*up);
++ *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+ up++;
+ }
+ return upin; /* Return input pointer */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 5d9b9ac..cdcd665 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -394,7 +394,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ int rc = 0;
+ int len;
+ char nt_hash[CIFS_NTHASH_SIZE];
+- wchar_t *user;
++ __le16 *user;
+ wchar_t *domain;
+ wchar_t *server;
+
+@@ -419,7 +419,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ return rc;
+ }
+
+- /* convert ses->user_name to unicode and uppercase */
++ /* convert ses->user_name to unicode */
+ len = strlen(ses->user_name);
+ user = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if (user == NULL) {
+@@ -427,7 +427,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ rc = -ENOMEM;
+ return rc;
+ }
+- len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
++ len = cifs_strtoUCS(user, ses->user_name, len, nls_cp);
+ UniStrupr(user);
+
+ rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
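
The UniStrupr() fix is about byte order: the buffer holds little-endian
UTF-16, so each unit must be converted to CPU order before the case table is
consulted and converted back afterwards; on big-endian hosts the old code
uppercased byte-swapped values and corrupted the NTLMv2 hash input. A
userspace sketch of the round-trip, with open-coded helpers standing in for
the kernel's le16_to_cpu()/cpu_to_le16():

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t le16_load(const unsigned char *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static void le16_store(unsigned char *p, uint16_t v)
    {
            p[0] = v & 0xff;
            p[1] = v >> 8;
    }

    int main(void)
    {
            unsigned char ch[2] = { 'a', 0x00 };   /* 'a' as LE UTF-16 */
            uint16_t v = le16_load(ch);            /* to CPU order */

            if (v >= 'a' && v <= 'z')              /* toy case table */
                    v -= 0x20;
            le16_store(ch, v);                     /* back to LE */

            printf("%c\n", ch[0]);                 /* 'A' on any host */
            return 0;
    }
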
+diff --git a/fs/exec.c b/fs/exec.c
+index 312e297..a2d0e51 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1159,13 +1159,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ set_dumpable(current->mm, suid_dumpable);
+ }
+
+- /*
+- * Flush performance counters when crossing a
+- * security domain:
+- */
+- if (!get_dumpable(current->mm))
+- perf_event_exit_task(current);
+-
+ /* An exec changes our domain. We are no longer part of the thread
+ group */
+
+@@ -1229,6 +1222,15 @@ void install_exec_creds(struct linux_binprm *bprm)
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
++
++ /*
++ * Disable monitoring for regular users
++ * when executing setuid binaries. Must
++ * wait until new credentials are committed
++ * by commit_creds() above
++ */
++ if (get_dumpable(current->mm) != SUID_DUMP_USER)
++ perf_event_exit_task(current);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index 642dc6d..1272dfb 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+ (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+ +((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse (bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext3fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ce0bc25..3e8fc80 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4801,7 +4801,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ return error;
+- physical = iloc.bh->b_blocknr << blockbits;
++ physical = (__u64)iloc.bh->b_blocknr << blockbits;
+ offset = EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize;
+ physical += offset;
+@@ -4809,7 +4809,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
+ } else { /* external block */
+- physical = EXT4_I(inode)->i_file_acl << blockbits;
++ physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
+ length = inode->i_sb->s_blocksize;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 025b4b6..45778a6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4335,7 +4335,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+ {
+ struct inode *inode;
+- unsigned long delalloc_blocks;
++ unsigned long long delalloc_blocks;
+
+ inode = dentry->d_inode;
+ generic_fillattr(inode, stat);
+@@ -4352,7 +4352,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ */
+ delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+
+- stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
++ stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+ return 0;
+ }
+
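
Both ext4 hunks fix the same class of bug: on 32-bit kernels the block number
is an unsigned long, so shifting it left by the block-size bits truncates
before the result is widened to 64 bits; casting first (or declaring the
variable unsigned long long) widens before the shift. A compact demonstration
with hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t blocknr = 0x500000;        /* a block past 4 GiB worth */
            unsigned blockbits = 12;            /* 4 KiB blocks */

            uint32_t bad = blocknr << blockbits;            /* truncates */
            uint64_t good = (uint64_t)blocknr << blockbits; /* widen first */

            printf("bad=0x%x good=0x%llx\n",    /* bad=0x0 good=0x500000000 */
                   bad, (unsigned long long)good);
            return 0;
    }
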
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 88f97e5..3ca3b7f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ + ((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse(bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext4fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
+index a790821..ea3d1ca 100644
+--- a/fs/hpfs/map.c
++++ b/fs/hpfs/map.c
+@@ -17,7 +17,8 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+ struct quad_buffer_head *qbh, char *id)
+ {
+ secno sec;
+- if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
++ unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
++ if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
+ hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
+ return NULL;
+ }
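
The new bound is the number of bitmap bands, i.e. the filesystem size in
sectors divided by 16384 and rounded up; (x + 0x3fff) >> 14 is the usual
ceil-divide by a power of two, and the companion super.c hunk caps sb_fs_size
below 0x80000000 so the addition cannot wrap. Worked through with a
hypothetical size:

    #include <stdio.h>

    int main(void)
    {
            unsigned fs_size = 100000;                    /* sectors */
            unsigned n_bands = (fs_size + 0x3fff) >> 14;  /* ceil(x/16384) */

            printf("%u\n", n_bands);                      /* -> 7, since
                                                  6*16384 < 100000 <= 7*16384 */
            return 0;
    }
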
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 98580a3..f760c15 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -553,7 +553,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+ sbi->sb_cp_table = NULL;
+ sbi->sb_c_bitmap = -1;
+ sbi->sb_max_fwd_alloc = 0xffffff;
+-
++
++ if (sbi->sb_fs_size >= 0x80000000) {
++ hpfs_error(s, "invalid size in superblock: %08x",
++ (unsigned)sbi->sb_fs_size);
++ goto bail4;
++ }
++
+ /* Load bitmap directory */
+ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
+ goto bail4;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6ac5bb1..18ea4d9 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -470,10 +470,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+ &transaction->t_outstanding_credits);
+ if (atomic_dec_and_test(&transaction->t_updates))
+ wake_up(&journal->j_wait_updates);
++ tid = transaction->t_tid;
+ spin_unlock(&transaction->t_handle_lock);
+
+ jbd_debug(2, "restarting handle %p\n", handle);
+- tid = transaction->t_tid;
+ need_to_start = !tid_geq(journal->j_commit_request, tid);
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
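
The one-line move snapshots t_tid while t_handle_lock is still held; once the
lock is dropped the transaction may be committed and recycled, so a later
dereference races with that. The shape of the fix, distilled (a sketch, not
the full function):

    spin_lock(&transaction->t_handle_lock);
    /* ... drop our handle's hold on the transaction ... */
    tid = transaction->t_tid;       /* snapshot under the lock */
    spin_unlock(&transaction->t_handle_lock);

    /* From here on use the local tid; the transaction may be gone. */
    need_to_start = !tid_geq(journal->j_commit_request, tid);
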
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316..99625b8 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -177,8 +177,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
+ */
+ memcpy(p, argp->p, avail);
+ /* step to next page */
+- argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
++ argp->p = page_address(argp->pagelist[0]);
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
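
The swapped lines fix a stale-pointer bug: the old order mapped pagelist[0]
and only then advanced the list, so argp->p pointed back into the page that
had just been consumed. Side by side, with the dereference order made
explicit:

    /* Old: p re-points at the page we just finished copying from. */
    argp->p = page_address(argp->pagelist[0]);
    argp->pagelist++;

    /* New: step to the next page first, then map it. */
    argp->pagelist++;
    argp->p = page_address(argp->pagelist[0]);
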
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 0d5ea9c..bef187b 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -6499,6 +6499,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
+ }
+
+ new_oi = OCFS2_I(args->new_inode);
++ /*
++ * Adjust extent record count to reserve space for extended attribute.
++ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
++ */
++ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
++ !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
++ struct ocfs2_extent_list *el = &new_di->id2.i_list;
++ le16_add_cpu(&el->l_count, -(inline_size /
++ sizeof(struct ocfs2_extent_rec)));
++ }
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 6834920..aaebf0f 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
+ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ {
+ int err, over = 0;
++ loff_t pos = file->f_pos;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+ struct inode *dir = file->f_path.dentry->d_inode;
+ struct ubifs_info *c = dir->i_sb->s_fs_info;
+
+- dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
++ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
+
+- if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
++ if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
+ /*
+ * The directory was seek'ed to a senseless position or there
+ * are no more entries.
+ */
+ return 0;
+
++ if (file->f_version == 0) {
++ /*
++ * The file was seek'ed, which means that @file->private_data
++ * is now invalid. This may also be just the first
++ * 'ubifs_readdir()' invocation, in which case
++ * @file->private_data is NULL, and the below code is
++ * basically a no-op.
++ */
++ kfree(file->private_data);
++ file->private_data = NULL;
++ }
++
++ /*
++ * 'generic_file_llseek()' unconditionally sets @file->f_version to
++ * zero, and we use this for detecting whether the file was seek'ed.
++ */
++ file->f_version = 1;
++
+ /* File positions 0 and 1 correspond to "." and ".." */
+- if (file->f_pos == 0) {
++ if (pos == 0) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
+ if (over)
+ return 0;
+- file->f_pos = 1;
++ file->f_pos = pos = 1;
+ }
+
+- if (file->f_pos == 1) {
++ if (pos == 1) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, "..", 2, 1,
+ parent_ino(file->f_path.dentry), DT_DIR);
+@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ goto out;
+ }
+
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ if (!dent) {
+ /*
+ * The directory was seek'ed to and is now readdir'ed.
+- * Find the entry corresponding to @file->f_pos or the
+- * closest one.
++ * Find the entry corresponding to @pos or the closest one.
+ */
+- dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
++ dent_key_init_hash(c, &key, dir->i_ino, pos);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ ubifs_inode(dir)->creat_sqnum);
+
+ nm.len = le16_to_cpu(dent->nlen);
+- over = filldir(dirent, dent->name, nm.len, file->f_pos,
++ over = filldir(dirent, dent->name, nm.len, pos,
+ le64_to_cpu(dent->inum),
+ vfs_dent_type(dent->type));
+ if (over)
+@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ }
+
+ kfree(file->private_data);
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ cond_resched();
++
++ if (file->f_version == 0)
++ /*
++ * The file was seek'ed meanwhile; let's return and start
++ * reading direntries from the new position on the next
++ * invocation.
++ */
++ return 0;
+ }
+
+ out:
+@@ -456,15 +482,13 @@ out:
+
+ kfree(file->private_data);
+ file->private_data = NULL;
++ /* 2 is a special value indicating that there are no more direntries */
+ file->f_pos = 2;
+ return 0;
+ }
+
+-/* If a directory is seeked, we have to free saved readdir() state */
+ static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
+ {
+- kfree(file->private_data);
+- file->private_data = NULL;
+ return generic_file_llseek(file, offset, origin);
+ }
+
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 9bab75f..ace0984 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -531,16 +531,54 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
+ return cgrp->subsys[subsys_id];
+ }
+
+-/*
+- * function to get the cgroup_subsys_state which allows for extra
+- * rcu_dereference_check() conditions, such as locks used during the
+- * cgroup_subsys::attach() methods.
++/**
++ * task_css_set_check - obtain a task's css_set with extra access conditions
++ * @task: the task to obtain css_set for
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * A task's css_set is RCU protected, initialized and exited while holding
++ * task_lock(), and can only be modified while holding both cgroup_mutex
++ * and task_lock() while the task is alive. This macro verifies that the
++ * caller is inside proper critical section and returns @task's css_set.
++ *
++ * The caller can also specify additional allowed conditions via @__c, such
++ * as locks used during the cgroup_subsys::attach() methods.
++ */
++#define task_css_set_check(task, __c) \
++ rcu_dereference_check((task)->cgroups, \
++ lockdep_is_held(&(task)->alloc_lock) || \
++ cgroup_lock_is_held() || (__c))
++
++/**
++ * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
++ * synchronization rules are the same as task_css_set_check().
+ */
+ #define task_subsys_state_check(task, subsys_id, __c) \
+- rcu_dereference_check(task->cgroups->subsys[subsys_id], \
+- lockdep_is_held(&task->alloc_lock) || \
+- cgroup_lock_is_held() || (__c))
++ task_css_set_check((task), (__c))->subsys[(subsys_id)]
++
++/**
++ * task_css_set - obtain a task's css_set
++ * @task: the task to obtain css_set for
++ *
++ * See task_css_set_check().
++ */
++static inline struct css_set *task_css_set(struct task_struct *task)
++{
++ return task_css_set_check(task, false);
++}
+
++/**
++ * task_subsys_state - obtain css for (task, subsys)
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ *
++ * See task_subsys_state_check().
++ */
+ static inline struct cgroup_subsys_state *
+ task_subsys_state(struct task_struct *task, int subsys_id)
+ {
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index a2227f7..32697c1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -327,6 +327,17 @@ static inline unsigned hstate_index_to_shift(unsigned index)
+ return hstates[index].order + PAGE_SHIFT;
+ }
+
++pgoff_t __basepage_index(struct page *page);
++
++/* Return page->index in PAGE_SIZE units */
++static inline pgoff_t basepage_index(struct page *page)
++{
++ if (!PageCompound(page))
++ return page->index;
++
++ return __basepage_index(page);
++}
++
+ #else
+ struct hstate {};
+ #define alloc_huge_page_node(h, nid) NULL
+@@ -345,6 +356,11 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
+ return 1;
+ }
+ #define hstate_index_to_shift(index) 0
++
++static inline pgoff_t basepage_index(struct page *page)
++{
++ return page->index;
++}
+ #endif
+
+ #endif /* _LINUX_HUGETLB_H */
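The basepage_index() helper added here converts a compound page's index from huge-page units to base-page (PAGE_SIZE) units; the out-of-line __basepage_index() appears later in this patch, in mm/hugetlb.c. The arithmetic is easy to verify in isolation; a sketch assuming 4 KiB base pages and a 2 MiB huge page (compound order 9):

#include <assert.h>

/* index of a base page inside a file mapped with compound pages */
static unsigned long demo_basepage_index(unsigned long head_index,
					 unsigned int compound_order,
					 unsigned long tail_offset)
{
	return (head_index << compound_order) + tail_offset;
}

int main(void)
{
	/* 3rd huge page of the mapping, 5th 4 KiB page inside it */
	assert(demo_basepage_index(3, 9, 5) == 3 * 512 + 5);
	return 0;
}

This is what lets the futex hunk below key shared futexes inside huge pages correctly: tail pages of a compound page have no meaningful page->index of their own.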
+diff --git a/include/linux/nbd.h b/include/linux/nbd.h
+index d146ca1..e6fe174 100644
+--- a/include/linux/nbd.h
++++ b/include/linux/nbd.h
+@@ -68,6 +68,7 @@ struct nbd_device {
+ u64 bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
+ int xmit_timeout;
++ int disconnect; /* a disconnect has been requested by user */
+ };
+
+ #endif
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 9b9b2aa..3cfcfea 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -819,8 +819,7 @@ struct perf_event {
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+- int mmap_locked;
+- struct user_struct *mmap_user;
++
+ struct ring_buffer *rb;
+ struct list_head rb_entry;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9f21915..8be9b746 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -185,9 +185,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+ static void update_context_time(struct perf_event_context *ctx);
+ static u64 perf_event_time(struct perf_event *event);
+
+-static void ring_buffer_attach(struct perf_event *event,
+- struct ring_buffer *rb);
+-
+ void __weak perf_event_print_debug(void) { }
+
+ extern __weak const char *perf_pmu_name(void)
+@@ -714,8 +711,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
+ {
+ struct perf_event_context *ctx;
+
+- rcu_read_lock();
+ retry:
++ /*
++ * One of the few rules of preemptible RCU is that one cannot do
++ * rcu_read_unlock() while holding a scheduler (or nested) lock when
++ * part of the read side critical section was preemptible -- see
++ * rcu_read_unlock_special().
++ *
++ * Since ctx->lock nests under rq->lock we must ensure the entire read
++ * side critical section is non-preemptible.
++ */
++ preempt_disable();
++ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
+ if (ctx) {
+ /*
+@@ -731,6 +738,8 @@ retry:
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ rcu_read_unlock();
++ preempt_enable();
+ goto retry;
+ }
+
+@@ -740,6 +749,7 @@ retry:
+ }
+ }
+ rcu_read_unlock();
++ preempt_enable();
+ return ctx;
+ }
+
+@@ -1687,7 +1697,16 @@ static int __perf_event_enable(void *info)
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ int err;
+
+- if (WARN_ON_ONCE(!ctx->is_active))
++ /*
++ * There's a time window between the 'ctx->is_active' check
++ * in perf_event_enable() and this point, with:
++ * - IRQs on
++ * - ctx->lock unlocked
++ *
++ * during which the task could be killed and 'ctx' deactivated
++ * by perf_event_exit_task.
++ */
++ if (!ctx->is_active)
+ return -EINVAL;
+
+ raw_spin_lock(&ctx->lock);
+@@ -2939,6 +2958,7 @@ static void free_event_rcu(struct rcu_head *head)
+ }
+
+ static void ring_buffer_put(struct ring_buffer *rb);
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+
+ static void free_event(struct perf_event *event)
+ {
+@@ -2962,8 +2982,22 @@ static void free_event(struct perf_event *event)
+ }
+
+ if (event->rb) {
+- ring_buffer_put(event->rb);
+- event->rb = NULL;
++ struct ring_buffer *rb;
++
++ /*
++ * Can happen when we close an event with re-directed output.
++ *
++ * Since we have a 0 refcount, perf_mmap_close() will skip
++ * over us; possibly making our ring_buffer_put() the last.
++ */
++ mutex_lock(&event->mmap_mutex);
++ rb = event->rb;
++ if (rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* could be last */
++ }
++ mutex_unlock(&event->mmap_mutex);
+ }
+
+ if (is_cgroup_event(event))
+@@ -3201,30 +3235,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
+ unsigned int events = POLL_HUP;
+
+ /*
+- * Race between perf_event_set_output() and perf_poll(): perf_poll()
+- * grabs the rb reference but perf_event_set_output() overrides it.
+- * Here is the timeline for two threads T1, T2:
+- * t0: T1, rb = rcu_dereference(event->rb)
+- * t1: T2, old_rb = event->rb
+- * t2: T2, event->rb = new rb
+- * t3: T2, ring_buffer_detach(old_rb)
+- * t4: T1, ring_buffer_attach(rb1)
+- * t5: T1, poll_wait(event->waitq)
+- *
+- * To avoid this problem, we grab mmap_mutex in perf_poll()
+- * thereby ensuring that the assignment of the new ring buffer
+- * and the detachment of the old buffer appear atomic to perf_poll()
++ * Pin the event->rb by taking event->mmap_mutex; otherwise
++ * perf_event_set_output() can swizzle our rb and make us miss wakeups.
+ */
+ mutex_lock(&event->mmap_mutex);
+-
+- rcu_read_lock();
+- rb = rcu_dereference(event->rb);
+- if (rb) {
+- ring_buffer_attach(event, rb);
++ rb = event->rb;
++ if (rb)
+ events = atomic_xchg(&rb->poll, 0);
+- }
+- rcu_read_unlock();
+-
+ mutex_unlock(&event->mmap_mutex);
+
+ poll_wait(file, &event->waitq, wait);
+@@ -3538,16 +3555,12 @@ static void ring_buffer_attach(struct perf_event *event,
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+- if (!list_empty(&event->rb_entry))
+- goto unlock;
+-
+- list_add(&event->rb_entry, &rb->event_list);
+-unlock:
++ if (list_empty(&event->rb_entry))
++ list_add(&event->rb_entry, &rb->event_list);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+ }
+
+-static void ring_buffer_detach(struct perf_event *event,
+- struct ring_buffer *rb)
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
+ {
+ unsigned long flags;
+
+@@ -3566,13 +3579,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+- if (!rb)
+- goto unlock;
+-
+- list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+- wake_up_all(&event->waitq);
+-
+-unlock:
++ if (rb) {
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
++ wake_up_all(&event->waitq);
++ }
+ rcu_read_unlock();
+ }
+
+@@ -3601,18 +3611,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
+
+ static void ring_buffer_put(struct ring_buffer *rb)
+ {
+- struct perf_event *event, *n;
+- unsigned long flags;
+-
+ if (!atomic_dec_and_test(&rb->refcount))
+ return;
+
+- spin_lock_irqsave(&rb->event_lock, flags);
+- list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+- list_del_init(&event->rb_entry);
+- wake_up_all(&event->waitq);
+- }
+- spin_unlock_irqrestore(&rb->event_lock, flags);
++ WARN_ON_ONCE(!list_empty(&rb->event_list));
+
+ call_rcu(&rb->rcu_head, rb_free_rcu);
+ }
+@@ -3622,26 +3624,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+ struct perf_event *event = vma->vm_file->private_data;
+
+ atomic_inc(&event->mmap_count);
++ atomic_inc(&event->rb->mmap_count);
+ }
+
++/*
++ * A buffer can be mmap()ed multiple times; either directly through the same
++ * event, or through other events by use of perf_event_set_output().
++ *
++ * In order to undo the VM accounting done by perf_mmap() we need to destroy
++ * the buffer here, where we still have a VM context. This means we need
++ * to detach all events redirecting to us.
++ */
+ static void perf_mmap_close(struct vm_area_struct *vma)
+ {
+ struct perf_event *event = vma->vm_file->private_data;
+
+- if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
+- unsigned long size = perf_data_size(event->rb);
+- struct user_struct *user = event->mmap_user;
+- struct ring_buffer *rb = event->rb;
++ struct ring_buffer *rb = event->rb;
++ struct user_struct *mmap_user = rb->mmap_user;
++ int mmap_locked = rb->mmap_locked;
++ unsigned long size = perf_data_size(rb);
++
++ atomic_dec(&rb->mmap_count);
++
++ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
++ return;
++
++ /* Detach current event from the buffer. */
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ mutex_unlock(&event->mmap_mutex);
++
++ /* If there's still other mmap()s of this buffer, we're done. */
++ if (atomic_read(&rb->mmap_count)) {
++ ring_buffer_put(rb); /* can't be last */
++ return;
++ }
++
++ /*
++ * No other mmap()s, detach from all other events that might redirect
++ * into the now unreachable buffer. Somewhat complicated by the
++ * fact that rb::event_lock otherwise nests inside mmap_mutex.
++ */
++again:
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
++ if (!atomic_long_inc_not_zero(&event->refcount)) {
++ /*
++ * This event is en-route to free_event() which will
++ * detach it and remove it from the list.
++ */
++ continue;
++ }
++ rcu_read_unlock();
+
+- atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
+- vma->vm_mm->pinned_vm -= event->mmap_locked;
+- rcu_assign_pointer(event->rb, NULL);
+- ring_buffer_detach(event, rb);
++ mutex_lock(&event->mmap_mutex);
++ /*
++ * Check we didn't race with perf_event_set_output() which can
++ * swizzle the rb from under us while we were waiting to
++ * acquire mmap_mutex.
++ *
++ * If we find a different rb, ignore this event; a later
++ * iteration will no longer find it on the list. We have to
++ * still restart the iteration to make sure we're not now
++ * iterating the wrong list.
++ */
++ if (event->rb == rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* can't be last, we still have one */
++ }
+ mutex_unlock(&event->mmap_mutex);
++ put_event(event);
+
+- ring_buffer_put(rb);
+- free_uid(user);
++ /*
++ * Restart the iteration; either we're on the wrong list or
++ * we destroyed its integrity by doing a deletion.
++ */
++ goto again;
+ }
++ rcu_read_unlock();
++
++ /*
++ * It could be there's still a few 0-ref events on the list; they'll
++ * get cleaned up by free_event() -- they'll also still have their
++ * ref on the rb and will free it whenever they are done with it.
++ *
++ * Aside from that, this buffer is 'fully' detached and unmapped,
++ * undo the VM accounting.
++ */
++
++ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
++ vma->vm_mm->pinned_vm -= mmap_locked;
++ free_uid(mmap_user);
++
++ ring_buffer_put(rb); /* could be last */
+ }
+
+ static const struct vm_operations_struct perf_mmap_vmops = {
+@@ -3691,12 +3767,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ return -EINVAL;
+
+ WARN_ON_ONCE(event->ctx->parent_ctx);
++again:
+ mutex_lock(&event->mmap_mutex);
+ if (event->rb) {
+- if (event->rb->nr_pages == nr_pages)
+- atomic_inc(&event->rb->refcount);
+- else
++ if (event->rb->nr_pages != nr_pages) {
+ ret = -EINVAL;
++ goto unlock;
++ }
++
++ if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
++ /*
++ * Raced against perf_mmap_close() through
++ * perf_event_set_output(). Try again, hope for better
++ * luck.
++ */
++ mutex_unlock(&event->mmap_mutex);
++ goto again;
++ }
++
+ goto unlock;
+ }
+
+@@ -3737,19 +3825,27 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ ret = -ENOMEM;
+ goto unlock;
+ }
+- rcu_assign_pointer(event->rb, rb);
++
++ atomic_set(&rb->mmap_count, 1);
++ rb->mmap_locked = extra;
++ rb->mmap_user = get_current_user();
+
+ atomic_long_add(user_extra, &user->locked_vm);
+- event->mmap_locked = extra;
+- event->mmap_user = get_current_user();
+- vma->vm_mm->pinned_vm += event->mmap_locked;
++ vma->vm_mm->pinned_vm += extra;
++
++ ring_buffer_attach(event, rb);
++ rcu_assign_pointer(event->rb, rb);
+
+ unlock:
+ if (!ret)
+ atomic_inc(&event->mmap_count);
+ mutex_unlock(&event->mmap_mutex);
+
+- vma->vm_flags |= VM_RESERVED;
++ /*
++ * Since pinned accounting is per vm we cannot allow fork() to copy our
++ * vma.
++ */
++ vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
+ vma->vm_ops = &perf_mmap_vmops;
+
+ return ret;
+@@ -6114,6 +6210,8 @@ set:
+ if (atomic_read(&event->mmap_count))
+ goto unlock;
+
++ old_rb = event->rb;
++
+ if (output_event) {
+ /* get the rb we want to redirect to */
+ rb = ring_buffer_get(output_event);
+@@ -6121,16 +6219,28 @@ set:
+ goto unlock;
+ }
+
+- old_rb = event->rb;
+- rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
++
++ if (rb)
++ ring_buffer_attach(event, rb);
++
++ rcu_assign_pointer(event->rb, rb);
++
++ if (old_rb) {
++ ring_buffer_put(old_rb);
++ /*
++ * Since we detached before setting the new rb (so that we
++ * could attach the new rb), we could have missed a wakeup.
++ * Provide it now.
++ */
++ wake_up_all(&event->waitq);
++ }
++
+ ret = 0;
+ unlock:
+ mutex_unlock(&event->mmap_mutex);
+
+- if (old_rb)
+- ring_buffer_put(old_rb);
+ out:
+ return ret;
+ }
+@@ -6797,7 +6907,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
+ * child.
+ */
+
+- child_ctx = alloc_perf_context(event->pmu, child);
++ child_ctx = alloc_perf_context(parent_ctx->pmu, child);
+ if (!child_ctx)
+ return -ENOMEM;
+
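One idiom in the perf_mmap_close() rework above is worth isolating: atomic_long_inc_not_zero() takes a reference only while the refcount is still non-zero, so an event already en route to free_event() is skipped instead of being resurrected. A userspace sketch of the same compare-and-swap loop, using C11 atomics rather than the kernel primitive:

#include <stdatomic.h>
#include <stdbool.h>

/* take a reference only if the object is not already being freed */
static bool demo_ref_get_not_zero(atomic_long *ref)
{
	long old = atomic_load(ref);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current count */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* count hit zero: a free is already in flight */
}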
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index b7971d6..98ac24e 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+ return;
+ }
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ unsigned int nr;
+
+ nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+ if (cpu >= 0) {
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ } else {
+- for_each_online_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ }
+
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 64568a6..a2101bb 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -26,6 +26,10 @@ struct ring_buffer {
+ spinlock_t event_lock;
+ struct list_head event_list;
+
++ atomic_t mmap_count;
++ unsigned long mmap_locked;
++ struct user_struct *mmap_user;
++
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+ };
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 77bccfc..1d0538e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -60,6 +60,7 @@
+ #include <linux/pid.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ptrace.h>
++#include <linux/hugetlb.h>
+
+ #include <asm/futex.h>
+
+@@ -363,7 +364,7 @@ again:
+ } else {
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.inode = page_head->mapping->host;
+- key->shared.pgoff = page_head->index;
++ key->shared.pgoff = basepage_index(page);
+ }
+
+ get_futex_key_refs(key);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 382a6bd..52bdd58 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -539,9 +539,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
+ return 0;
+
+ if (irq_settings_can_request(desc)) {
+- if (desc->action)
+- if (irqflags & desc->action->flags & IRQF_SHARED)
+- canrequest =1;
++ if (!desc->action ||
++ irqflags & desc->action->flags & IRQF_SHARED)
++ canrequest = 1;
+ }
+ irq_put_desc_unlock(desc, flags);
+ return canrequest;
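The rewritten condition above chains two bitwise ANDs: irqflags & desc->action->flags & IRQF_SHARED is non-zero only when both the new request and the already-installed handler set IRQF_SHARED, i.e. both sides must opt in to sharing the line. The same test in isolation (DEMO_IRQF_SHARED is an illustrative stand-in for the real flag):

#define DEMO_IRQF_SHARED	0x00000080	/* stand-in for IRQF_SHARED */

static int demo_can_share(unsigned long request_flags,
			  unsigned long installed_flags)
{
	/* both the requester and the installed handler must agree */
	return (request_flags & installed_flags & DEMO_IRQF_SHARED) != 0;
}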
+diff --git a/kernel/printk.c b/kernel/printk.c
+index c0d12ea..16688ec 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -813,9 +813,9 @@ static int console_trylock_for_printk(unsigned int cpu)
+ }
+ }
+ printk_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
+- raw_spin_unlock(&logbuf_lock);
+ return retval;
+ }
+ static const char recursion_bug_msg [] =
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f2f71d7..f8b05a4 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -145,9 +145,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
+ /* now that we have rounded, subtract the extra skew again */
+ j -= cpu * 3;
+
+- if (j <= jiffies) /* rounding ate our timeout entirely; */
+- return original;
+- return j;
++ /*
++ * Make sure j is still in the future. Otherwise return the
++ * unmodified value.
++ */
++ return time_is_after_jiffies(j) ? j : original;
+ }
+
+ /**
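The round_jiffies_common() fix above replaces a raw j <= jiffies comparison, which gives the wrong answer when the jiffies counter wraps, with time_is_after_jiffies(). The underlying trick is to compare through a signed subtraction, as in the kernel's time_after(); a sketch with an illustrative macro name:

/* true if time 'a' is after time 'b', even across a counter wrap */
#define demo_time_after(a, b)	((long)((b) - (a)) < 0)

/*
 * e.g. with a 32-bit counter: b = 0xfffffff0 (just before the wrap),
 * a = 0x10 (just after it); (long)(b - a) is negative, so 'a' is later.
 */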
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index cb65454..7c75bbb 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -303,6 +303,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int size;
+ int syscall_nr;
+
+@@ -318,8 +320,11 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->enter_event->event.type, size, 0, 0);
++ sys_data->enter_event->event.type, size, irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -329,7 +334,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ if (!filter_current_check_discard(buffer, sys_data->enter_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+@@ -338,6 +344,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+@@ -350,8 +358,12 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ if (!sys_data)
+ return;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
++ sys_data->exit_event->event.type, sizeof(*entry),
++ irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -361,7 +373,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+
+ if (!filter_current_check_discard(buffer, sys_data->exit_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ int reg_event_syscall_enter(struct ftrace_event_call *call)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2dcd716..ddf2128 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -679,6 +679,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++pgoff_t __basepage_index(struct page *page)
++{
++ struct page *page_head = compound_head(page);
++ pgoff_t index = page_index(page_head);
++ unsigned long compound_idx;
++
++ if (!PageHuge(page_head))
++ return page_index(page);
++
++ if (compound_order(page_head) >= MAX_ORDER)
++ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
++ else
++ compound_idx = page - page_head;
++
++ return (index << compound_order(page_head)) + compound_idx;
++}
++
+ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
+ {
+ struct page *page;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a0b6c50..dd7c019 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1737,6 +1737,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+ conn, code, ident, dlen);
+
++ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
++ return NULL;
++
+ len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+ count = min_t(unsigned int, conn->mtu, len);
+
+@@ -2865,7 +2868,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
+- if (cmd_len != sizeof(*rsp))
++ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ type = __le16_to_cpu(rsp->type);
+diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
+index 214c2bb..9f78c5f 100644
+--- a/net/ceph/auth_none.c
++++ b/net/ceph/auth_none.c
+@@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac)
+ return xi->starting;
+ }
+
++static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
++{
++ return 0;
++}
++
+ /*
+ * the generic auth code decode the global_id, and we carry no actual
+ * authenticate state, so nothing happens here.
+@@ -107,6 +112,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
+ .destroy = destroy,
+ .is_authenticated = is_authenticated,
+ .should_authenticate = should_authenticate,
++ .build_request = build_request,
+ .handle_reply = handle_reply,
+ .create_authorizer = ceph_auth_none_create_authorizer,
+ .destroy_authorizer = ceph_auth_none_destroy_authorizer,
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 3c8bc6e..d148a2b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -902,7 +902,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ per_cvt->assigned = 1;
+ hinfo->nid = per_cvt->cvt_nid;
+
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
++ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index e97df24..8b687da 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2117,7 +2117,6 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+- u16 *reg_cache = codec->reg_cache;
+ int ret;
+
+ /* Apply the update (if any) */
+@@ -2126,16 +2125,19 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ return 0;
+
+ /* If the left PGA is enabled hit that VU bit... */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
+- reg_cache[WM8962_HPOUTL_VOLUME]);
++ ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
++ if (ret & WM8962_HPOUTL_PGA_ENA) {
++ snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
++ return 1;
++ }
+
+ /* ...otherwise the right. The VU is stereo. */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
+- reg_cache[WM8962_HPOUTR_VOLUME]);
++ if (ret & WM8962_HPOUTR_PGA_ENA)
++ snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTR_VOLUME));
+
+- return 0;
++ return 1;
+ }
+
+ /* The VU bits for the speakers are in a different register to the mute
+@@ -3944,7 +3946,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+ int ret;
+ struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
+ struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+- u16 *reg_cache = codec->reg_cache;
+ int i, trigger, irq_pol;
+ bool dmicclk, dmicdat;
+
+@@ -4055,8 +4056,9 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+
+ /* Put the speakers into mono mode? */
+ if (pdata->spk_mono)
+- reg_cache[WM8962_CLASS_D_CONTROL_2]
+- |= WM8962_SPK_MONO;
++ snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
++ WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
++
+
+ /* Micbias setup, detection enable and detection
+ * thresholds. */
diff --git a/3.2.54/1049_linux-3.2.50.patch b/3.2.54/1049_linux-3.2.50.patch
new file mode 100644
index 0000000..20b3015
--- /dev/null
+++ b/3.2.54/1049_linux-3.2.50.patch
@@ -0,0 +1,2495 @@
+diff --git a/Makefile b/Makefile
+index 2e3d791..0799e8e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
+index 0192a4e..80de64b 100644
+--- a/arch/powerpc/include/asm/module.h
++++ b/arch/powerpc/include/asm/module.h
+@@ -87,10 +87,9 @@ struct exception_table_entry;
+ void sort_ex_table(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+
+-#ifdef CONFIG_MODVERSIONS
++#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
+ #define ARCH_RELOCATES_KCRCTAB
+-
+-extern const unsigned long reloc_start[];
++#define reloc_start PHYSICAL_START
+ #endif
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MODULE_H */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 920276c..3e8fe4b 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
+ #endif
+ SECTIONS
+ {
+- . = 0;
+- reloc_start = .;
+-
+ . = KERNELBASE;
+
+ /*
+diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
+index 68f7e11..ce48203 100644
+--- a/arch/sparc/kernel/asm-offsets.c
++++ b/arch/sparc/kernel/asm-offsets.c
+@@ -34,6 +34,8 @@ int foo(void)
+ DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
+ BLANK();
+ DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
++ BLANK();
++ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+
+ /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
+ return 0;
+diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
+index 44aad32..969f964 100644
+--- a/arch/sparc/mm/hypersparc.S
++++ b/arch/sparc/mm/hypersparc.S
+@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
+
+ /* The things we do for performance... */
+ hypersparc_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
+ */
+ /* Verified, my ass... */
+ hypersparc_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %g2
+ #ifndef CONFIG_SMP
+ cmp %g2, -1
+@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 6ff4d78..b4989f9 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1071,7 +1071,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
+ m->size = *val;
+ val = mdesc_get_property(md, node,
+ "address-congruence-offset", NULL);
+- m->offset = *val;
++
++ /* The address-congruence-offset property is optional.
++ * Explicitly zero it to identify this.
++ */
++ if (val)
++ m->offset = *val;
++ else
++ m->offset = 0UL;
+
+ numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
+ count - 1, m->base, m->size, m->offset);
+diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
+index c801c39..5d2b88d 100644
+--- a/arch/sparc/mm/swift.S
++++ b/arch/sparc/mm/swift.S
+@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
+
+ .globl swift_flush_cache_range
+ swift_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ sub %o2, %o1, %o2
+ sethi %hi(4096), %o3
+ cmp %o2, %o3
+@@ -116,7 +116,7 @@ swift_flush_cache_range:
+
+ .globl swift_flush_cache_page
+ swift_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ 70:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -219,7 +219,7 @@ swift_flush_sig_insns:
+ .globl swift_flush_tlb_range
+ .globl swift_flush_tlb_all
+ swift_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ swift_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
+
+ .globl swift_flush_tlb_page
+ swift_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index afd021e..072f553 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -115,8 +115,8 @@ no_cache_flush:
+ }
+
+ if (!tb->active) {
+- global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
++ global_flush_tlb_page(mm, vaddr);
+ goto out;
+ }
+
+diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
+index 4e55e8f..bf10a34 100644
+--- a/arch/sparc/mm/tsunami.S
++++ b/arch/sparc/mm/tsunami.S
+@@ -24,7 +24,7 @@
+ /* Sliiick... */
+ tsunami_flush_cache_page:
+ tsunami_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_cache_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
+
+ /* More slick stuff... */
+ tsunami_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
+
+ /* This one can be done in a fine grained manner... */
+ tsunami_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
+index 6dfcc13..a516372 100644
+--- a/arch/sparc/mm/viking.S
++++ b/arch/sparc/mm/viking.S
+@@ -109,7 +109,7 @@ viking_mxcc_flush_page:
+ viking_flush_cache_page:
+ viking_flush_cache_range:
+ #ifndef CONFIG_SMP
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #endif
+ viking_flush_cache_mm:
+ #ifndef CONFIG_SMP
+@@ -149,7 +149,7 @@ viking_flush_tlb_mm:
+ #endif
+
+ viking_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -174,7 +174,7 @@ viking_flush_tlb_range:
+ #endif
+
+ viking_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -240,7 +240,7 @@ sun4dsmp_flush_tlb_range:
+ tst %g5
+ bne 3f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+@@ -266,7 +266,7 @@ sun4dsmp_flush_tlb_page:
+ tst %g5
+ bne 2f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ and %o1, PAGE_MASK, %o1
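The VMA_VM_MM constant that replaces the hard-coded 0x00 offsets in the sparc assembly above is produced by the DEFINE() entry added to asm-offsets.c earlier in this patch, so the assembly can never drift out of sync with struct vm_area_struct. A sketch of the kbuild mechanism behind DEFINE(), closely following include/linux/kbuild.h (demo struct, not the real vm_area_struct):

#include <stddef.h>

struct demo_vma {
	void *vm_mm;
	/* ... */
};

/*
 * Emit "->SYMBOL <value>" into the compiler's assembly output; kbuild
 * scrapes each such line into "#define SYMBOL <value>" in asm-offsets.h.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void demo_offsets(void)
{
	DEFINE(VMA_VM_MM, offsetof(struct demo_vma, vm_mm));
}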
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index d985713..f81597f 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -421,6 +421,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
++ device->driver_data = NULL;
+ kfree(mem_device);
+ return result;
+ }
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 7a949af..5b0b5f7 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -352,7 +352,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 85fdd4b..2232b85 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -277,6 +277,7 @@ int xen_blkif_schedule(void *arg)
+ {
+ struct xen_blkif *blkif = arg;
+ struct xen_vbd *vbd = &blkif->vbd;
++ int ret;
+
+ xen_blkif_get(blkif);
+
+@@ -297,8 +298,12 @@ int xen_blkif_schedule(void *arg)
+ blkif->waiting_reqs = 0;
+ smp_mb(); /* clear flag *before* checking for work */
+
+- if (do_block_io_op(blkif))
++ ret = do_block_io_op(blkif);
++ if (ret > 0)
+ blkif->waiting_reqs = 1;
++ if (ret == -EACCES)
++ wait_event_interruptible(blkif->shutdown_wq,
++ kthread_should_stop());
+
+ if (log_stats && time_after(jiffies, blkif->st_print))
+ print_stats(blkif);
+@@ -539,6 +544,12 @@ __do_block_io_op(struct xen_blkif *blkif)
+ rp = blk_rings->common.sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
++ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
++ rc = blk_rings->common.rsp_prod_pvt;
++ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
++ rp, rc, rp - rc, blkif->vbd.pdevice);
++ return -EACCES;
++ }
+ while (rc != rp) {
+
+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
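The RING_REQUEST_PROD_OVERFLOW() guard added above defends the backend against a misbehaving frontend: a request producer index can never legitimately run more than one ring's worth of slots ahead of the backend's private response producer. A sketch of the check (demo function; unsigned index arithmetic handles wrap-around):

/* true if the frontend's request producer is implausibly far ahead */
static int demo_ring_prod_overflow(unsigned int req_prod,
				   unsigned int rsp_prod_pvt,
				   unsigned int ring_size)
{
	return (req_prod - rsp_prod_pvt) > ring_size;
}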
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index dfb1b3a..f67985d 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -198,6 +198,8 @@ struct xen_blkif {
+ int st_wr_sect;
+
+ wait_queue_head_t waiting_to_free;
++ /* Thread shutdown wait queue. */
++ wait_queue_head_t shutdown_wq;
+ };
+
+
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 674e3c2..77aed26 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -118,6 +118,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
+ atomic_set(&blkif->drain, 0);
+ blkif->st_print = jiffies;
+ init_waitqueue_head(&blkif->waiting_to_free);
++ init_waitqueue_head(&blkif->shutdown_wq);
+
+ return blkif;
+ }
+@@ -178,6 +179,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
+ {
+ if (blkif->xenblkd) {
+ kthread_stop(blkif->xenblkd);
++ wake_up(&blkif->shutdown_wq);
+ blkif->xenblkd = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c32fd93..8115557 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ enum radeon_combios_table_offset table)
+ {
+ struct radeon_device *rdev = dev->dev_private;
+- int rev;
++ int rev, size;
+ uint16_t offset = 0, check_offset;
+
+ if (!rdev->bios)
+@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ switch (table) {
+ /* absolute offset tables */
+ case COMBIOS_ASIC_INIT_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0xc;
+ break;
+ case COMBIOS_BIOS_SUPPORT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x14;
+ break;
+ case COMBIOS_DAC_PROGRAMMING_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2a;
+ break;
+ case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2c;
+ break;
+ case COMBIOS_CRTC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2e;
+ break;
+ case COMBIOS_PLL_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x30;
+ break;
+ case COMBIOS_TV_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x32;
+ break;
+ case COMBIOS_DFP_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x34;
+ break;
+ case COMBIOS_HW_CONFIG_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x36;
+ break;
+ case COMBIOS_MULTIMEDIA_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x38;
+ break;
+ case COMBIOS_TV_STD_PATCH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x3e;
+ break;
+ case COMBIOS_LCD_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x40;
+ break;
+ case COMBIOS_MOBILE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x42;
+ break;
+ case COMBIOS_PLL_INIT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x46;
+ break;
+ case COMBIOS_MEM_CONFIG_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x48;
+ break;
+ case COMBIOS_SAVE_MASK_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4a;
+ break;
+ case COMBIOS_HARDCODED_EDID_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4c;
+ break;
+ case COMBIOS_ASIC_INIT_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4e;
+ break;
+ case COMBIOS_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x50;
+ break;
+ case COMBIOS_DYN_CLK_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x52;
+ break;
+ case COMBIOS_RESERVED_MEM_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x54;
+ break;
+ case COMBIOS_EXT_TMDS_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x58;
+ break;
+ case COMBIOS_MEM_CLK_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5a;
+ break;
+ case COMBIOS_EXT_DAC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5c;
+ break;
+ case COMBIOS_MISC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5e;
+ break;
+ case COMBIOS_CRT_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x60;
+ break;
+ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x62;
+ break;
+ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x64;
+ break;
+ case COMBIOS_FAN_SPEED_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x66;
+ break;
+ case COMBIOS_OVERDRIVE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x68;
+ break;
+ case COMBIOS_OEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6a;
+ break;
+ case COMBIOS_DYN_CLK_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6c;
+ break;
+ case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6e;
+ break;
+ case COMBIOS_I2C_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x70;
+ break;
+ /* relative offset tables */
+ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ }
+ break;
+ default:
++ check_offset = 0;
+ break;
+ }
+
+- return offset;
++ size = RBIOS8(rdev->bios_header_start + 0x6);
++ /* check absolute offset tables */
++ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
++ offset = RBIOS16(rdev->bios_header_start + check_offset);
+
++ return offset;
+ }
+
+ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+@@ -953,16 +890,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+- /* if the values are all zeros, use the table */
+- if (p_dac->ps2_pdac_adj)
++ /* if the values are zeros, use the table */
++ if ((dac == 0) || (bg == 0))
++ found = 0;
++ else
+ found = 1;
+ }
+
+ /* quirks */
++ /* Radeon 7000 (RV100) */
++ if (((dev->pdev->device == 0x5159) &&
++ (dev->pdev->subsystem_vendor == 0x174B) &&
++ (dev->pdev->subsystem_device == 0x7c28)) ||
+ /* Radeon 9100 (R200) */
+- if ((dev->pdev->device == 0x514D) &&
++ ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+- (dev->pdev->subsystem_device == 0x7149)) {
++ (dev->pdev->subsystem_device == 0x7149))) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index 68fe73c..99b1145 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -186,6 +186,8 @@ static int __init dummy_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
++ if (err < 0)
++ goto out;
+
+ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
+@@ -193,6 +195,8 @@ static int __init dummy_init_module(void)
+ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index dd893b3..87851f0 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1685,8 +1685,8 @@ check_sum:
+ return 0;
+ }
+
+-static void atl1e_tx_map(struct atl1e_adapter *adapter,
+- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
++static int atl1e_tx_map(struct atl1e_adapter *adapter,
++ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+ {
+ struct atl1e_tpd_desc *use_tpd = NULL;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+@@ -1697,6 +1697,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ u16 nr_frags;
+ u16 f;
+ int segment;
++ int ring_start = adapter->tx_ring.next_to_use;
++ int ring_end;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+@@ -1709,6 +1711,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->length = map_len;
+ tx_buffer->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
++ return -ENOSPC;
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1735,6 +1740,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ map_len, PCI_DMA_TODEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ pci_unmap_single(adapter->pdev, tx_buffer->dma,
++ tx_buffer->length, PCI_DMA_TODEVICE);
++ }
++ /* Reset the tx rings next pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1770,6 +1791,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
++ tx_buffer->length, DMA_TO_DEVICE);
++ }
++
++ /* Reset the ring's next-to-use pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+@@ -1787,6 +1825,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ /* The last buffer info contain the skb address,
+ so it will be free after unmap */
+ tx_buffer->skb = skb;
++ return 0;
+ }
+
+ static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+@@ -1854,10 +1893,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+- atl1e_tx_map(adapter, skb, tpd);
++ if (atl1e_tx_map(adapter, skb, tpd)) {
++ dev_kfree_skb_any(skb);
++ goto out;
++ }
++
+ atl1e_tx_queue(adapter, tpd_req, tpd);
+
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
++out:
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 9b23074..b2077ca 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -136,8 +136,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .rmcr_value = 0x00000001,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+- EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
++ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
++ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
++ EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+@@ -251,9 +252,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+@@ -355,9 +356,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index 47877b1..590705c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -461,7 +461,7 @@ enum EESR_BIT {
+
+ #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
++#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+ #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 8c6c059..bd08919 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1248,6 +1248,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
++
++ unregister_netdev(vp->dev);
+ }
+ return 0;
+ }
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 46b5f5f..b19841a 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -290,11 +290,17 @@ static int __init ifb_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&ifb_link_ops);
++ if (err < 0)
++ goto out;
+
+- for (i = 0; i < numifbs && !err; i++)
++ for (i = 0; i < numifbs && !err; i++) {
+ err = ifb_init_one(i);
++ cond_resched();
++ }
+ if (err)
+ __rtnl_link_unregister(&ifb_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 26106c0..96b9e3c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -532,8 +532,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+- for (i = 0; i < num_pages; i++)
+- put_page(page[i]);
++ int j;
++
++ for (j = 0; j < num_pages; j++)
++ put_page(page[i + j]);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+@@ -653,6 +655,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int vnet_hdr_len = 0;
+ int copylen = 0;
+ bool zerocopy = false;
++ size_t linear;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = q->vnet_hdr_sz;
+@@ -707,11 +710,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+- } else
++ linear = copylen;
++ } else {
+ copylen = len;
++ linear = vnet_hdr.hdr_len;
++ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+- vnet_hdr.hdr_len, noblock, &err);
++ linear, noblock, &err);
+ if (!skb)
+ goto err;
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6ee8410..43a6a11 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -508,7 +508,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ {
+ struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+ void *buf;
+- unsigned int len, received = 0;
++ unsigned int r, len, received = 0;
+
+ again:
+ while (received < budget &&
+@@ -525,8 +525,9 @@ again:
+
+ /* Out of packets? */
+ if (received < budget) {
++ r = virtqueue_enable_cb_prepare(vi->rvq);
+ napi_complete(napi);
+- if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
++ if (unlikely(virtqueue_poll(vi->rvq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(napi);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
+index 84a78af..182fcb2 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
+@@ -1788,7 +1788,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+- memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
++ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 66ad3dc..e294d11 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1038,6 +1038,7 @@ int isci_task_abort_task(struct sas_task *task)
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ int perform_termination = 0;
++ int target_done_already = 0;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+@@ -1052,9 +1053,11 @@ int isci_task_abort_task(struct sas_task *task)
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+- old_request)
++ old_request) {
+ isci_device = isci_lookup_device(task->dev);
+-
++ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
++ &old_request->flags);
++ }
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+@@ -1116,7 +1119,7 @@ int isci_task_abort_task(struct sas_task *task)
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ sas_protocol_ata(task->task_proto) ||
+- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
++ target_done_already) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index a4b267e..9fbe260 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -423,6 +423,8 @@ qla2x00_start_scsi(srb_t *sp)
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
++ } else {
++ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ }
+
+ /* Load SCSI command packet. */
+@@ -1244,11 +1246,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ break;
+ }
+ } else {
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ }
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+@@ -1454,7 +1456,12 @@ qla24xx_start_scsi(srb_t *sp)
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
++ default:
++ cmd_pkt->task = TSK_SIMPLE;
++ break;
+ }
++ } else {
++ cmd_pkt->task = TSK_SIMPLE;
+ }
+
+ /* Load SCSI command packet. */
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6dace1a..17603da 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -641,10 +641,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+
+ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+ {
++ struct scsi_cmnd *SCpnt = rq->special;
++
+ if (rq->cmd_flags & REQ_DISCARD) {
+ free_page((unsigned long)rq->buffer);
+ rq->buffer = NULL;
+ }
++ if (SCpnt->cmnd != rq->cmd) {
++ mempool_free(SCpnt->cmnd, sd_cdb_pool);
++ SCpnt->cmnd = NULL;
++ SCpnt->cmd_len = 0;
++ }
+ }
+
+ /**
+@@ -1452,21 +1459,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
+- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
+- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
+-
+- /* We have to print a failed command here as the
+- * extended CDB gets freed before scsi_io_completion()
+- * is called.
+- */
+- if (result)
+- scsi_print_command(SCpnt);
+-
+- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+- SCpnt->cmnd = NULL;
+- SCpnt->cmd_len = 0;
+- }
+-
+ return good_bytes;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index fe4dbf3..7e42190 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1078,22 +1078,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ DPRINTK("subdevice busy\n");
+ return -EBUSY;
+ }
+- s->busy = file;
+
+ /* make sure channel/gain list isn't too long */
+ if (user_cmd.chanlist_len > s->len_chanlist) {
+ DPRINTK("channel/gain list too long %u > %d\n",
+ user_cmd.chanlist_len, s->len_chanlist);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ /* make sure channel/gain list isn't too short */
+ if (user_cmd.chanlist_len < 1) {
+ DPRINTK("channel/gain list too short %u < 1\n",
+ user_cmd.chanlist_len);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ async->cmd = user_cmd;
+@@ -1103,8 +1100,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
+ if (!async->cmd.chanlist) {
+ DPRINTK("allocation failed\n");
+- ret = -ENOMEM;
+- goto cleanup;
++ return -ENOMEM;
+ }
+
+ if (copy_from_user(async->cmd.chanlist, user_cmd.chanlist,
+@@ -1156,6 +1152,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+
+ comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+
++	/* Set s->busy _after_ setting the SRF_RUNNING flag to avoid a race
++	 * with comedi_read() or comedi_write(). */
++ s->busy = file;
+ ret = s->do_cmd(dev, s);
+ if (ret == 0)
+ return 0;
+@@ -1370,6 +1369,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ void *file)
+ {
+ struct comedi_subdevice *s;
++ int ret;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+@@ -1386,7 +1386,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ if (s->busy != file)
+ return -EBUSY;
+
+- return do_cancel(dev, s);
++ ret = do_cancel(dev, s);
++ if (comedi_get_subdevice_runflags(s) & SRF_USER)
++ wake_up_interruptible(&s->async->wait_head);
++
++ return ret;
+ }
+
+ /*
+@@ -1653,6 +1657,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+ if (count == 0) {
++ mutex_lock(&dev->mutex);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+ retval = -EPIPE;
+@@ -1660,6 +1665,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ retval = 0;
+ }
+ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ break;
+ }
+@@ -1774,6 +1780,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+
+ if (n == 0) {
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
++ mutex_lock(&dev->mutex);
+ do_become_nonbusy(dev, s);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+@@ -1781,6 +1788,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ } else {
+ retval = 0;
+ }
++ mutex_unlock(&dev->mutex);
+ break;
+ }
+ if (file->f_flags & O_NONBLOCK) {
+@@ -1818,9 +1826,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ buf += n;
+ break; /* makes device work like a pipe */
+ }
+- if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+- async->buf_read_count - async->buf_write_count == 0) {
+- do_become_nonbusy(dev, s);
++ if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING))) {
++ mutex_lock(&dev->mutex);
++ if (async->buf_read_count - async->buf_write_count == 0)
++ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&async->wait_head, &wait);
+diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
+index 9d4c8a6..2d3a420 100644
+--- a/drivers/staging/line6/pcm.c
++++ b/drivers/staging/line6/pcm.c
+@@ -360,8 +360,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
+ */
+ static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
+ {
+- if (substream->runtime && snd_pcm_running(substream))
++ if (substream->runtime && snd_pcm_running(substream)) {
++ snd_pcm_stream_lock_irq(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ }
+
+ /*
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 22cbe06..2768a7e 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -463,6 +463,15 @@ resubmit:
+ static inline int
+ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+ {
++ /* Need to clear both directions for control ep */
++ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_CONTROL) {
++ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
++ devinfo ^ 0x8000, tt, NULL, 0, 1000);
++ if (status)
++ return status;
++ }
+ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
+ tt, NULL, 0, 1000);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 29a8e16..4795c0c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -643,8 +643,8 @@ struct dwc3 {
+
+ struct dwc3_event_type {
+ u32 is_devspec:1;
+- u32 type:6;
+- u32 reserved8_31:25;
++ u32 type:7;
++ u32 reserved8_31:24;
+ } __packed;
+
+ #define DWC3_DEPEVT_XFERCOMPLETE 0x01
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b368b83..619ee19 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1217,6 +1217,7 @@ err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+ err0:
++ dwc->gadget_driver = NULL;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index aca647a..79d2720 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -89,7 +89,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d08a804..633476e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -463,7 +463,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+- if (!(list_empty(&ep->ring->td_list)))
++ if (ep->ring && !(list_empty(&ep->ring->td_list)))
+ xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 136c357..6e1c92a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1153,9 +1153,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+
+ xhci = hcd_to_xhci(hcd);
+- if (xhci->xhc_state & XHCI_STATE_HALTED)
+- return -ENODEV;
+-
+ if (check_virt_dev) {
+ if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
+ printk(KERN_DEBUG "xHCI %s called with unaddressed "
+@@ -1171,6 +1168,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+ }
+
++ if (xhci->xhc_state & XHCI_STATE_HALTED)
++ return -ENODEV;
++
+ return 1;
+ }
+
+@@ -4178,6 +4178,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+
+ get_quirks(dev, xhci);
+
++	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
++	 * success event after a short transfer. This quirk ignores such
++	 * spurious events.
++	 */
++ if (xhci->hci_version > 0x96)
++ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
++
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index dd573ab..7af163d 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
+ { USB_DEVICE(0x0711, 0x0903) },
+ { USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
++ { USB_DEVICE(0x0711, 0x0950) },
+ { USB_DEVICE(0x182d, 0x021c) },
+ { USB_DEVICE(0x182d, 0x0269) },
+ { }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 913a178..c408ff7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
++ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+@@ -124,6 +125,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
++ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -154,6 +157,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index e89ee48..5e8c736 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -925,20 +925,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
+ if (status < 0) {
+ dbg("Reading Spreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+
+ Data &= ~0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+ /* End of block to be checked */
+
+@@ -947,7 +947,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ &Data);
+ if (status < 0) {
+ dbg("Reading Controlreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x08; /* Driver done bit */
+ Data |= 0x20; /* rx_disable */
+@@ -955,7 +955,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ mos7840_port->ControlRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Controlreg failed");
+- return -1;
++ goto err;
+ }
+ /* do register settings here */
+ /* Set all regs to the device default values. */
+@@ -966,21 +966,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
+ if (status < 0) {
+ dbg("disabling interrupts failed");
+- return -1;
++ goto err;
+ }
+ /* Set FIFO_CONTROL_REGISTER to the default value */
+ Data = 0x00;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0xcf;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0x03;
+@@ -1136,7 +1136,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ dbg ("%s leave", __func__);
+
+ return 0;
+-
++err:
++ for (j = 0; j < NUM_URBS; ++j) {
++ urb = mos7840_port->write_urb_pool[j];
++ if (!urb)
++ continue;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ }
++ return status;
+ }
+
+ /*****************************************************************************
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index b8365a7..c2103f4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,17 +347,12 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
++#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+ #define CELOT_PRODUCT_CT680M 0x6801
+
+-/* ONDA Communication vendor id */
+-#define ONDA_VENDOR_ID 0x1ee8
+-
+-/* ONDA MT825UP HSDPA 14.2 modem */
+-#define ONDA_MT825UP 0x000b
+-
+ /* Samsung products */
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define SAMSUNG_PRODUCT_GT_B3730 0x6889
+@@ -450,7 +445,8 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Hyundai Petatel Inc. products */
+ #define PETATEL_VENDOR_ID 0x1ff4
+-#define PETATEL_PRODUCT_NP10T 0x600e
++#define PETATEL_PRODUCT_NP10T_600A 0x600a
++#define PETATEL_PRODUCT_NP10T_600E 0x600e
+
+ /* TP-LINK Incorporated products */
+ #define TPLINK_VENDOR_ID 0x2357
+@@ -797,6 +793,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+@@ -832,7 +829,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1278,8 +1276,8 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
+@@ -1351,9 +1349,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
+@@ -1361,6 +1362,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 9d3b39e..42038ba 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -408,7 +408,7 @@ static int ti_startup(struct usb_serial *serial)
+ usb_set_serial_data(serial, tdev);
+
+ /* determine device type */
+- if (usb_match_id(serial->interface, ti_id_table_3410))
++ if (serial->type == &ti_1port_device)
+ tdev->td_is_3410 = 1;
+ dbg("%s - device type is %s", __func__,
+ tdev->td_is_3410 ? "3410" : "5052");
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 7b8d564..8a3b531 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -657,6 +657,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
+
++/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
++UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
++ "Sony Corp.",
++ "MicroVault Flash Drive",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_READ_CAPACITY_16 ),
++
+ /* floppy reports multiple luns */
+ UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
+ "SAMSUNG",
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index dc2eed1..4a88ac3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -360,9 +360,22 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+-bool virtqueue_enable_cb(struct virtqueue *_vq)
++/**
++ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns the current queue state
++ * in an opaque unsigned value. This value should later be tested by
++ * virtqueue_poll to detect a possible race between the driver checking
++ * for more work and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
++ u16 last_used_idx;
+
+ START_USE(vq);
+
+@@ -372,15 +385,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+- vring_used_event(&vq->vring) = vq->last_used_idx;
++ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
++ END_USE(vq);
++ return last_used_idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
++
++/**
++ * virtqueue_poll - query pending used buffers
++ * @vq: the struct virtqueue we're talking about.
++ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
++ *
++ * Returns "true" if there are pending used buffers in the queue.
++ *
++ * This does not need to be serialized.
++ */
++bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
++{
++ struct vring_virtqueue *vq = to_vvq(_vq);
++
+ virtio_mb();
+- if (unlikely(more_used(vq))) {
+- END_USE(vq);
+- return false;
+- }
++ return (u16)last_used_idx != vq->vring.used->idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+- END_USE(vq);
+- return true;
++/**
++ * virtqueue_enable_cb - restart callbacks after disable_cb.
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns "false" if there are pending
++ * buffers in the queue, to detect a possible race between the driver
++ * checking for more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++bool virtqueue_enable_cb(struct virtqueue *_vq)
++{
++ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
++ return !virtqueue_poll(_vq, last_used_idx);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8d4d53d..49eefdb 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6560,6 +6560,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ int err = 0;
+ int ret;
+ int level;
++ bool root_dropped = false;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+@@ -6614,6 +6615,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
++ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+@@ -6627,6 +6629,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
++ path->locks[level] = 0;
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
+@@ -6707,11 +6710,21 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ free_extent_buffer(root->commit_root);
+ kfree(root);
+ }
++ root_dropped = true;
+ out_free:
+ btrfs_end_transaction_throttle(trans, tree_root);
+ kfree(wc);
+ btrfs_free_path(path);
+ out:
++ /*
++	 * If we had to stop dropping the snapshot for whatever reason, we
++	 * need to add it back to the dead root list so that we keep trying
++	 * to do the work later. This also cleans up roots that are not in
++	 * the radix (like when we recover after a power failure or unmount),
++	 * so we don't leak memory.
++ */
++ if (root_dropped == false)
++ btrfs_add_dead_root(root);
+ if (err)
+ btrfs_std_error(root->fs_info, err);
+ return;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9243103..9b8c131 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4696,11 +4696,16 @@ do_more:
+ * blocks being freed are metadata. these blocks shouldn't
+ * be used until this transaction is committed
+ */
++ retry:
+ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ if (!new_entry) {
+- ext4_mb_unload_buddy(&e4b);
+- err = -ENOMEM;
+- goto error_return;
++ /*
++ * We use a retry loop because
++ * ext4_free_blocks() is not allowed to fail.
++ */
++ cond_resched();
++ congestion_wait(BLK_RW_ASYNC, HZ/50);
++ goto retry;
+ }
+ new_entry->start_cluster = bit;
+ new_entry->group = block_group;
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index f0179c3..cd8703d 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -913,6 +913,7 @@ nlmsvc_retry_blocked(void)
+ unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
+ struct nlm_block *block;
+
++ spin_lock(&nlm_blocked_lock);
+ while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
+
+@@ -922,6 +923,7 @@ nlmsvc_retry_blocked(void)
+ timeout = block->b_when - jiffies;
+ break;
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+ block, block->b_when);
+@@ -931,7 +933,9 @@ nlmsvc_retry_blocked(void)
+ retry_deferred_block(block);
+ } else
+ nlmsvc_grant_blocked(block);
++ spin_lock(&nlm_blocked_lock);
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ return timeout;
+ }
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 1ec1fde..561a3dc 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -782,9 +782,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ }
+ *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
+ flags, current_cred());
+- if (IS_ERR(*filp))
++ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+- else
++ *filp = NULL;
++ } else
+ host_err = ima_file_check(*filp, access);
+ out_nfserr:
+ err = nfserrno(host_err);
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9fde1c0..9860f6b 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -118,6 +118,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->metadata_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
++ metadata->reserved = 0;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ if (unlikely(event->mask & FAN_Q_OVERFLOW))
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index b5f927f..732c962 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -128,11 +128,11 @@ struct pppoe_tag {
+
+ struct pppoe_hdr {
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 ver : 4;
+ __u8 type : 4;
++ __u8 ver : 4;
+ #elif defined(__BIG_ENDIAN_BITFIELD)
+- __u8 type : 4;
+ __u8 ver : 4;
++ __u8 type : 4;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 4c069d8b..96c7843 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -96,6 +96,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
+
+ bool virtqueue_enable_cb(struct virtqueue *vq);
+
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
++
++bool virtqueue_poll(struct virtqueue *vq, unsigned);
++
+ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index cbc6bb0..44b1110 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -81,6 +81,9 @@ extern int ipv6_dev_get_saddr(struct net *net,
+ const struct in6_addr *daddr,
+ unsigned int srcprefs,
+ struct in6_addr *saddr);
++extern int __ipv6_get_lladdr(struct inet6_dev *idev,
++ struct in6_addr *addr,
++ unsigned char banned_flags);
+ extern int ipv6_get_lladdr(struct net_device *dev,
+ struct in6_addr *addr,
+ unsigned char banned_flags);
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 3b285f4..e158330 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -180,6 +180,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
+ extern void udp_err(struct sk_buff *, u32);
+ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len);
++extern int udp_push_pending_frames(struct sock *sk);
+ extern void udp_flush_pending_frames(struct sock *sk);
+ extern int udp_rcv(struct sk_buff *skb);
+ extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+index 75271b9..7d28aff 100644
+--- a/include/xen/interface/io/ring.h
++++ b/include/xen/interface/io/ring.h
+@@ -188,6 +188,11 @@ struct __name##_back_ring { \
+ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
++/* Ill-behaved frontend determination: Can there be this many requests? */
++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
++ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
++
++
+ #define RING_PUSH_REQUESTS(_r) do { \
+ wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 0ec6c34..a584ad9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -631,7 +631,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+
+ memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+ max_data->pid = tsk->pid;
+- max_data->uid = task_uid(tsk);
++ /*
++ * If tsk == current, then use current_uid(), as that does not use
++ * RCU. The irq tracer can be called out of RCU scope.
++ */
++ if (tsk == current)
++ max_data->uid = current_uid();
++ else
++ max_data->uid = task_uid(tsk);
++
+ max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+ max_data->policy = tsk->policy;
+ max_data->rt_priority = tsk->rt_priority;
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 0cccca8..b40d3da 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -72,6 +72,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct vlan_priority_tci_mapping *mp;
+
++ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
++
+ mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+ while (mp) {
+ if (mp->priority == skb->priority) {
+@@ -232,6 +234,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
++	/* Before inserting this element into the hash table, make sure all
++	 * its fields are committed to memory.
++	 * Paired with the smp_rmb() in vlan_dev_get_egress_qos_mask().
++ */
++ smp_wmb();
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
+index de8df95..2ee3879 100644
+--- a/net/9p/trans_common.c
++++ b/net/9p/trans_common.c
+@@ -24,11 +24,11 @@
+ */
+ void p9_release_pages(struct page **pages, int nr_pages)
+ {
+- int i = 0;
+- while (pages[i] && nr_pages--) {
+- put_page(pages[i]);
+- i++;
+- }
++ int i;
++
++ for (i = 0; i < nr_pages; i++)
++ if (pages[i])
++ put_page(pages[i]);
+ }
+ EXPORT_SYMBOL(p9_release_pages);
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 5ac1811..b81500c 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -467,8 +467,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ skb_set_transport_header(skb, skb->len);
+ mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+- interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+- br->multicast_query_response_interval;
++ interval = ipv6_addr_any(group) ?
++ br->multicast_query_response_interval :
++ br->multicast_last_member_interval;
+
+ mldq->mld_type = ICMPV6_MGM_QUERY;
+ mldq->mld_code = 0;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5b9709f..0ea3fd3 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -237,7 +237,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+ we must kill timers etc. and move
+ it to safe state.
+ */
+- skb_queue_purge(&n->arp_queue);
++ __skb_queue_purge(&n->arp_queue);
+ n->output = neigh_blackhole;
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+@@ -291,7 +291,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+ if (!n)
+ goto out_entries;
+
+- skb_queue_head_init(&n->arp_queue);
++ __skb_queue_head_init(&n->arp_queue);
+ rwlock_init(&n->lock);
+ seqlock_init(&n->ha_lock);
+ n->updated = n->used = now;
+@@ -701,7 +701,9 @@ void neigh_destroy(struct neighbour *neigh)
+ if (neigh_del_timer(neigh))
+ printk(KERN_WARNING "Impossible event.\n");
+
+- skb_queue_purge(&neigh->arp_queue);
++ write_lock_bh(&neigh->lock);
++ __skb_queue_purge(&neigh->arp_queue);
++ write_unlock_bh(&neigh->lock);
+
+ dev_put(neigh->dev);
+ neigh_parms_put(neigh->parms);
+@@ -843,7 +845,7 @@ static void neigh_invalidate(struct neighbour *neigh)
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+
+ static void neigh_probe(struct neighbour *neigh)
+@@ -1176,7 +1178,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+
+ write_lock_bh(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+ out:
+ if (update_isrouter) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5a65eea..5decc93 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -766,7 +766,7 @@ send:
+ /*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+-static int udp_push_pending_frames(struct sock *sk)
++int udp_push_pending_frames(struct sock *sk)
+ {
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+@@ -785,6 +785,7 @@ out:
+ up->pending = 0;
+ return err;
+ }
++EXPORT_SYMBOL(udp_push_pending_frames);
+
+ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d603caa..314bda2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1236,6 +1236,23 @@ try_nextdev:
+ }
+ EXPORT_SYMBOL(ipv6_dev_get_saddr);
+
++int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
++ unsigned char banned_flags)
++{
++ struct inet6_ifaddr *ifp;
++ int err = -EADDRNOTAVAIL;
++
++ list_for_each_entry(ifp, &idev->addr_list, if_list) {
++ if (ifp->scope == IFA_LINK &&
++ !(ifp->flags & banned_flags)) {
++ ipv6_addr_copy(addr, &ifp->addr);
++ err = 0;
++ break;
++ }
++ }
++ return err;
++}
++
+ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+ {
+@@ -1245,17 +1262,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev) {
+- struct inet6_ifaddr *ifp;
+-
+ read_lock_bh(&idev->lock);
+- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+- if (ifp->scope == IFA_LINK &&
+- !(ifp->flags & banned_flags)) {
+- ipv6_addr_copy(addr, &ifp->addr);
+- err = 0;
+- break;
+- }
+- }
++ err = __ipv6_get_lladdr(idev, addr, banned_flags);
+ read_unlock_bh(&idev->lock);
+ }
+ rcu_read_unlock();
+@@ -2434,6 +2442,9 @@ static void init_loopback(struct net_device *dev)
+ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+ continue;
+
++ if (sp_ifa->rt)
++ continue;
++
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6aadaa8..db60043 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -909,11 +909,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
+ const struct flowi6 *fl6)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct rt6_info *rt = (struct rt6_info *)dst;
++ struct rt6_info *rt;
+
+ if (!dst)
+ goto out;
+
++ if (dst->ops->family != AF_INET6) {
++ dst_release(dst);
++ return NULL;
++ }
++
++ rt = (struct rt6_info *)dst;
+ /* Yes, checking route validity in not connected
+ * case is not very simple. Take into account,
+ * that we do not support routing by source, TOS,
+@@ -1178,11 +1184,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+ return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+ }
+
+-static void ip6_append_data_mtu(int *mtu,
++static void ip6_append_data_mtu(unsigned int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+- struct rt6_info *rt)
++ struct rt6_info *rt,
++ bool pmtuprobe)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+@@ -1194,7 +1201,9 @@ static void ip6_append_data_mtu(int *mtu,
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = dst_mtu(rt->dst.path);
++ *mtu = min(*mtu, pmtuprobe ?
++ rt->dst.dev->mtu :
++ dst_mtu(rt->dst.path));
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1211,11 +1220,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen;
++ unsigned int maxfraglen, fragheaderlen, mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+- int mtu;
+ int copy;
+ int err;
+ int offset = 0;
+@@ -1378,7 +1386,9 @@ alloc_new_skb:
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+- fragheaderlen, skb, rt);
++ fragheaderlen, skb, rt,
++ np->pmtudisc ==
++ IPV6_PMTUDISC_PROBE);
+
+ skb_prev = skb;
+
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index f2d74ea..c7ec4bb 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1334,8 +1334,9 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
+ return scount;
+ }
+
+-static struct sk_buff *mld_newpack(struct net_device *dev, int size)
++static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+ {
++ struct net_device *dev = idev->dev;
+ struct net *net = dev_net(dev);
+ struct sock *sk = net->ipv6.igmp_sk;
+ struct sk_buff *skb;
+@@ -1358,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
+
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+- if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
++ if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ /* <draft-ietf-magma-mld-source-05.txt>:
+ * use unspecified address as the source address
+ * when a valid link-local address is not available.
+@@ -1461,7 +1462,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ struct mld2_grec *pgr;
+
+ if (!skb)
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(pmc->idev, dev->mtu);
+ if (!skb)
+ return NULL;
+ pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+@@ -1481,7 +1482,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ int type, int gdeleted, int sdeleted)
+ {
+- struct net_device *dev = pmc->idev->dev;
++ struct inet6_dev *idev = pmc->idev;
++ struct net_device *dev = idev->dev;
+ struct mld2_report *pmr;
+ struct mld2_grec *pgr = NULL;
+ struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+@@ -1510,7 +1512,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ }
+ }
+ first = 1;
+@@ -1537,7 +1539,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ pgr->grec_nsrcs = htons(scount);
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ first = 1;
+ scount = 0;
+ }
+@@ -1592,8 +1594,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ struct sk_buff *skb = NULL;
+ int type;
+
++ read_lock_bh(&idev->lock);
+ if (!pmc) {
+- read_lock_bh(&idev->lock);
+ for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ if (pmc->mca_flags & MAF_NOREPORT)
+ continue;
+@@ -1605,7 +1607,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
+- read_unlock_bh(&idev->lock);
+ } else {
+ spin_lock_bh(&pmc->mca_lock);
+ if (pmc->mca_sfcount[MCAST_EXCLUDE])
+@@ -1615,6 +1616,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
++ read_unlock_bh(&idev->lock);
+ if (skb)
+ mld_sendpack(skb);
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 20f0812..f9e496b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -893,11 +893,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
+ struct udphdr *uh;
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
++ struct flowi6 *fl6;
+ int err = 0;
+ int is_udplite = IS_UDPLITE(sk);
+ __wsum csum = 0;
+
++ if (up->pending == AF_INET)
++ return udp_push_pending_frames(sk);
++
++ fl6 = &inet->cork.fl.u.ip6;
++
+ /* Grab the skbuff where UDP header space exists. */
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ goto out;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 1e733e9..6fefdfc 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+
+@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ return 0;
+
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 74410e6..e579006 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1778,7 +1778,8 @@ static const struct proto_ops pppol2tp_ops = {
+
+ static const struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+- .ioctl = pppol2tp_ioctl
++ .ioctl = pppol2tp_ioctl,
++ .owner = THIS_MODULE,
+ };
+
+ #ifdef CONFIG_L2TP_V3
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 3e16c6a..dc24ba9 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1586,11 +1586,11 @@ out_cud_release:
+ case SIOCX25CALLACCPTAPPRV: {
+ rc = -EINVAL;
+ lock_sock(sk);
+- if (sk->sk_state != TCP_CLOSE)
+- break;
+- clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ if (sk->sk_state == TCP_CLOSE) {
++ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ rc = 0;
++ }
+ release_sock(sk);
+- rc = 0;
+ break;
+ }
+
+@@ -1598,14 +1598,15 @@ out_cud_release:
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_ESTABLISHED)
+- break;
++ goto out_sendcallaccpt_release;
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+- break;
++ goto out_sendcallaccpt_release;
+ x25_write_internal(sk, X25_CALL_ACCEPTED);
+ x25->state = X25_STATE_3;
+- release_sock(sk);
+ rc = 0;
++out_sendcallaccpt_release:
++ release_sock(sk);
+ break;
+ }
+
+diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
+index 76e0d56..823359e 100644
+--- a/sound/arm/pxa2xx-pcm-lib.c
++++ b/sound/arm/pxa2xx-pcm-lib.c
+@@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
+ } else {
+ printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
+ rtd->params->name, dma_ch, dcsr);
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ }
+ }
+ EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
+diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
+index f4b9e2b..fbf0bcd 100644
+--- a/sound/pci/asihpi/asihpi.c
++++ b/sound/pci/asihpi/asihpi.c
+@@ -768,7 +768,10 @@ static void snd_card_asihpi_timer_function(unsigned long data)
+ s->number);
+ ds->drained_count++;
+ if (ds->drained_count > 2) {
++ unsigned long flags;
++ snd_pcm_stream_lock_irqsave(s, flags);
+ snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(s, flags);
+ continue;
+ }
+ } else {
+diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
+index 15e4e5e..6faa173 100644
+--- a/sound/pci/atiixp.c
++++ b/sound/pci/atiixp.c
+@@ -688,7 +688,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
+index 57bf8f4..d752120 100644
+--- a/sound/pci/atiixp_modem.c
++++ b/sound/pci/atiixp_modem.c
+@@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d148a2b..55d9b30 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1897,6 +1897,8 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
+@@ -1943,6 +1945,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0041");
+ MODULE_ALIAS("snd-hda-codec-id:10de0042");
+ MODULE_ALIAS("snd-hda-codec-id:10de0043");
+ MODULE_ALIAS("snd-hda-codec-id:10de0044");
++MODULE_ALIAS("snd-hda-codec-id:10de0051");
++MODULE_ALIAS("snd-hda-codec-id:10de0060");
+ MODULE_ALIAS("snd-hda-codec-id:10de0067");
+ MODULE_ALIAS("snd-hda-codec-id:10de8001");
+ MODULE_ALIAS("snd-hda-codec-id:17e80047");
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index b7cf246..d58c575 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -1595,7 +1595,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+
+ static void max98088_sync_cache(struct snd_soc_codec *codec)
+ {
+- u16 *reg_cache = codec->reg_cache;
++ u8 *reg_cache = codec->reg_cache;
+ int i;
+
+ if (!codec->cache_sync)
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index bbcf921..b5d4a97 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -38,7 +38,7 @@
+ static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
+ [SGTL5000_CHIP_CLK_CTRL] = 0x0008,
+ [SGTL5000_CHIP_I2S_CTRL] = 0x0010,
+- [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
++ [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
+ [SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
+ [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
+ [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index 8a9f435..d3a68bb 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -347,7 +347,7 @@
+ #define SGTL5000_PLL_INT_DIV_MASK 0xf800
+ #define SGTL5000_PLL_INT_DIV_SHIFT 11
+ #define SGTL5000_PLL_INT_DIV_WIDTH 5
+-#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700
++#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff
+ #define SGTL5000_PLL_FRAC_DIV_SHIFT 0
+ #define SGTL5000_PLL_FRAC_DIV_WIDTH 11
+
+diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
+index 55efc2b..75babae 100644
+--- a/sound/soc/s6000/s6000-pcm.c
++++ b/sound/soc/s6000/s6000-pcm.c
+@@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data)
+ substream->runtime &&
+ snd_pcm_running(substream)) {
+ dev_dbg(pcm->dev, "xrun\n");
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ ret = IRQ_HANDLED;
+ }
+
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index d144cdb..888a7c7 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -541,7 +541,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
+ snd_pcm_uframes_t ret;
+
+ if (rt->panic || !sub)
+- return SNDRV_PCM_STATE_XRUN;
++ return SNDRV_PCM_POS_XRUN;
+
+ spin_lock_irqsave(&sub->lock, flags);
+ ret = sub->dma_off;
+@@ -640,17 +640,25 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ void usb6fire_pcm_abort(struct sfire_chip *chip)
+ {
+ struct pcm_runtime *rt = chip->pcm;
++ unsigned long flags;
+ int i;
+
+ if (rt) {
+ rt->panic = true;
+
+- if (rt->playback.instance)
++ if (rt->playback.instance) {
++ snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
+ snd_pcm_stop(rt->playback.instance,
+ SNDRV_PCM_STATE_XRUN);
+- if (rt->capture.instance)
++ snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
++ }
++
++ if (rt->capture.instance) {
++ snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
+ snd_pcm_stop(rt->capture.instance,
+ SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
++ }
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb_poison_urb(&rt->in_urbs[i].instance);
+diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
+index c0609c2..84052cf 100644
+--- a/sound/usb/misc/ua101.c
++++ b/sound/usb/misc/ua101.c
+@@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua)
+
+ static void abort_alsa_capture(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
+ snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
++ }
+ }
+
+ static void abort_alsa_playback(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
+ snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
++ }
+ }
+
+ static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index 6ffb371..d5724d8 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
+ struct snd_usX2Y_substream *subs = usX2Y->subs[s];
+ if (subs) {
+ if (atomic_read(&subs->state) >= state_PRERUNNING) {
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
+ snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
+ }
+ for (u = 0; u < NRURBS; u++) {
+ struct urb *urb = subs->urb[u];
diff --git a/3.2.54/1050_linux-3.2.51.patch b/3.2.54/1050_linux-3.2.51.patch
new file mode 100644
index 0000000..5d5832b
--- /dev/null
+++ b/3.2.54/1050_linux-3.2.51.patch
@@ -0,0 +1,3886 @@
+diff --git a/Makefile b/Makefile
+index 0799e8e..0f11936 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
+index 4759fe7..2cc3cc5 100644
+--- a/arch/alpha/Makefile
++++ b/arch/alpha/Makefile
+@@ -12,7 +12,7 @@ NM := $(NM) -B
+
+ LDFLAGS_vmlinux := -static -N #-relax
+ CHECKFLAGS += -D__alpha__ -m64
+-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
++cflags-y := -pipe -mno-fp-regs -ffixed-8
+ cflags-y += $(call cc-option, -fno-jump-tables)
+
+ cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 27bcd12..790ea68 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1,7 +1,6 @@
+ config ARM
+ bool
+ default y
+- select HAVE_AOUT
+ select HAVE_DMA_API_DEBUG
+ select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_MEMBLOCK
+diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
+deleted file mode 100644
+index 92f10cb..0000000
+--- a/arch/arm/include/asm/a.out-core.h
++++ /dev/null
+@@ -1,45 +0,0 @@
+-/* a.out coredump register dumper
+- *
+- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+- * Written by David Howells (dhowells@redhat.com)
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public Licence
+- * as published by the Free Software Foundation; either version
+- * 2 of the Licence, or (at your option) any later version.
+- */
+-
+-#ifndef _ASM_A_OUT_CORE_H
+-#define _ASM_A_OUT_CORE_H
+-
+-#ifdef __KERNEL__
+-
+-#include <linux/user.h>
+-#include <linux/elfcore.h>
+-
+-/*
+- * fill in the user structure for an a.out core dump
+- */
+-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
+-{
+- struct task_struct *tsk = current;
+-
+- dump->magic = CMAGIC;
+- dump->start_code = tsk->mm->start_code;
+- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+-
+- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
+- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+- dump->u_ssize = 0;
+-
+- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
+-
+- if (dump->start_stack < 0x04000000)
+- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+-
+- dump->regs = *regs;
+- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+-}
+-
+-#endif /* __KERNEL__ */
+-#endif /* _ASM_A_OUT_CORE_H */
+diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h
+deleted file mode 100644
+index 083894b..0000000
+--- a/arch/arm/include/asm/a.out.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-#ifndef __ARM_A_OUT_H__
+-#define __ARM_A_OUT_H__
+-
+-#include <linux/personality.h>
+-#include <linux/types.h>
+-
+-struct exec
+-{
+- __u32 a_info; /* Use macros N_MAGIC, etc for access */
+- __u32 a_text; /* length of text, in bytes */
+- __u32 a_data; /* length of data, in bytes */
+- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
+- __u32 a_syms; /* length of symbol table data in file, in bytes */
+- __u32 a_entry; /* start address */
+- __u32 a_trsize; /* length of relocation info for text, in bytes */
+- __u32 a_drsize; /* length of relocation info for data, in bytes */
+-};
+-
+-/*
+- * This is always the same
+- */
+-#define N_TXTADDR(a) (0x00008000)
+-
+-#define N_TRSIZE(a) ((a).a_trsize)
+-#define N_DRSIZE(a) ((a).a_drsize)
+-#define N_SYMSIZE(a) ((a).a_syms)
+-
+-#define M_ARM 103
+-
+-#ifndef LIBRARY_START_TEXT
+-#define LIBRARY_START_TEXT (0x00c00000)
+-#endif
+-
+-#endif /* __A_OUT_GNU_H__ */
+diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
+index b2d9df5..3352451 100644
+--- a/arch/arm/include/asm/processor.h
++++ b/arch/arm/include/asm/processor.h
+@@ -54,7 +54,6 @@ struct thread_struct {
+
+ #define start_thread(regs,pc,sp) \
+ ({ \
+- unsigned long *stack = (unsigned long *)sp; \
+ set_fs(USER_DS); \
+ memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ if (current->personality & ADDR_LIMIT_32BIT) \
+@@ -66,9 +65,6 @@ struct thread_struct {
+ regs->ARM_cpsr |= PSR_ENDSTATE; \
+ regs->ARM_pc = pc & ~1; /* pc */ \
+ regs->ARM_sp = sp; /* sp */ \
+- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
+- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
+- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
+ nommu_start_thread(regs); \
+ })
+
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index 778d248..4a2db48 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
+ static int
+ armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ {
+- int mapping = (*event_map)[config];
++ int mapping;
++
++ if (config >= PERF_COUNT_HW_MAX)
++ return -ENOENT;
++
++ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+ }
+
+@@ -326,6 +331,9 @@ validate_event(struct pmu_hw_events *hw_events,
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
++ if (is_software_event(event))
++ return 1;
++
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
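The perf fix above is a bounds check on attacker-influenced input: config arrives from userspace as a u64 and was used directly to index a PERF_COUNT_HW_MAX-sized table. Rejecting out-of-range values before the lookup closes the out-of-bounds read. A runnable sketch of the same check against a hypothetical four-entry table:

#include <errno.h>
#include <stdio.h>

#define HW_OP_UNSUPPORTED 0xFFFF
#define EVENT_MAX 4
static const unsigned event_map[EVENT_MAX] = { 1, HW_OP_UNSUPPORTED, 3, 4 };

static int map_event(unsigned long long config)
{
	unsigned mapping;

	if (config >= EVENT_MAX)	/* validate before indexing */
		return -ENOENT;

	mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
}

int main(void)
{
	/* in range, unsupported, out of range: 1 -2 -2 */
	printf("%d %d %d\n", map_event(0), map_event(1), map_event(99));
	return 0;
}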
+diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
+index a6990cb..a68b983 100644
+--- a/arch/cris/kernel/vmlinux.lds.S
++++ b/arch/cris/kernel/vmlinux.lds.S
+@@ -52,6 +52,7 @@ SECTIONS
+
+ EXCEPTION_TABLE(4)
+
++ _sdata = .;
+ RODATA
+
+ . = ALIGN (4);
+diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
+index e711ace..54891e1 100644
+--- a/arch/hexagon/kernel/dma.c
++++ b/arch/hexagon/kernel/dma.c
+@@ -22,6 +22,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/genalloc.h>
+ #include <asm/dma-mapping.h>
++#include <linux/module.h>
+
+ struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
+index bea3f08..8fe0349 100644
+--- a/arch/hexagon/kernel/ptrace.c
++++ b/arch/hexagon/kernel/ptrace.c
+@@ -28,6 +28,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/regset.h>
+ #include <linux/user.h>
++#include <linux/elf.h>
+
+ #include <asm/system.h>
+ #include <asm/user.h>
+diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
+index 6bee15c..5d9b33b 100644
+--- a/arch/hexagon/kernel/time.c
++++ b/arch/hexagon/kernel/time.c
+@@ -28,6 +28,7 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/module.h>
+
+ #include <asm/timer-regs.h>
+ #include <asm/hexagon_vm.h>
+diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
+index 16277c3..e4ceedb 100644
+--- a/arch/hexagon/kernel/vdso.c
++++ b/arch/hexagon/kernel/vdso.c
+@@ -21,6 +21,7 @@
+ #include <linux/err.h>
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
++#include <linux/binfmts.h>
+
+ #include <asm/vdso.h>
+
+diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
+index 177716b..01729c2 100644
+--- a/arch/m32r/boot/compressed/Makefile
++++ b/arch/m32r/boot/compressed/Makefile
+@@ -43,9 +43,9 @@ endif
+
+ OBJCOPYFLAGS += -R .empty_zero_page
+
+-suffix_$(CONFIG_KERNEL_GZIP) = gz
+-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+-suffix_$(CONFIG_KERNEL_LZMA) = lzma
++suffix-$(CONFIG_KERNEL_GZIP) = gz
++suffix-$(CONFIG_KERNEL_BZIP2) = bz2
++suffix-$(CONFIG_KERNEL_LZMA) = lzma
+
+ $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
+index 370d608..28a0952 100644
+--- a/arch/m32r/boot/compressed/misc.c
++++ b/arch/m32r/boot/compressed/misc.c
+@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
+ static unsigned long free_mem_end_ptr;
+
+ #ifdef CONFIG_KERNEL_BZIP2
+-static void *memset(void *s, int c, size_t n)
++void *memset(void *s, int c, size_t n)
+ {
+ char *ss = s;
+
+@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
+ #endif
+
+ #ifdef CONFIG_KERNEL_GZIP
++void *memcpy(void *dest, const void *src, size_t n)
++{
++ char *d = dest;
++ const char *s = src;
++ while (n--)
++ *d++ = *s++;
++
++ return dest;
++}
++
+ #define BOOT_HEAP_SIZE 0x10000
+ #include "../../../../lib/decompress_inflate.c"
+ #endif
+diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
+index 2291a7d..fa277ae 100644
+--- a/arch/m68k/emu/natfeat.c
++++ b/arch/m68k/emu/natfeat.c
+@@ -18,9 +18,11 @@
+ #include <asm/machdep.h>
+ #include <asm/natfeat.h>
+
++extern long nf_get_id2(const char *feature_name);
++
+ asm("\n"
+-" .global nf_get_id,nf_call\n"
+-"nf_get_id:\n"
++" .global nf_get_id2,nf_call\n"
++"nf_get_id2:\n"
+ " .short 0x7300\n"
+ " rts\n"
+ "nf_call:\n"
+@@ -29,12 +31,25 @@ asm("\n"
+ "1: moveq.l #0,%d0\n"
+ " rts\n"
+ " .section __ex_table,\"a\"\n"
+-" .long nf_get_id,1b\n"
++" .long nf_get_id2,1b\n"
+ " .long nf_call,1b\n"
+ " .previous");
+-EXPORT_SYMBOL_GPL(nf_get_id);
+ EXPORT_SYMBOL_GPL(nf_call);
+
++long nf_get_id(const char *feature_name)
++{
++ /* feature_name may be in vmalloc()ed memory, so make a copy */
++ char name_copy[32];
++ size_t n;
++
++ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
++ if (n >= sizeof(name_copy))
++ return 0;
++
++ return nf_get_id2(name_copy);
++}
++EXPORT_SYMBOL_GPL(nf_get_id);
++
+ void nfprint(const char *fmt, ...)
+ {
+ static char buf[256];
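The natfeat change above exists because the NatFeat trap cannot recover from a fault on vmalloc()ed memory: nf_get_id() now copies the name into a fixed stack buffer with strlcpy() and treats truncation (a return value >= the buffer size) as "feature not found". A runnable, portable stand-in for that strlcpy() contract (bounded_copy and the feature name are invented for the demo):

#include <stdio.h>
#include <string.h>

/* Copy at most size-1 bytes, always NUL-terminate, and return the
 * full source length so the caller can detect truncation, which is
 * the contract nf_get_id() relies on above. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t n = strlen(src);

	if (size) {
		size_t copy = n >= size ? size - 1 : n;
		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return n;
}

int main(void)
{
	char name[8];

	if (bounded_copy(name, "NF_SOME_LONG_FEATURE", sizeof(name)) >= sizeof(name))
		printf("truncated: treat as not found\n");
	return 0;
}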
+diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
+index edb6614..7558032 100644
+--- a/arch/m68k/include/asm/div64.h
++++ b/arch/m68k/include/asm/div64.h
+@@ -13,16 +13,17 @@
+ unsigned long long n64; \
+ } __n; \
+ unsigned long __rem, __upper; \
++ unsigned long __base = (base); \
+ \
+ __n.n64 = (n); \
+ if ((__upper = __n.n32[0])) { \
+ asm ("divul.l %2,%1:%0" \
+- : "=d" (__n.n32[0]), "=d" (__upper) \
+- : "d" (base), "0" (__n.n32[0])); \
++ : "=d" (__n.n32[0]), "=d" (__upper) \
++ : "d" (__base), "0" (__n.n32[0])); \
+ } \
+ asm ("divu.l %2,%1:%0" \
+- : "=d" (__n.n32[1]), "=d" (__rem) \
+- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
++ : "=d" (__n.n32[1]), "=d" (__rem) \
++ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
+ (n) = __n.n64; \
+ __rem; \
+ })
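The div64 change caches base in a local because the macro previously expanded its argument into both asm statements: an argument with side effects, such as a function call, would have been evaluated twice. A runnable GCC-C illustration of the same single-evaluation rule (DIV_REM and next_base are invented for the demo):

#include <stdio.h>

/* Evaluate base exactly once, then use the cached copy everywhere,
 * mirroring the __base local introduced in the patch. */
#define DIV_REM(n, base) ({			\
	typeof(base) __base = (base);		\
	typeof(n) __rem = (n) % __base;		\
	(n) /= __base;				\
	__rem;					\
})

static unsigned next_base(void)
{
	static unsigned calls;
	return 10 + ++calls;	/* a different value on every call */
}

int main(void)
{
	unsigned long long n = 1234;
	unsigned long long rem = DIV_REM(n, next_base());  /* runs once */

	printf("n=%llu rem=%llu\n", n, rem);	/* n=112 rem=2 */
	return 0;
}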
+diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
+index b3f5eec..a470f57 100644
+--- a/arch/microblaze/configs/mmu_defconfig
++++ b/arch/microblaze/configs/mmu_defconfig
+@@ -1,25 +1,22 @@
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_FHANDLE=y
++CONFIG_AUDIT=y
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
++CONFIG_SYSFS_DEPRECATED=y
+ CONFIG_SYSFS_DEPRECATED_V2=y
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
+-CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_EXPERT=y
+ CONFIG_KALLSYMS_ALL=y
+-CONFIG_KALLSYMS_EXTRA_PASS=y
+-# CONFIG_HOTPLUG is not set
+ # CONFIG_BASE_FULL is not set
+-# CONFIG_FUTEX is not set
+-# CONFIG_EPOLL is not set
+-# CONFIG_SIGNALFD is not set
+-# CONFIG_SHMEM is not set
++CONFIG_EMBEDDED=y
+ CONFIG_SLAB=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_EFI_PARTITION is not set
+ CONFIG_OPT_LIB_ASM=y
+ CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
+@@ -37,33 +34,53 @@ CONFIG_UNIX=y
+ CONFIG_INET=y
+ # CONFIG_INET_LRO is not set
+ # CONFIG_IPV6 is not set
++CONFIG_MTD=y
+ CONFIG_PROC_DEVICETREE=y
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_SIZE=8192
+ CONFIG_NETDEVICES=y
+-CONFIG_NET_ETHERNET=y
+ CONFIG_XILINX_EMACLITE=y
++CONFIG_XILINX_LL_TEMAC=y
+ # CONFIG_INPUT is not set
+ # CONFIG_SERIO is not set
+ # CONFIG_VT is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_UARTLITE=y
+ CONFIG_SERIAL_UARTLITE_CONSOLE=y
+ # CONFIG_HW_RANDOM is not set
++CONFIG_XILINX_HWICAP=y
++CONFIG_I2C=y
++CONFIG_I2C_XILINX=y
++CONFIG_SPI=y
++CONFIG_SPI_XILINX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_XILINX=y
+ # CONFIG_HWMON is not set
++CONFIG_WATCHDOG=y
++CONFIG_XILINX_WATCHDOG=y
++CONFIG_FB=y
++CONFIG_FB_XILINX=y
+ # CONFIG_USB_SUPPORT is not set
++CONFIG_UIO=y
++CONFIG_UIO_PDRV=y
++CONFIG_UIO_PDRV_GENIRQ=y
++CONFIG_UIO_DMEM_GENIRQ=y
+ CONFIG_EXT2_FS=y
+ # CONFIG_DNOTIFY is not set
++CONFIG_CRAMFS=y
++CONFIG_ROMFS_FS=y
+ CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+ CONFIG_CIFS=y
+ CONFIG_CIFS_STATS=y
+ CONFIG_CIFS_STATS2=y
+-CONFIG_PARTITION_ADVANCED=y
+-CONFIG_DEBUG_KERNEL=y
+ CONFIG_DETECT_HUNG_TASK=y
+ CONFIG_DEBUG_SLAB=y
+ CONFIG_DEBUG_SPINLOCK=y
+ CONFIG_DEBUG_INFO=y
+-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+ CONFIG_EARLY_PRINTK=y
++CONFIG_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
+index 0249e4b..5454a6d 100644
+--- a/arch/microblaze/configs/nommu_defconfig
++++ b/arch/microblaze/configs/nommu_defconfig
+@@ -1,41 +1,40 @@
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
++CONFIG_FHANDLE=y
++CONFIG_AUDIT=y
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_BSD_PROCESS_ACCT_V3=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
++CONFIG_SYSFS_DEPRECATED=y
+ CONFIG_SYSFS_DEPRECATED_V2=y
+-CONFIG_EXPERT=y
+ CONFIG_KALLSYMS_ALL=y
+-CONFIG_KALLSYMS_EXTRA_PASS=y
+-# CONFIG_HOTPLUG is not set
+ # CONFIG_BASE_FULL is not set
++CONFIG_EMBEDDED=y
+ CONFIG_SLAB=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+-# CONFIG_OPT_LIB_FUNCTION is not set
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_EFI_PARTITION is not set
+ CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
+ CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
+ CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
+ CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
+-CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_100=y
+ CONFIG_CMDLINE_BOOL=y
+-CONFIG_BINFMT_FLAT=y
++CONFIG_CMDLINE_FORCE=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+ CONFIG_UNIX=y
+ CONFIG_INET=y
+ # CONFIG_INET_LRO is not set
+ # CONFIG_IPV6 is not set
+-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+ CONFIG_MTD=y
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+ CONFIG_MTD_CMDLINE_PARTS=y
+ CONFIG_MTD_CHAR=y
+ CONFIG_MTD_BLOCK=y
+@@ -45,41 +44,55 @@ CONFIG_MTD_CFI_AMDSTD=y
+ CONFIG_MTD_RAM=y
+ CONFIG_MTD_UCLINUX=y
+ CONFIG_PROC_DEVICETREE=y
+-CONFIG_BLK_DEV_NBD=y
+ CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=8192
+ CONFIG_NETDEVICES=y
+-CONFIG_NET_ETHERNET=y
++CONFIG_XILINX_EMACLITE=y
++CONFIG_XILINX_LL_TEMAC=y
+ # CONFIG_INPUT is not set
+ # CONFIG_SERIO is not set
+ # CONFIG_VT is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_UARTLITE=y
+ CONFIG_SERIAL_UARTLITE_CONSOLE=y
+-CONFIG_HW_RANDOM=y
++# CONFIG_HW_RANDOM is not set
++CONFIG_XILINX_HWICAP=y
++CONFIG_I2C=y
++CONFIG_I2C_XILINX=y
++CONFIG_SPI=y
++CONFIG_SPI_XILINX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_XILINX=y
+ # CONFIG_HWMON is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=y
++CONFIG_WATCHDOG=y
++CONFIG_XILINX_WATCHDOG=y
++CONFIG_FB=y
++CONFIG_FB_XILINX=y
++# CONFIG_USB_SUPPORT is not set
++CONFIG_UIO=y
++CONFIG_UIO_PDRV=y
++CONFIG_UIO_PDRV_GENIRQ=y
++CONFIG_UIO_DMEM_GENIRQ=y
+ CONFIG_EXT2_FS=y
+ # CONFIG_DNOTIFY is not set
+ CONFIG_CRAMFS=y
+ CONFIG_ROMFS_FS=y
+ CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+ CONFIG_NFS_V3_ACL=y
+-CONFIG_UNUSED_SYMBOLS=y
+-CONFIG_DEBUG_FS=y
+-CONFIG_DEBUG_KERNEL=y
+-CONFIG_DEBUG_SHIRQ=y
++CONFIG_NLS=y
+ CONFIG_DETECT_HUNG_TASK=y
+-CONFIG_SCHEDSTATS=y
+-CONFIG_TIMER_STATS=y
+-CONFIG_DEBUG_OBJECTS=y
+-CONFIG_DEBUG_OBJECTS_SELFTEST=y
+-CONFIG_DEBUG_OBJECTS_FREE=y
+-CONFIG_DEBUG_OBJECTS_TIMERS=y
++CONFIG_DEBUG_SLAB=y
++CONFIG_DEBUG_SPINLOCK=y
+ CONFIG_DEBUG_INFO=y
+-CONFIG_DEBUG_LIST=y
+-CONFIG_DEBUG_SG=y
+-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+-CONFIG_SYSCTL_SYSCALL_CHECK=y
+ CONFIG_EARLY_PRINTK=y
++CONFIG_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_ARC4=y
++CONFIG_CRYPTO_DES=y
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+-# CONFIG_CRC32 is not set
+diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
+index b0526d2..ff8cde1 100644
+--- a/arch/microblaze/include/asm/futex.h
++++ b/arch/microblaze/include/asm/futex.h
+@@ -24,7 +24,7 @@
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (oldval), "=&r" (ret) \
+- : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
++ : "r" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ ); \
+ })
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 951e18f..16ef838 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -937,6 +937,7 @@ config RELOCATABLE
+ must live at a different physical address than the primary
+ kernel.
+
++# This value must have zeroes in the bottom 60 bits, otherwise a lot of code will break
+ config PAGE_OFFSET
+ hex
+ default "0xc000000000000000"
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index dd9c4fd..5b0bde2 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -132,9 +132,19 @@ extern phys_addr_t kernstart_addr;
+ #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE))
+ #define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+ #else
++#ifdef CONFIG_PPC64
++/*
++ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
++ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
++ */
++#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
++#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
++
++#else /* 32-bit, non book E */
+ #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
+ #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
+ #endif
++#endif
+
+ /*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
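The __va()/__pa() rewrite leans on the invariant spelled out in the Kconfig note above: PAGE_OFFSET is 0xc000000000000000, with zeroes in its low 60 bits, so for linear-map addresses OR-ing in PAGE_OFFSET equals adding it, masking with 0x0fffffffffffffff equals subtracting it, and the bitwise forms sidestep the gcc -mcmodel=medium miscompile. A runnable check of that equivalence:

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET 0xc000000000000000ULL

int main(void)
{
	uint64_t phys = 0x12345000ULL;	/* any address below 1 << 60 */
	uint64_t virt_add = phys + PAGE_OFFSET;
	uint64_t virt_or  = phys | PAGE_OFFSET;
	uint64_t back     = virt_or & 0x0fffffffffffffffULL;

	printf("%llx %llx %llx\n",
	       (unsigned long long)virt_add,
	       (unsigned long long)virt_or,
	       (unsigned long long)back);
	return (virt_add == virt_or && back == phys) ? 0 : 1;
}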
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index 84daabe..826681d 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -37,7 +37,13 @@
+ #include <asm/vdso_datapage.h>
+ #include <asm/vio.h>
+ #include <asm/mmu.h>
++#include <asm/machdep.h>
+
++
++/*
++ * This isn't a module, but we expose it to userspace
++ * via /proc, so leave the definitions here.
++ */
+ #define MODULE_VERS "1.9"
+ #define MODULE_NAME "lparcfg"
+
+@@ -487,7 +493,8 @@ static void parse_em_data(struct seq_file *m)
+ {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
++ if (firmware_has_feature(FW_FEATURE_LPAR) &&
++ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+ }
+
+@@ -772,7 +779,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
+ }
+
+ static const struct file_operations lparcfg_fops = {
+- .owner = THIS_MODULE,
+ .read = seq_read,
+ .write = lparcfg_write,
+ .open = lparcfg_open,
+@@ -799,15 +805,4 @@ static int __init lparcfg_init(void)
+ proc_ppc64_lparcfg = ent;
+ return 0;
+ }
+-
+-static void __exit lparcfg_cleanup(void)
+-{
+- if (proc_ppc64_lparcfg)
+- remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
+-}
+-
+-module_init(lparcfg_init);
+-module_exit(lparcfg_cleanup);
+-MODULE_DESCRIPTION("Interface for LPAR configuration data");
+-MODULE_AUTHOR("Dave Engebretsen");
+-MODULE_LICENSE("GPL");
++machine_device_initcall(pseries, lparcfg_init);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 4db9b1e..dd072b1 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -469,6 +469,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+
+ static void __vcpu_run(struct kvm_vcpu *vcpu)
+ {
++ int rc;
++
+ memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+
+ if (need_resched())
+@@ -479,21 +481,24 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
+
+ kvm_s390_deliver_pending_interrupts(vcpu);
+
++ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
++ atomic_read(&vcpu->arch.sie_block->cpuflags));
++
+ vcpu->arch.sie_block->icptcode = 0;
+ local_irq_disable();
+ kvm_guest_enter();
+ local_irq_enable();
+- VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+- atomic_read(&vcpu->arch.sie_block->cpuflags));
+- if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
++ rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
++ local_irq_disable();
++ kvm_guest_exit();
++ local_irq_enable();
++
++ if (rc) {
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ }
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+- local_irq_disable();
+- kvm_guest_exit();
+- local_irq_enable();
+
+ memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+ }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index f210d51..87537e2 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -31,6 +31,7 @@ config SPARC
+
+ config SPARC32
+ def_bool !64BIT
++ select GENERIC_ATOMIC64
+
+ config SPARC64
+ def_bool 64BIT
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index 5c3c8b6..07dd35e 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -15,6 +15,8 @@
+
+ #ifdef __KERNEL__
+
++#include <asm-generic/atomic64.h>
++
+ #include <asm/system.h>
+
+ #define ATOMIC_INIT(i) { (i) }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index a3fc437..4961516 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -40,7 +40,7 @@ lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
+
+ obj-y += iomap.o
+-obj-$(CONFIG_SPARC32) += atomic32.o
++obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
+ obj-y += ksyms.o
+ obj-$(CONFIG_SPARC64) += PeeCeeI.o
+ obj-y += usercopy.o
+diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c
+new file mode 100644
+index 0000000..1e06ed5
+--- /dev/null
++++ b/arch/sparc/lib/ucmpdi2.c
+@@ -0,0 +1,19 @@
++#include <linux/module.h>
++#include "libgcc.h"
++
++word_type __ucmpdi2(unsigned long long a, unsigned long long b)
++{
++ const DWunion au = {.ll = a};
++ const DWunion bu = {.ll = b};
++
++ if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
++ return 0;
++ else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
++ return 2;
++ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
++ return 0;
++ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
++ return 2;
++ return 1;
++}
++EXPORT_SYMBOL(__ucmpdi2);
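__ucmpdi2() is the libgcc helper gcc emits for unsigned 64-bit comparisons on 32-bit targets; it becomes necessary here because selecting GENERIC_ATOMIC64 introduces such comparisons on sparc32. The contract is: return 0 for a < b, 1 for a == b, 2 for a > b. A runnable restatement of that contract using plain comparisons instead of the DWunion high/low halves:

#include <stdio.h>

/* Semantically equivalent to the kernel's word-by-word version. */
static int ucmpdi2(unsigned long long a, unsigned long long b)
{
	if (a < b)
		return 0;
	return a > b ? 2 : 1;
}

int main(void)
{
	/* expect: 0 1 2 */
	printf("%d %d %d\n", ucmpdi2(1, 2), ucmpdi2(5, 5), ucmpdi2(9, 2));
	return 0;
}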
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index 739d859..fa4ea09 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -51,7 +51,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
+ clts();
+ if (cpu_has_fxsr) {
+ memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+- asm volatile("fxsave %0" : : "m" (fx_scratch));
++ asm volatile("fxsave %0" : "+m" (fx_scratch));
+ mask = fx_scratch.mxcsr_mask;
+ if (mask == 0)
+ mask = 0x0000ffbf;
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 0514890..cdb2fc9 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = current->mm->mmap_legacy_base;
+ *end = TASK_SIZE;
+ }
+ }
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 845df68..5c1ae28 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -112,12 +112,14 @@ static unsigned long mmap_legacy_base(void)
+ */
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
++ mm->mmap_legacy_base = mmap_legacy_base();
++ mm->mmap_base = mmap_base();
++
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 6e5a7f1..4d54b38 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -212,6 +212,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+ e820_add_region(start, end - start, type);
+ }
+
++void xen_ignore_unusable(struct e820entry *list, size_t map_size)
++{
++ struct e820entry *entry;
++ unsigned int i;
++
++ for (i = 0, entry = list; i < map_size; i++, entry++) {
++ if (entry->type == E820_UNUSABLE)
++ entry->type = E820_RAM;
++ }
++}
++
+ /**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+@@ -250,6 +261,17 @@ char * __init xen_memory_setup(void)
+ }
+ BUG_ON(rc);
+
++ /*
++ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
++ * regions, so if we're using the machine memory map leave the
++ * region as RAM as it is in the pseudo-physical map.
++ *
++ * UNUSABLE regions in domUs are not handled and will need
++ * a patch in the future.
++ */
++ if (xen_initial_domain())
++ xen_ignore_unusable(map, memmap.nr_entries);
++
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
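The Xen hunk retypes E820_UNUSABLE entries as E820_RAM for dom0 because the hypervisor refuses to create 1:1 mappings for UNUSABLE regions of the machine map. A runnable model of the rewrite loop (the struct and type constants are simplified stand-ins for the kernel's e820 definitions):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define E820_RAM      1
#define E820_UNUSABLE 5

struct e820entry {
	uint64_t addr;
	uint64_t size;
	uint32_t type;
};

static void ignore_unusable(struct e820entry *list, size_t map_size)
{
	for (size_t i = 0; i < map_size; i++)
		if (list[i].type == E820_UNUSABLE)
			list[i].type = E820_RAM;	/* keep it mappable */
}

int main(void)
{
	struct e820entry map[] = {
		{ 0x000000, 0x9f000, E820_RAM },
		{ 0x100000, 0x01000, E820_UNUSABLE },
	};

	ignore_unusable(map, 2);
	printf("%u %u\n", map[0].type, map[1].type);	/* 1 1 */
	return 0;
}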
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index a1a4b8e..c749b93 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -117,6 +117,7 @@ struct acpi_battery {
+ struct acpi_device *device;
+ struct notifier_block pm_nb;
+ unsigned long update_time;
++ int revision;
+ int rate_now;
+ int capacity_now;
+ int voltage_now;
+@@ -350,6 +351,7 @@ static struct acpi_offsets info_offsets[] = {
+ };
+
+ static struct acpi_offsets extended_info_offsets[] = {
++ {offsetof(struct acpi_battery, revision), 0},
+ {offsetof(struct acpi_battery, power_unit), 0},
+ {offsetof(struct acpi_battery, design_capacity), 0},
+ {offsetof(struct acpi_battery, full_charge_capacity), 0},
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index cf047c4..e7b3a9e 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -93,7 +93,7 @@ config SATA_FSL
+ If unsure, say N.
+
+ config SATA_INIC162X
+- tristate "Initio 162x SATA support"
++ tristate "Initio 162x SATA support (Very Experimental)"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index f63a588..f5c35be 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
+
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+- * host controller, 3726 PMP will very rarely drop a deferred
++ * host controller, 3x26 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+- if (vendor == 0x1095 && devid == 0x3726) {
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to read Sil3726 Private Register";
++ reason = "failed to read Sil3x26 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to write Sil3726 Private Register";
++ reason = "failed to write Sil3x26 Private Register";
+ goto fail;
+ }
+ }
+@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+- if (vendor == 0x1095 && devid == 0x3726) {
+- /* sil3726 quirks */
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
++ /* sil3x26 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 5c7d70c..3a8b55e 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -6,6 +6,18 @@
+ *
+ * This file is released under GPL v2.
+ *
++ * **** WARNING ****
++ *
++ * This driver never worked properly and unfortunately data corruption is
++ * relatively common. There isn't anyone working on the driver and there's
++ * no support from the vendor. Do not use this driver in any production
++ * environment.
++ *
++ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
++ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
++ *
++ * *****************
++ *
+ * This controller is eccentric and easily locks up if something isn't
+ * right. Documentation is available at initio's website but it only
+ * documents registers (not programming model).
+@@ -809,6 +821,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
+
++ dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
++
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 8272d92..732ad0d 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -172,6 +172,8 @@ static ssize_t show_mem_removable(struct sys_device *dev,
+ container_of(dev, struct memory_block, sysdev);
+
+ for (i = 0; i < sections_per_block; i++) {
++ if (!present_section_nr(mem->start_section_nr + i))
++ continue;
+ pfn = section_nr_to_pfn(mem->start_section_nr + i);
+ ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ }
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 79038e5..6790cf7 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -751,8 +751,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+- if (genpd->suspend_power_off
+- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
++ if (genpd->suspend_power_off)
+ return 0;
+
+ /*
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 853fdf8..bde72f7 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -89,6 +89,11 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe056) },
++ { USB_DEVICE(0x0489, 0xe04d) },
++ { USB_DEVICE(0x04c5, 0x1330) },
++ { USB_DEVICE(0x13d3, 0x3402) },
++ { USB_DEVICE(0x0cf3, 0x3121) },
++ { USB_DEVICE(0x0cf3, 0xe003) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -125,6 +130,11 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6b784b7..1bd3924 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -63,6 +63,9 @@ static struct usb_device_id btusb_table[] = {
+ /* Apple-specific (Broadcom) devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+
++ /* MediaTek MT76x0E */
++ { USB_DEVICE(0x0e8d, 0x763f) },
++
+ /* Broadcom SoftSailing reporting vendor specific */
+ { USB_DEVICE(0x0a5c, 0x21e1) },
+
+@@ -156,6 +159,11 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index d5ae736..c68b8ad 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -257,9 +257,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+- list_for_each_entry(port, &portdev->ports, list)
+- if (port->cdev->dev == dev)
++ list_for_each_entry(port, &portdev->ports, list) {
++ if (port->cdev->dev == dev) {
++ kref_get(&port->kref);
+ goto out;
++ }
++ }
+ port = NULL;
+ out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+@@ -634,6 +637,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+
+ port = filp->private_data;
+
++ /* Port is hot-unplugged. */
++ if (!port->guest_connected)
++ return -ENODEV;
++
+ if (!port_has_data(port)) {
+ /*
+ * If nothing's connected on the host just return 0 in
+@@ -650,7 +657,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ if (ret < 0)
+ return ret;
+ }
+- /* Port got hot-unplugged. */
++ /* Port got hot-unplugged while we were waiting above. */
+ if (!port->guest_connected)
+ return -ENODEV;
+ /*
+@@ -793,14 +800,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
+ struct port *port;
+ int ret;
+
++ /* We get the port with a kref here */
+ port = find_port_by_devt(cdev->dev);
++ if (!port) {
++ /* Port was unplugged before we could proceed */
++ return -ENXIO;
++ }
+ filp->private_data = port;
+
+- /* Prevent against a port getting hot-unplugged at the same time */
+- spin_lock_irq(&port->portdev->ports_lock);
+- kref_get(&port->kref);
+- spin_unlock_irq(&port->portdev->ports_lock);
+-
+ /*
+ * Don't allow opening of console port devices -- that's done
+ * via /dev/hvc
+@@ -1264,14 +1271,6 @@ static void remove_port(struct kref *kref)
+
+ port = container_of(kref, struct port, kref);
+
+- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+- device_destroy(pdrvdata.class, port->dev->devt);
+- cdev_del(port->cdev);
+-
+- kfree(port->name);
+-
+- debugfs_remove(port->debugfs_file);
+-
+ kfree(port);
+ }
+
+@@ -1289,12 +1288,14 @@ static void unplug_port(struct port *port)
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ if (port->guest_connected) {
++ /* Let the app know the port is going down. */
++ send_sigio_to_port(port);
++
++ /* Do this after sigio is actually sent */
+ port->guest_connected = false;
+ port->host_connected = false;
+- wake_up_interruptible(&port->waitqueue);
+
+- /* Let the app know the port is going down. */
+- send_sigio_to_port(port);
++ wake_up_interruptible(&port->waitqueue);
+ }
+
+ if (is_console_port(port)) {
+@@ -1320,6 +1321,14 @@ static void unplug_port(struct port *port)
+ */
+ port->portdev = NULL;
+
++ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
++ device_destroy(pdrvdata.class, port->dev->devt);
++ cdev_del(port->cdev);
++
++ kfree(port->name);
++
++ debugfs_remove(port->debugfs_file);
++
+ /*
+ * Locks around here are not necessary - a port can't be
+ * opened after we removed the port struct from ports_list
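The virtio_console rework closes a lookup/refcount race: find_port_by_devt() used to return a bare pointer, and open() took the kref only after the ports_lock had been dropped, leaving a window in which hot-unplug could free the port. Taking the reference inside the lookup, while the lock is still held, removes that window. A sketch of the pattern with hypothetical stand-in types:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	struct list_head list;
	struct kref kref;
	dev_t dev;
};

static struct obj *find_and_get(struct list_head *head, spinlock_t *lock,
				dev_t dev)
{
	struct obj *o, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(o, head, list) {
		if (o->dev == dev) {
			kref_get(&o->kref);	/* pin while still locked */
			found = o;
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);
	return found;	/* caller must kref_put() when done */
}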
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 144d37c..61274bf 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -275,6 +275,7 @@ enum intel_pch {
+ #define QUIRK_PIPEA_FORCE (1<<0)
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+ #define QUIRK_INVERT_BRIGHTNESS (1<<2)
++#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
+
+ struct intel_fbdev;
+ struct intel_fbc_work;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 124dd87..97a050f 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -362,6 +362,7 @@
+ #define IPEIR_I965 0x02064
+ #define IPEHR_I965 0x02068
+ #define INSTDONE_I965 0x0206c
++#define RING_INSTPM(base) ((base)+0xc0)
+ #define INSTPS 0x02070 /* 965+ only */
+ #define INSTDONE1 0x0207c /* 965+ only */
+ #define ACTHD_I965 0x02074
+@@ -458,6 +459,8 @@
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
++#define INSTPM_TLB_INVALIDATE (1<<9)
++#define INSTPM_SYNC_FLUSH (1<<5)
+ #define ACTHD 0x020c8
+ #define FW_BLC 0x020d8
+ #define FW_BLC2 0x020dc
+@@ -3513,7 +3516,7 @@
+ #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+ #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+ #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
++#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+
+ /* legacy values */
+ #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cfbb893..ee29c1f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8842,6 +8842,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ }
+
++/*
++ * Some machines (Dell XPS13) suffer broken backlight controls if
++ * BLM_PCH_PWM_ENABLE is set.
++ */
++static void quirk_no_pch_pwm_enable(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
++ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
++}
++
+ struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+@@ -8916,6 +8927,11 @@ struct intel_quirk intel_quirks[] = {
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
++
++ /* Dell XPS13 HD Sandy Bridge */
++	{ 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
++ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
++	{ 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 2ffa740..74d312f 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -402,13 +402,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
+ {
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+- /*
+- * Prior to Ironlake, we must disable the pipe if we want to adjust
+- * the panel fitter. However at all other times we can just reset
+- * the registers regardless.
+- */
+- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
+- intel_lvds_disable(intel_lvds);
++ intel_lvds_disable(intel_lvds);
+ }
+
+ static void intel_lvds_commit(struct drm_encoder *encoder)
+@@ -1075,7 +1069,8 @@ bool intel_lvds_init(struct drm_device *dev)
+ goto failed;
+
+ out:
+- if (HAS_PCH_SPLIT(dev)) {
++ if (HAS_PCH_SPLIT(dev) &&
++ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
+ u32 pwm;
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 38a7793..3c55cf6 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -776,6 +776,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
++
++ /* Flush the TLB for this page */
++ if (INTEL_INFO(dev)->gen >= 6) {
++ u32 reg = RING_INSTPM(ring->mmio_base);
++ I915_WRITE(reg,
++ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
++ INSTPM_SYNC_FLUSH));
++ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
++ 1000))
++ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
++ ring->name);
++ }
+ }
+
+ static int
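The INSTPM write above uses the i915 masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, so no read-modify-write cycle is needed, and the code then polls with wait_for() until SYNC_FLUSH self-clears. A runnable model of the masked write (MASKED_BIT_ENABLE mirrors the driver's macro; the update line imitates what the hardware does):

#include <stdio.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define INSTPM_TLB_INVALIDATE (1 << 9)
#define INSTPM_SYNC_FLUSH     (1 << 5)

int main(void)
{
	uint32_t instpm = 0;
	uint32_t w = MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
				       INSTPM_SYNC_FLUSH);
	uint32_t mask = w >> 16;

	/* hardware-side semantics of a masked register write */
	instpm = (instpm & ~mask) | (w & mask);

	printf("INSTPM=%#x\n", (unsigned)instpm);	/* 0x220 */
	return 0;
}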
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index d969f3c..afb351a 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1220,12 +1220,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+ int r;
+
+ mutex_lock(&ctx->mutex);
++ /* reset data block */
++ ctx->data_block = 0;
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
++ /* reset divmul */
++ ctx->divmul[0] = 0;
++ ctx->divmul[1] = 0;
+ r = atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+ return r;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 9bea4a6..f5962a0 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3036,6 +3036,8 @@ static int evergreen_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
++ evergreen_mc_program(rdev);
++
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+@@ -3063,7 +3065,6 @@ static int evergreen_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- evergreen_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 3f9705b..77e6fb1 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1353,6 +1353,8 @@ static int cayman_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
++ evergreen_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+@@ -1370,7 +1372,6 @@ static int cayman_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- evergreen_mc_program(rdev);
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 3d46d7d4..57e45c6 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2415,6 +2415,8 @@ int r600_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
++ r600_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+@@ -2427,7 +2429,6 @@ int r600_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r600_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 63db75d..3e72074 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1057,6 +1057,8 @@ static int rv770_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
++ rv770_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+@@ -1069,7 +1071,6 @@ static int rv770_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- rv770_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ rv770_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+index c41226a..2952249 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+@@ -29,7 +29,9 @@
+ #include "drmP.h"
+ #include "ttm/ttm_bo_driver.h"
+
+-#define VMW_PPN_SIZE sizeof(unsigned long)
++#define VMW_PPN_SIZE (sizeof(unsigned long))
++/* A future-safe maximum remap size. */
++#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+
+ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ struct page *pages[],
+@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ {
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ SVGAFifoCmdRemapGMR2 remap_cmd;
+- uint32_t define_size = sizeof(define_cmd) + 4;
+- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
+ uint32_t *cmd;
+ uint32_t *cmd_orig;
++ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
++ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
++ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
++ uint32_t remap_pos = 0;
++ uint32_t cmd_size = define_size + remap_size;
+ uint32_t i;
+
+- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
++ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL))
+ return -ENOMEM;
+
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = num_pages;
+
++ *cmd++ = SVGA_CMD_DEFINE_GMR2;
++ memcpy(cmd, &define_cmd, sizeof(define_cmd));
++ cmd += sizeof(define_cmd) / sizeof(*cmd);
++
++ /*
++ * Need to split the command if there are too many
++	 * pages that go into the GMR.
++ */
++
+ remap_cmd.gmrId = gmr_id;
+ remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+ SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+- remap_cmd.offsetPages = 0;
+- remap_cmd.numPages = num_pages;
+
+- *cmd++ = SVGA_CMD_DEFINE_GMR2;
+- memcpy(cmd, &define_cmd, sizeof(define_cmd));
+- cmd += sizeof(define_cmd) / sizeof(uint32);
++ while (num_pages > 0) {
++ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
++
++ remap_cmd.offsetPages = remap_pos;
++ remap_cmd.numPages = nr;
+
+- *cmd++ = SVGA_CMD_REMAP_GMR2;
+- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+- cmd += sizeof(remap_cmd) / sizeof(uint32);
++ *cmd++ = SVGA_CMD_REMAP_GMR2;
++ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
++ cmd += sizeof(remap_cmd) / sizeof(*cmd);
+
+- for (i = 0; i < num_pages; ++i) {
+- if (VMW_PPN_SIZE <= 4)
+- *cmd = page_to_pfn(*pages++);
+- else
+- *((uint64_t *)cmd) = page_to_pfn(*pages++);
++ for (i = 0; i < nr; ++i) {
++ if (VMW_PPN_SIZE <= 4)
++ *cmd = page_to_pfn(*pages++);
++ else
++ *((uint64_t *)cmd) = page_to_pfn(*pages++);
+
+- cmd += VMW_PPN_SIZE / sizeof(*cmd);
++ cmd += VMW_PPN_SIZE / sizeof(*cmd);
++ }
++
++ num_pages -= nr;
++ remap_pos += nr;
+ }
+
+- vmw_fifo_commit(dev_priv, define_size + remap_size);
++ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
++
++ vmw_fifo_commit(dev_priv, cmd_size);
+
+ return 0;
+ }
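The vmwgfx fix splits one oversized REMAP_GMR2 command into several, each covering at most VMW_PPN_PER_REMAP pages, with offsetPages recording where in the GMR each chunk starts. A runnable model of just the splitting arithmetic (PER_REMAP is shrunk to 4 so the output stays short):

#include <stdio.h>

#define PER_REMAP 4UL	/* stand-in for VMW_PPN_PER_REMAP */

int main(void)
{
	unsigned long num_pages = 10, pos = 0;
	/* ceil(num_pages / PER_REMAP), written as the patch writes it */
	unsigned long remap_num = num_pages / PER_REMAP +
				  ((num_pages % PER_REMAP) > 0);

	printf("%lu remap commands\n", remap_num);	/* 3 */
	while (num_pages > 0) {
		unsigned long nr = num_pages < PER_REMAP ? num_pages
							 : PER_REMAP;

		printf("remap offset=%lu pages=%lu\n", pos, nr);
		num_pages -= nr;
		pos += nr;
	}
	return 0;
}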
+diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
+index c6d1ce0..a9726c1 100644
+--- a/drivers/hwmon/adt7470.c
++++ b/drivers/hwmon/adt7470.c
+@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
+ u16 value)
+ {
+ return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
+- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
++ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ }
+
+ static void adt7470_init_client(struct i2c_client *client)
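The one-character adt7470 change fixes error combining where 0 means success: the old `a() && b()` short-circuits to 0 as soon as the first write succeeds, so the second byte was never written on the normal path. With `||`, the second write runs exactly when the first succeeds, and the result is nonzero if either fails. A runnable contrast (the helpers stand in for the two i2c_smbus_write_byte_data() calls):

#include <stdio.h>

static int write_lo(int fail) { return fail ? -5 : 0; }	/* 0 = success */
static int write_hi(int fail) { return fail ? -5 : 0; }

static int write_word(int fail_lo, int fail_hi)
{
	/* nonzero iff either step failed; write_hi() runs only after
	 * write_lo() succeeded, matching the fixed code above */
	return write_lo(fail_lo) || write_hi(fail_hi);
}

int main(void)
{
	/* expect: 0 1 1 */
	printf("%d %d %d\n",
	       write_word(0, 0), write_word(0, 1), write_word(1, 0));
	return 0;
}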
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 298e02a..c706a7b 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1139,7 +1139,7 @@ read_again:
+ * know the original bi_idx, so we just free
+ * them all
+ */
+- __bio_for_each_segment(bvec, mbio, j, 0)
++ bio_for_each_segment_all(bvec, mbio, j)
+ bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
+ if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+ atomic_inc(&r1_bio->behind_remaining);
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index a746ba2..a956053 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+
+ soft = &pkt.soft.rfc1201;
+
+- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
++ lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
+ if (pkt.hard.offset[0]) {
+ ofs = pkt.hard.offset[0];
+ length = 256 - ofs;
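The arcnet fix swaps sizeof(ARC_HDR_SIZE) for plain ARC_HDR_SIZE: applied to an integer constant, sizeof yields the size of the constant's type (an int, typically 4 bytes), not its value, so the copy length was only accidentally correct while ARC_HDR_SIZE happened to equal 4. A runnable illustration with a hypothetical 6-byte header:

#include <stdio.h>

#define HDR_SIZE 6	/* hypothetical header length in bytes */

int main(void)
{
	/* value=6 sizeof=4 on typical ABIs: the two diverge as soon
	 * as the constant is not exactly sizeof(int) */
	printf("value=%d sizeof=%zu\n", HDR_SIZE, sizeof(HDR_SIZE));
	return 0;
}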
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+index fcd0e47..2a7d091 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+
+ /* Enable arbiter */
+ reg &= ~IXGBE_DPMCS_ARBDIS;
+- /* Enable DFP and Recycle mode */
+- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
+ reg |= IXGBE_DPMCS_TSOEF;
++
+ /* Configure Max TSO packet size 34KB including payload and headers */
+ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index b19841a..00f1367 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -34,6 +34,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/moduleparam.h>
++#include <linux/sched.h>
+ #include <net/pkt_sched.h>
+ #include <net/net_namespace.h>
+
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index f3d17f8..a8e4640 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -43,7 +43,6 @@
+ #define EEPROM_MAC_OFFSET (0x01)
+ #define DEFAULT_TX_CSUM_ENABLE (true)
+ #define DEFAULT_RX_CSUM_ENABLE (true)
+-#define DEFAULT_TSO_ENABLE (true)
+ #define SMSC75XX_INTERNAL_PHY_ID (1)
+ #define SMSC75XX_TX_OVERHEAD (8)
+ #define MAX_RX_FIFO_SIZE (20 * 1024)
+@@ -1035,17 +1034,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+
+ INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
+
+- if (DEFAULT_TX_CSUM_ENABLE) {
++ if (DEFAULT_TX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+- if (DEFAULT_TSO_ENABLE)
+- dev->net->features |= NETIF_F_SG |
+- NETIF_F_TSO | NETIF_F_TSO6;
+- }
++
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
++ NETIF_F_RXCSUM;
+
+ /* Init all registers */
+ ret = smsc75xx_reset(dev);
+@@ -1170,8 +1166,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
+ {
+ u32 tx_cmd_a, tx_cmd_b;
+
+- skb_linearize(skb);
+-
+ if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
+ struct sk_buff *skb2 =
+ skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 84890d5..ef921e1 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -851,6 +851,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
+ if (error != 0)
+ goto err_rx;
+
++ ath9k_hw_disable(priv->ah);
+ #ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index a48bb83..9a57149 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
+ struct ieee80211_conf *cur_conf = &priv->hw->conf;
+ bool txok;
+ int slot;
++ int hdrlen, padsize;
+
+ slot = strip_drv_header(priv, skb);
+ if (slot < 0) {
+@@ -504,6 +505,15 @@ send_mac80211:
+
+ ath9k_htc_tx_clear_slot(priv, slot);
+
++ /* Remove padding before handing frame back to mac80211 */
++ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
++
++ padsize = hdrlen & 3;
++ if (padsize && skb->len > hdrlen + padsize) {
++ memmove(skb->data + padsize, skb->data, hdrlen);
++ skb_pull(skb, padsize);
++ }
++
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
+index 045a936..271e818 100644
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+
+ data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
++ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+ data->flags = 1; /* has quality information */
+- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
++ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+ sizeof(struct iw_quality) * data->length);
+
+ kfree(addr);
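The hostap fix is the classic `&ptr` slip: addr and qual already point at the kmalloc()ed arrays, so memcpy(extra, &addr, len) copied bytes of the pointer variable itself off the stack instead of the payload. A runnable contrast of the two forms:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	char out[16];

	strcpy(buf, "payload");

	memcpy(out, &buf, sizeof(char *));	/* wrong: copies the pointer */
	memcpy(out, buf, strlen(buf) + 1);	/* right: copies the data */

	printf("%s\n", out);	/* "payload" */
	free(buf);
	return 0;
}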
+diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
+index 1bb64c9..09891e5 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-core.c
++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
+@@ -1757,6 +1757,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
+
+ return 0;
+ }
++EXPORT_SYMBOL(iwl_legacy_force_reset);
+
+ int
+ iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
+index d2fba9e..6e25c7b 100644
+--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
+@@ -868,13 +868,13 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+ * is killed. Hence update the killswitch state here. The
+ * rfkill handler will care about restarting if needed.
+ */
+- if (!test_bit(STATUS_ALIVE, &priv->status)) {
+- if (hw_rf_kill)
+- set_bit(STATUS_RF_KILL_HW, &priv->status);
+- else
+- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
++ if (hw_rf_kill) {
++ set_bit(STATUS_RF_KILL_HW, &priv->status);
++ } else {
++ clear_bit(STATUS_RF_KILL_HW, &priv->status);
++ iwl_legacy_force_reset(priv, true);
+ }
++ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+@@ -1764,6 +1764,9 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
+
+ priv->active_rate = IWL_RATES_MASK;
+
++ iwl_legacy_power_update_mode(priv, true);
++ IWL_DEBUG_INFO(priv, "Updated power mode\n");
++
+ if (iwl_legacy_is_associated_ctx(ctx)) {
+ struct iwl_legacy_rxon_cmd *active_rxon =
+ (struct iwl_legacy_rxon_cmd *)&ctx->active;
+@@ -1796,9 +1799,6 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
+ IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+ wake_up(&priv->wait_command_queue);
+
+- iwl_legacy_power_update_mode(priv, true);
+- IWL_DEBUG_INFO(priv, "Updated power mode\n");
+-
+ return;
+
+ restart:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 16cdd12..94d35ad 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1297,7 +1297,7 @@ int iwl_alive_start(struct iwl_priv *priv)
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+- } else {
++ } else if (priv->cfg->bt_params) {
+ /*
+ * default is 2-wire BT coexexistence support
+ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 832ec4d..5ef176a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -808,8 +808,11 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ return;
+
+- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
++ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->shrd->status))
++ return;
++
++ if (ctx->vif)
+ ieee80211_chswitch_done(ctx->vif, is_success);
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
+index 1800029..346dc9b 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
++++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
+@@ -227,6 +227,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
++ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 3579a68..17f8720 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -1429,8 +1429,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
+ /* Allocate buffer and copy payload */
+ blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
+ buf_block_len = (pkt_len + blk_size - 1) / blk_size;
+- *(u16 *) &payload[0] = (u16) pkt_len;
+- *(u16 *) &payload[2] = type;
++ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
++ *(__le16 *)&payload[2] = cpu_to_le16(type);
+
+ /*
+ * This is SDIO specific header
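+
+The two-line change above is an endianness fix: the SDIO header fields are
+now stored as explicit little-endian values, so the on-bus layout no longer
+depends on host byte order. A minimal sketch of the pattern (the helper name
+and the four-byte header layout are illustrative, not the mwifiex structure):
+
+	static void put_le16_header(u8 *payload, u16 pkt_len, u16 type)
+	{
+		/* __le16 stores are stable on both BE and LE hosts */
+		*(__le16 *)&payload[0] = cpu_to_le16(pkt_len);
+		*(__le16 *)&payload[2] = cpu_to_le16(type);
+	}
+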
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 50f92d5..4d792a2 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -856,13 +856,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
+ }
+
+-void rt2x00queue_pause_queue(struct data_queue *queue)
++void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
+ {
+- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+- !test_bit(QUEUE_STARTED, &queue->flags) ||
+- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+- return;
+-
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+@@ -878,6 +873,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
+ break;
+ }
+ }
++void rt2x00queue_pause_queue(struct data_queue *queue)
++{
++ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
++ !test_bit(QUEUE_STARTED, &queue->flags) ||
++ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
++ return;
++
++ rt2x00queue_pause_queue_nocheck(queue);
++}
+ EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+ void rt2x00queue_unpause_queue(struct data_queue *queue)
+@@ -939,7 +943,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
+ return;
+ }
+
+- rt2x00queue_pause_queue(queue);
++ rt2x00queue_pause_queue_nocheck(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
+index 8efa2f2..f8c319c 100644
+--- a/drivers/net/wireless/zd1201.c
++++ b/drivers/net/wireless/zd1201.c
+@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
++ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
++ memcpy(&ret, buf, sizeof(ret));
++
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
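+
+The zd1201 fix above follows the usual USB rule that usb_control_msg() must
+be given a kmalloc'd, DMA-capable buffer rather than the address of a stack
+variable; the status byte is read into the heap buffer and only then copied
+out. A hedged sketch of the pattern, with illustrative names and trimmed
+error handling:
+
+	u8 *buf = kmalloc(1, GFP_KERNEL);	/* DMA-safe, not on the stack */
+	if (!buf)
+		return -ENOMEM;
+	err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), req,
+			      USB_DIR_IN | 0x40, 0, 0, buf, 1, timeout);
+	if (err >= 0)
+		status = *buf;			/* copy out of the DMA buffer */
+	kfree(buf);
+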
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index fd85fa2..b77808c 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -389,6 +389,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
++ memset((void *)mem, 0, size);
++
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index a9c46cc..8c33491 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -1,3 +1,5 @@
++#include <linux/prefetch.h>
++
+ /**
+ * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
+ * @ioc: The I/O Controller.
+diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
+index 083a49f..165274c 100644
+--- a/drivers/pci/Makefile
++++ b/drivers/pci/Makefile
+@@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PARISC) += setup-bus.o
+ obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PPC) += setup-bus.o
++obj-$(CONFIG_FRV) += setup-bus.o
+ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_X86_VISWS) += setup-irq.o
+ obj-$(CONFIG_MN10300) += setup-bus.o
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index e1b4f80..5c87270 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&port->erp_action);
+- else
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ else {
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
++ spin_unlock(port->adapter->scsi_host->host_lock);
++ }
+ }
+
+ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ {
+ struct scsi_device *sdev;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
++ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
+
+ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host)
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host) {
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+ {
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
++ unsigned long flags;
+
+ atomic_set_mask(mask, &port->status);
+
+ if (!common_mask)
+ return;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
++ unsigned long flags;
+
+ atomic_clear_mask(mask, &port->status);
+
+@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e76d003..52c6b59 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+
+ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+ {
+- spin_lock_irq(&qdio->req_q_lock);
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+- spin_unlock_irq(&qdio->req_q_lock);
+ return 0;
+ }
+
+@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ {
+ long ret;
+
+- spin_unlock_irq(&qdio->req_q_lock);
+- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+- zfcp_qdio_sbal_check(qdio), 5 * HZ);
++ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
++ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
+@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
+
+- spin_lock_irq(&qdio->req_q_lock);
+ return -EIO;
+ }
+
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index fc5a2ef..b018997 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ break;
+ }
+
+- /*
+- * We expect the FW state to be READY
+- */
+- if (megasas_transition_to_ready(instance, 0))
+- goto fail_ready_state;
++ if (megasas_transition_to_ready(instance, 0)) {
++ atomic_set(&instance->fw_reset_no_pci_access, 1);
++ instance->instancet->adp_reset
++ (instance, instance->reg_set);
++ atomic_set(&instance->fw_reset_no_pci_access, 0);
++ dev_info(&instance->pdev->dev,
++ "megasas: FW restarted successfully from %s!\n",
++ __func__);
++
++ /* waiting for about 30 seconds before retry */
++ ssleep(30);
++
++ if (megasas_transition_to_ready(instance, 0))
++ goto fail_ready_state;
++ }
+
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
+index f6a50c9..bfb05b8 100644
+--- a/drivers/scsi/nsp32.c
++++ b/drivers/scsi/nsp32.c
+@@ -2927,7 +2927,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
+ * reset SCSI bus
+ */
+ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
+- udelay(RESET_HOLD_TIME);
++ mdelay(RESET_HOLD_TIME / 1000);
+ nsp32_write1(base, SCSI_BUS_CONTROL, 0);
+ for(i = 0; i < 5; i++) {
+ intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 717a8d4..903b2f5 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -127,11 +127,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ goto out;
+ }
+
+- snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+- snprintf((unsigned char *)&buf[16], 16, "%s",
+- &dev->se_sub_dev->t10_wwn.model[0]);
+- snprintf((unsigned char *)&buf[32], 4, "%s",
+- &dev->se_sub_dev->t10_wwn.revision[0]);
++ memcpy(&buf[8], "LIO-ORG ", 8);
++ memset(&buf[16], 0x20, 16);
++ memcpy(&buf[16], dev->se_sub_dev->t10_wwn.model,
++ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.model), 16));
++ memcpy(&buf[32], dev->se_sub_dev->t10_wwn.revision,
++ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.revision), 4));
+ buf[4] = 31; /* Set additional length to 31 */
+
+ out:
+diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
+index 6f4dd83..3749688 100644
+--- a/drivers/tty/hvc/hvsi_lib.c
++++ b/drivers/tty/hvc/hvsi_lib.c
+@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
+
+ pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
+
+- /* Try for up to 200s */
+- for (timeout = 0; timeout < 20; timeout++) {
++ /* Try for up to 400ms */
++ for (timeout = 0; timeout < 40; timeout++) {
+ if (pv->established)
+ goto established;
+ if (!hvsi_get_packet(pv))
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 5b3d063..ab7d11e 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -374,11 +374,18 @@ static void mxs_auart_settermios(struct uart_port *u,
+
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+- u32 istatus, istat;
++ u32 istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+- istatus = istat = readl(s->port.membase + AUART_INTR);
++ istat = readl(s->port.membase + AUART_INTR);
++
++ /* ack irq */
++ writel(istat & (AUART_INTR_RTIS
++ | AUART_INTR_TXIS
++ | AUART_INTR_RXIS
++ | AUART_INTR_CTSMIS),
++ s->port.membase + AUART_INTR_CLR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+@@ -397,12 +404,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+- writel(istatus & (AUART_INTR_RTIS
+- | AUART_INTR_TXIS
+- | AUART_INTR_RXIS
+- | AUART_INTR_CTSMIS),
+- s->port.membase + AUART_INTR_CLR);
+-
+ return IRQ_HANDLED;
+ }
+
+@@ -542,7 +543,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+- unsigned int to = 1000;
++ unsigned int to = 20000;
+
+ if (co->index > MXS_AUART_PORTS || co->index < 0)
+ return;
+@@ -563,18 +564,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+- /*
+- * Finally, wait for transmitter to become empty
+- * and restore the TCR
+- */
++ /* Finally, wait for transmitter to become empty ... */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
++ udelay(1);
+ if (!to--)
+ break;
+- udelay(1);
+ }
+
+- writel(old_ctrl0, port->membase + AUART_CTRL0);
+- writel(old_ctrl2, port->membase + AUART_CTRL2);
++ /*
++ * ... and restore the TCR if we waited long enough for the transmitter
++ * to be idle. This might keep the transmitter enabled although it is
++ * unused, but that is better than disabling it while it is still
++ * transmitting.
++ */
++ if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
++ writel(old_ctrl0, port->membase + AUART_CTRL0);
++ writel(old_ctrl2, port->membase + AUART_CTRL2);
++ }
+
+ clk_disable(s->clk);
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 2fbcb75..f52182d 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04d8, 0x000c), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* CarrolTouch 4000U */
++ { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
++
++ /* CarrolTouch 4500U */
++ { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
+index fe85871..db5128b7e 100644
+--- a/drivers/usb/misc/adutux.c
++++ b/drivers/usb/misc/adutux.c
+@@ -829,7 +829,7 @@ static int adu_probe(struct usb_interface *interface,
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
+- udev->descriptor.idProduct, dev->serial_number,
++ le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
+ (dev->minor - ADU_MINOR_BASE));
+ exit:
+ dbg(2," %s : leave, return value %p (dev)", __func__, dev);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index ce9f87f..a3f6fe0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -743,9 +743,34 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
+ { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5d25e26..61685ed 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -815,11 +815,35 @@
+ /*
+ * RT Systems programming cables for various ham radios
+ */
+-#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+-#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+-#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+-#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
+-
++#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
++#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
++#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
++#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
++#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
++#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
++#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
++#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
++#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
++#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
++#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
++#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
++#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
++#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
++#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
++#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
++#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
++#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
++#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
++#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
++#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
++#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
++#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
++#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
++#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
+
+ /*
+ * Physik Instrumente
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index 4f415e28..b668069 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2620,7 +2620,7 @@ static int keyspan_startup(struct usb_serial *serial)
+ if (d_details == NULL) {
+ dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
+ __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
+- return 1;
++ return -ENODEV;
+ }
+
+ /* Setup private data for serial driver */
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 9580679..9270d5c 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -97,6 +97,7 @@ struct urbtracker {
+ struct list_head urblist_entry;
+ struct kref ref_count;
+ struct urb *urb;
++ struct usb_ctrlrequest *setup;
+ };
+
+ enum mos7715_pp_modes {
+@@ -279,6 +280,7 @@ static void destroy_urbtracker(struct kref *kref)
+ struct mos7715_parport *mos_parport = urbtrack->mos_parport;
+ dbg("%s called", __func__);
+ usb_free_urb(urbtrack->urb);
++ kfree(urbtrack->setup);
+ kfree(urbtrack);
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ }
+@@ -363,7 +365,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ struct urbtracker *urbtrack;
+ int ret_val;
+ unsigned long flags;
+- struct usb_ctrlrequest setup;
+ struct usb_serial *serial = mos_parport->serial;
+ struct usb_device *usbdev = serial->dev;
+ dbg("%s called", __func__);
+@@ -382,14 +383,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+- setup.bRequestType = (__u8)0x40;
+- setup.bRequest = (__u8)0x0e;
+- setup.wValue = get_reg_value(reg, dummy);
+- setup.wIndex = get_reg_index(reg);
+- setup.wLength = 0;
++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
++ if (!urbtrack->setup) {
++ usb_free_urb(urbtrack->urb);
++ kfree(urbtrack);
++ return -ENOMEM;
++ }
++ urbtrack->setup->bRequestType = (__u8)0x40;
++ urbtrack->setup->bRequest = (__u8)0x0e;
++ urbtrack->setup->wValue = get_reg_value(reg, dummy);
++ urbtrack->setup->wIndex = get_reg_index(reg);
++ urbtrack->setup->wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+- (unsigned char *)&setup,
++ (unsigned char *)urbtrack->setup,
+ NULL, 0, async_complete, urbtrack);
+ kref_init(&urbtrack->ref_count);
+ INIT_LIST_HEAD(&urbtrack->urblist_entry);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5e8c736..5d2501e 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -185,6 +185,10 @@
+ #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
+
+
++enum mos7840_flag {
++ MOS7840_FLAG_CTRL_BUSY,
++};
++
+ static const struct usb_device_id moschip_port_id_table[] = {
+ {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
+ {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
+@@ -258,6 +262,8 @@ struct moschip_port {
+ struct urb *write_urb_pool[NUM_URBS];
+ char busy[NUM_URBS];
+ bool read_urb_busy;
++
++ unsigned long flags;
+ };
+
+
+@@ -519,11 +525,11 @@ static void mos7840_control_callback(struct urb *urb)
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__,
+ status);
+- return;
++ goto out;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+- return;
++ goto out;
+ }
+
+ dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+@@ -536,6 +542,8 @@ static void mos7840_control_callback(struct urb *urb)
+ mos7840_handle_new_msr(mos7840_port, regval);
+ else if (mos7840_port->MsrLsr == 1)
+ mos7840_handle_new_lsr(mos7840_port, regval);
++out:
++ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
+ }
+
+ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+@@ -546,6 +554,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+ unsigned char *buffer = mcs->ctrl_buf;
+ int ret;
+
++ if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
++ return -EBUSY;
++
+ dr->bRequestType = MCS_RD_RTYPE;
+ dr->bRequest = MCS_RDREQ;
+ dr->wValue = cpu_to_le16(Wval); /* 0 */
+@@ -557,6 +568,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+ mos7840_control_callback, mcs);
+ mcs->control_urb->transfer_buffer_length = 2;
+ ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
++ if (ret)
++ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
++
+ return ret;
+ }
+
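+
+The mos7840 change above serializes use of the single shared control URB
+with a lock bit: test_and_set_bit_lock() claims it with acquire semantics,
+and clear_bit_unlock() releases it from the completion handler or after a
+failed submit. The shape of that pattern, with illustrative names:
+
+	if (test_and_set_bit_lock(MY_CTRL_BUSY, &priv->flags))
+		return -EBUSY;		/* a transfer is already in flight */
+
+	ret = usb_submit_urb(priv->control_urb, GFP_ATOMIC);
+	if (ret)			/* release on submit failure ... */
+		clear_bit_unlock(MY_CTRL_BUSY, &priv->flags);
+	/* ... otherwise the completion callback clears the bit */
+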
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 42038ba..885d15d 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1713,12 +1713,13 @@ static int ti_download_firmware(struct ti_device *tdev)
+
+ dbg("%s\n", __func__);
+ /* try ID specific firmware first, then try generic firmware */
+- sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
+- dev->descriptor.idProduct);
++ sprintf(buf, "ti_usb-v%04x-p%04x.fw",
++ le16_to_cpu(dev->descriptor.idVendor),
++ le16_to_cpu(dev->descriptor.idProduct));
+ if ((status = request_firmware(&fw_p, buf, &dev->dev)) != 0) {
+ buf[0] = '\0';
+- if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
+- switch (dev->descriptor.idProduct) {
++ if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
++ switch (le16_to_cpu(dev->descriptor.idProduct)) {
+ case MTS_CDMA_PRODUCT_ID:
+ strcpy(buf, "mts_cdma.fw");
+ break;
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 11d7b64..f6227cc 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -316,7 +316,7 @@ static void init_evtchn_cpu_bindings(void)
+
+ for_each_possible_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i),
+- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
++ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
+ }
+
+ static inline void clear_evtchn(int port)
+@@ -1340,8 +1340,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ {
++ struct shared_info *s = HYPERVISOR_shared_info;
+ struct evtchn_bind_vcpu bind_vcpu;
+ int evtchn = evtchn_from_irq(irq);
++ int masked;
+
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+@@ -1358,6 +1360,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ bind_vcpu.vcpu = tcpu;
+
+ /*
++ * Mask the event while changing the VCPU binding to prevent
++ * it being delivered on an unexpected VCPU.
++ */
++ masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
++
++ /*
+ * If this fails, it usually just indicates that we're dealing with a
+ * virq or IPI channel, which don't actually need to be rebound. Ignore
+ * it, but don't do the xenlinux-level rebind in that case.
+@@ -1365,6 +1373,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+
++ if (!masked)
++ unmask_evtchn(evtchn);
++
+ return 0;
+ }
+
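+
+The rebind fix above is the mask/modify/conditional-unmask shape: delivery
+is masked across the racy update, and the channel is unmasked afterwards
+only if this code was the one that masked it, so a mask set elsewhere is
+preserved. Reduced to its skeleton:
+
+	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
+	/* ... issue the EVTCHNOP_bind_vcpu hypercall while delivery is off ... */
+	if (!masked)
+		unmask_evtchn(evtchn);	/* never clear a mask we did not set */
+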
+diff --git a/fs/bio.c b/fs/bio.c
+index 4fc4dbb..b84d851 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -734,7 +734,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+ int iov_idx = 0;
+ unsigned int iov_off = 0;
+
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *bv_addr = page_address(bvec->bv_page);
+ unsigned int bv_len = iovecs[i].bv_len;
+
+@@ -787,12 +787,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+ int bio_uncopy_user(struct bio *bio)
+ {
+ struct bio_map_data *bmd = bio->bi_private;
+- int ret = 0;
++ struct bio_vec *bvec;
++ int ret = 0, i;
+
+- if (!bio_flagged(bio, BIO_NULL_MAPPED))
+- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+- 0, bmd->is_our_pages);
++ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
++ /*
++ * if we're in a workqueue, the request is orphaned, so
++ * don't copy into a random user address space, just free.
++ */
++ if (current->mm)
++ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
++ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
++ 0, bmd->is_our_pages);
++ else if (bmd->is_our_pages)
++ bio_for_each_segment_all(bvec, bio, i)
++ __free_page(bvec->bv_page);
++ }
+ bio_free_map_data(bmd);
+ bio_put(bio);
+ return ret;
+@@ -916,7 +926,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ return bio;
+ cleanup:
+ if (!map_data)
+- bio_for_each_segment(bvec, bio, i)
++ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
+@@ -1130,7 +1140,7 @@ static void __bio_unmap_user(struct bio *bio)
+ /*
+ * make sure we dirty pages we wrote to
+ */
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ if (bio_data_dir(bio) == READ)
+ set_page_dirty_lock(bvec->bv_page);
+
+@@ -1236,7 +1246,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+ int i;
+ char *p = bmd->sgvecs[0].iov_base;
+
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+ int len = bmd->iovecs[i].bv_len;
+
+@@ -1276,7 +1286,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ if (!reading) {
+ void *p = data;
+
+- bio_for_each_segment(bvec, bio, i) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ memcpy(addr, p, bvec->bv_len);
+@@ -1556,7 +1566,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
+ if (index >= bio->bi_idx)
+ index = bio->bi_vcnt - 1;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ if (i == index) {
+ if (offset > bv->bv_offset)
+ sectors += (offset - bv->bv_offset) / sector_sz;
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index cdcd665..b4675bd 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -369,7 +369,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ if (blobptr + attrsize > blobend)
+ break;
+ if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+- if (!attrsize)
++ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
+ break;
+ if (!ses->domainName) {
+ ses->domainName =
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 2f3ff59..7b68088 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -38,6 +38,7 @@
+ #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
+ #define MAX_SERVER_SIZE 15
+ #define MAX_SHARE_SIZE 80
++#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
+ #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+ #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 4c37ed4..52a820a 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -96,6 +96,14 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+ dput(dentry);
+ }
+
++ /*
++ * If we know that the inode will need to be revalidated immediately,
++ * then don't create a new dentry for it. We'll end up doing an
++ * on-the-wire call either way and this spares us an invalidation.
++ */
++ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
++ return NULL;
++
+ dentry = d_alloc(parent, name);
+ if (dentry == NULL)
+ return NULL;
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 2504809..d362626 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -198,7 +198,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ bytes_ret = 0;
+ } else
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
+- 256, nls_cp);
++ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null terminator */
+
+@@ -256,8 +256,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+
+ /* copy domain */
+ if (ses->domainName != NULL) {
+- strncpy(bcc_ptr, ses->domainName, 256);
+- bcc_ptr += strnlen(ses->domainName, 256);
++ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ } /* else we will send a null domain name
+ so the server will default to its own domain */
+ *bcc_ptr = 0;
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 1585db1..26bb512 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio)
+ struct bio_vec *bv;
+ unsigned i;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ unsigned this_count = bv->bv_len;
+
+ if (likely(PAGE_SIZE == this_count))
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index fff2070..2c64826 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ if (!bio)
+ continue;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ SetPageUptodate(page);
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index aca1790..d0b8f98 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -109,10 +109,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+
+ if (ext4_handle_valid(handle)) {
+ err = jbd2_journal_dirty_metadata(handle, bh);
+- if (err) {
+- /* Errors can only happen if there is a bug */
+- handle->h_err = err;
+- __ext4_journal_stop(where, line, handle);
++ /* Errors can only happen if there is a bug */
++ if (WARN_ON_ONCE(err)) {
++ ext4_journal_abort_handle(where, line, __func__, bh,
++ handle, err);
+ }
+ } else {
+ if (inode)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 259e950..84f84bf 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3372,7 +3372,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+- "both data=journal and delalloc");
++ "both data=journal and dioread_nolock");
+ goto failed_mount;
+ }
+ if (test_opt(sb, DELALLOC))
+@@ -4539,6 +4539,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ goto restore_opts;
+ }
+
++ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
++ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "both data=journal and delalloc");
++ err = -EINVAL;
++ goto restore_opts;
++ }
++ if (test_opt(sb, DIOREAD_NOLOCK)) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "both data=journal and dioread_nolock");
++ err = -EINVAL;
++ goto restore_opts;
++ }
++ }
++
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+ ext4_abort(sb, "Abort forced by user");
+
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 9197a1b..9f7c758 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+
+ dir_index = (u32) filp->f_pos;
+
++ /*
++ * NFSv4 reserves cookies 1 and 2 for . and .. so the value
++ * we return to the vfs is one greater than the one we use
++ * internally.
++ */
++ if (dir_index)
++ dir_index--;
++
+ if (dir_index > 1) {
+ struct dir_table_slot dirtab_slot;
+
+@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ if (p->header.flag & BT_INTERNAL) {
+ jfs_err("jfs_readdir: bad index table");
+ DT_PUTPAGE(mp);
+- filp->f_pos = -1;
++ filp->f_pos = DIREND;
+ return 0;
+ }
+ } else {
+@@ -3094,7 +3102,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * self "."
+ */
+- filp->f_pos = 0;
++ filp->f_pos = 1;
+ if (filldir(dirent, ".", 1, 0, ip->i_ino,
+ DT_DIR))
+ return 0;
+@@ -3102,7 +3110,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * parent ".."
+ */
+- filp->f_pos = 1;
++ filp->f_pos = 2;
+ if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
+ return 0;
+
+@@ -3123,24 +3131,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
+ *
+- * pn = index = 0: First entry "."
+- * pn = 0; index = 1: Second entry ".."
++ * pn = 0; index = 1: First entry "."
++ * pn = 0; index = 2: Second entry ".."
+ * pn > 0: Real entries, pn=1 -> leftmost page
+ * pn = index = -1: No more entries
+ */
+ dtpos = filp->f_pos;
+- if (dtpos == 0) {
++ if (dtpos < 2) {
+ /* build "." entry */
+
++ filp->f_pos = 1;
+ if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
+ DT_DIR))
+ return 0;
+- dtoffset->index = 1;
++ dtoffset->index = 2;
+ filp->f_pos = dtpos;
+ }
+
+ if (dtoffset->pn == 0) {
+- if (dtoffset->index == 1) {
++ if (dtoffset->index == 2) {
+ /* build ".." entry */
+
+ if (filldir(dirent, "..", 2, filp->f_pos,
+@@ -3233,6 +3242,12 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ }
+ jfs_dirent->position = unique_pos++;
+ }
++ /*
++ * We add 1 to the index because we may
++ * use a value of 2 internally, and NFSv4
++ * doesn't like that.
++ */
++ jfs_dirent->position++;
+ } else {
+ jfs_dirent->position = dtpos;
+ len = min(d_namleft, DTLHDRDATALEN_LEGACY);
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 168cb93..3fde055 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -451,9 +451,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+- args->csa_rclists = kmalloc(args->csa_nrclists *
+- sizeof(*args->csa_rclists),
+- GFP_KERNEL);
++ args->csa_rclists = kmalloc_array(args->csa_nrclists,
++ sizeof(*args->csa_rclists),
++ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
+index 850a7c0..07a666a 100644
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
+
+ if (err == -EOPNOTSUPP) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+- bio_put(bio);
+- /* to be detected by submit_seg_bio() */
++ /* to be detected by nilfs_segbuf_submit_bio() */
+ }
+
+ if (!uptodate)
+@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+ bio->bi_private = segbuf;
+ bio_get(bio);
+ submit_bio(mode, bio);
++ segbuf->sb_nbio++;
+ if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+ bio_put(bio);
+ err = -EOPNOTSUPP;
+ goto failed;
+ }
+- segbuf->sb_nbio++;
+ bio_put(bio);
+
+ wi->bio = NULL;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 3efa725..ef1740d 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -604,7 +604,7 @@ const struct file_operations proc_clear_refs_operations = {
+ };
+
+ struct pagemapread {
+- int pos, len;
++ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
+ u64 *buffer;
+ };
+
+@@ -792,8 +792,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ if (!count)
+ goto out_task;
+
+- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
++ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
++ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
+ ret = -ENOMEM;
+ if (!pm.buffer)
+ goto out_task;
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 847994a..e868554 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -135,16 +135,27 @@ static inline int bio_has_allocated_vec(struct bio *bio)
+ #define bio_io_error(bio) bio_endio((bio), -EIO)
+
+ /*
+- * drivers should not use the __ version unless they _really_ want to
+- * run through the entire bio and not just pending pieces
++ * drivers should not use the __ version unless they _really_ know what
++ * they're doing
+ */
+ #define __bio_for_each_segment(bvl, bio, i, start_idx) \
+ for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
+ i < (bio)->bi_vcnt; \
+ bvl++, i++)
+
++/*
++ * drivers should _never_ use the all version - the bio may have been split
++ * before it got to the driver and the driver won't own all of it
++ */
++#define bio_for_each_segment_all(bvl, bio, i) \
++ for (i = 0; \
++ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
++ i++)
++
+ #define bio_for_each_segment(bvl, bio, i) \
+- __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
++ for (i = (bio)->bi_idx; \
++ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
++ i++)
+
+ /*
+ * get a reference to a bio, so it won't disappear. the intended use is
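+
+bio_for_each_segment_all(), added above, always walks every bio_vec from
+index 0, whereas bio_for_each_segment() starts at bi_idx and so only visits
+the not-yet-completed part of a possibly split bio. A typical owner-only
+use, as in the bio_uncopy_user() hunk earlier:
+
+	struct bio_vec *bvec;
+	int i;
+
+	/* the bio's owner frees every page, including completed segments */
+	bio_for_each_segment_all(bvec, bio, i)
+		__free_page(bvec->bv_page);
+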
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index c3da42d..82924bf 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -71,6 +71,8 @@ struct trace_iterator {
+ /* trace_seq for __print_flags() and __print_symbolic() etc. */
+ struct trace_seq tmp_seq;
+
++ cpumask_var_t started;
++
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+@@ -83,7 +85,7 @@ struct trace_iterator {
+ loff_t pos;
+ long idx;
+
+- cpumask_var_t started;
++ /* All new fields here will be zeroed out in pipe_read */
+ };
+
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5b42f1b..de3a321 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -297,6 +297,7 @@ struct mm_struct {
+ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+ #endif
+ unsigned long mmap_base; /* base of mmap area */
++ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long task_size; /* size of task vm space */
+ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
+ unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 573c809..a595dce 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -190,7 +190,7 @@ size_t ksize(const void *);
+ #endif
+
+ /**
+- * kcalloc - allocate memory for an array. The memory is set to zero.
++ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate.
+@@ -240,11 +240,22 @@ size_t ksize(const void *);
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+ if (size != 0 && n > ULONG_MAX / size)
+ return NULL;
+- return __kmalloc(n * size, flags | __GFP_ZERO);
++ return __kmalloc(n * size, flags);
++}
++
++/**
++ * kcalloc - allocate memory for an array. The memory is set to zero.
++ * @n: number of elements.
++ * @size: element size.
++ * @flags: the type of memory to allocate (see kmalloc).
++ */
++static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++{
++ return kmalloc_array(n, size, flags | __GFP_ZERO);
+ }
+
+ #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
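+
+kmalloc_array() keeps kcalloc()'s n * size overflow check but skips the
+zeroing; the NFS callback hunk earlier switches to it for exactly that
+reason. Typical use, with an illustrative array:
+
+	/* the multiplication is overflow-checked; kmalloc(n * size, ...) is not */
+	entries = kmalloc_array(nr, sizeof(*entries), GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+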
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index bea7ad5..e007f76 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -530,6 +530,63 @@ do { \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
++#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
++ lock, ret) \
++do { \
++ DEFINE_WAIT(__wait); \
++ \
++ for (;;) { \
++ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ spin_unlock_irq(&lock); \
++ ret = schedule_timeout(ret); \
++ spin_lock_irq(&lock); \
++ if (!ret) \
++ break; \
++ } \
++ finish_wait(&wq, &__wait); \
++} while (0)
++
++/**
++ * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses.
++ * The condition is checked under the lock. This is expected
++ * to be called with the lock taken.
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @lock: a locked spinlock_t, which will be released before schedule()
++ * and reacquired afterwards.
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true or signal is received. The @condition is
++ * checked each time the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * This is supposed to be called while holding the lock. The lock is
++ * dropped before going to sleep and is reacquired afterwards.
++ *
++ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
++ * was interrupted by a signal, and the remaining jiffies if the
++ * condition evaluated to true before the timeout elapsed.
++ */
++#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
++ timeout) \
++({ \
++ int __ret = timeout; \
++ \
++ if (!(condition)) \
++ __wait_event_interruptible_lock_irq_timeout( \
++ wq, condition, lock, __ret); \
++ __ret; \
++})
++
+
+ #define __wait_event_killable(wq, condition, ret) \
+ do { \
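+
+The zfcp_qdio hunk earlier is the first user of the new macro; its calling
+convention looks like the sketch below (illustrative names; the spinlock is
+held on entry, dropped across the sleep, and held again on return):
+
+	spin_lock_irq(&q->lock);
+	ret = wait_event_interruptible_lock_irq_timeout(q->wq,
+				work_available(q), q->lock, 5 * HZ);
+	/* ret: 0 on timeout, -ERESTARTSYS on signal, else jiffies remaining */
+	spin_unlock_irq(&q->lock);
+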
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8be9b746..5bbe443 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -900,6 +900,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ }
+
+ /*
++ * Initialize event state based on the perf_event_attr::disabled.
++ */
++static inline void perf_event__state_init(struct perf_event *event)
++{
++ event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
++ PERF_EVENT_STATE_INACTIVE;
++}
++
++/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+@@ -6050,8 +6059,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ event->overflow_handler = overflow_handler;
+ event->overflow_handler_context = context;
+
+- if (attr->disabled)
+- event->state = PERF_EVENT_STATE_OFF;
++ perf_event__state_init(event);
+
+ pmu = NULL;
+
+@@ -6433,9 +6441,17 @@ SYSCALL_DEFINE5(perf_event_open,
+
+ mutex_lock(&gctx->mutex);
+ perf_remove_from_context(group_leader);
++
++ /*
++ * Removing from the context ends up with a disabled
++ * event. What we want here is the event in its initial
++ * startup state, ready to be added into a new context.
++ */
++ perf_event__state_init(group_leader);
+ list_for_each_entry(sibling, &group_leader->sibling_list,
+ group_entry) {
+ perf_remove_from_context(sibling);
++ perf_event__state_init(sibling);
+ put_ctx(gctx);
+ }
+ mutex_unlock(&gctx->mutex);
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 66e4576..59474c5 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -5033,7 +5033,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
+ * idle runqueue:
+ */
+ if (rq->cfs.load.weight)
+- rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
++ rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+
+ return rr_interval;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a584ad9..ce1067f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3375,6 +3375,7 @@ waitagain:
+ memset(&iter->seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
++ cpumask_clear(iter->started);
+ iter->pos = -1;
+
+ trace_event_read_lock();
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 0ad2420..0bc9ff0 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1920,6 +1920,15 @@ __acquires(&gcwq->lock)
+ dump_stack();
+ }
+
++ /*
++ * The following prevents a kworker from hogging CPU on !PREEMPT
++ * kernels, where a requeueing work item waiting for something to
++ * happen could deadlock with stop_machine, as such a work item could
++ * indefinitely requeue itself while all other CPUs are trapped in
++ * stop_machine.
++ */
++ cond_resched();
++
+ spin_lock_irq(&gcwq->lock);
+
+ /* clear cpu intensive status */
+diff --git a/mm/bounce.c b/mm/bounce.c
+index 4e9ae72..f71a3b34 100644
+--- a/mm/bounce.c
++++ b/mm/bounce.c
+@@ -132,7 +132,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+ /*
+ * free up bounce indirect pages used
+ */
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ org_vec = bio_orig->bi_io_vec + i;
+ if (bvec->bv_page == org_vec->bv_page)
+ continue;
+diff --git a/mm/nommu.c b/mm/nommu.c
+index f0cd7ab..1db7971 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1825,6 +1825,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
++{
++ unsigned long pfn = start >> PAGE_SHIFT;
++ unsigned long vm_len = vma->vm_end - vma->vm_start;
++
++ pfn += vma->vm_pgoff;
++ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_iomap_memory);
++
+ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+ {
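+
+The nommu vm_iomap_memory() above is meant to be called from a driver's
+mmap handler, letting the helper derive pfn and length from the vma. A
+sketch of the intended call site, with illustrative names:
+
+	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
+	{
+		struct mydev *dev = file->private_data;
+
+		/* map dev->phys_base .. +dev->phys_len as described by the vma */
+		return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
+	}
+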
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 5c028e2..b5afea2 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5777,6 +5777,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
+ zone->free_area[order].nr_free--;
+ __mod_zone_page_state(zone, NR_FREE_PAGES,
+ - (1UL << order));
++#ifdef CONFIG_HIGHMEM
++ if (PageHighMem(page))
++ totalhigh_pages -= 1 << order;
++#endif
+ for (i = 0; i < (1 << order); i++)
+ SetPageReserved((page+i));
+ pfn += (1 << order);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 5485077..739b073 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -32,6 +32,8 @@ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
+ static int ip_ttl_min = 1;
+ static int ip_ttl_max = 255;
++static int tcp_syn_retries_min = 1;
++static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
+ static int ip_ping_group_range_min[] = { 0, 0 };
+ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+
+@@ -231,7 +233,9 @@ static struct ctl_table ipv4_table[] = {
+ .data = &sysctl_tcp_syn_retries,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &tcp_syn_retries_min,
++ .extra2 = &tcp_syn_retries_max
+ },
+ {
+ .procname = "tcp_synack_retries",
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 449a918..f5af259 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -257,10 +257,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
+ {
+ struct mr6_table *mrt, *next;
+
++ rtnl_lock();
+ list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+ list_del(&mrt->list);
+ ip6mr_free_table(mrt);
+ }
++ rtnl_unlock();
+ fib_rules_unregister(net->ipv6.mr6_rules_ops);
+ }
+ #else
+@@ -287,7 +289,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
+
+ static void __net_exit ip6mr_rules_exit(struct net *net)
+ {
++ rtnl_lock();
+ ip6mr_free_table(net->ipv6.mrt6);
++ net->ipv6.mrt6 = NULL;
++ rtnl_unlock();
+ }
+ #endif
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 6fefdfc..8dbdb8e 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2073,6 +2073,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
+ pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
+ }
+ pol->sadb_x_policy_dir = dir+1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = xp->index;
+ pol->sadb_x_policy_priority = xp->priority;
+
+@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_pid = c->pid;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
++ hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ hdr->sadb_msg_reserved = 0;
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+@@ -3108,7 +3110,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
+ pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
+ pol->sadb_x_policy_dir = dir+1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = xp->index;
++ pol->sadb_x_policy_priority = xp->priority;
+
+ /* Set sadb_comb's. */
+ if (x->id.proto == IPPROTO_AH)
+@@ -3496,6 +3500,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
+ pol->sadb_x_policy_dir = dir + 1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = 0;
+ pol->sadb_x_policy_priority = 0;
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index cd6cbdb..7d882fc 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -821,8 +821,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+
+- /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
+- if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
++ /*
++ * Drop duplicate 802.11 retransmissions
++ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
++ */
++ if (rx->skb->len >= 24 && rx->sta &&
++ !ieee80211_is_ctl(hdr->frame_control) &&
++ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
++ !is_multicast_ether_addr(hdr->addr1)) {
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ rx->sta->last_seq_ctrl[rx->seqno_idx] ==
+ hdr->seq_ctrl)) {
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index e25e490..6e38ef0 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -606,6 +606,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sockaddr_atmpvc pvc;
+ int state;
+
++ memset(&pvc, 0, sizeof(pvc));
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ pvc.sap_addr.vpi = flow->vcc->vpi;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index b7cddb9..7f59944 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1467,6 +1467,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_cbq_wrropt opt;
+
++ memset(&opt, 0, sizeof(opt));
+ opt.flags = 0;
+ opt.allot = cl->allot;
+ opt.priority = cl->priority + 1;
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 96eb168..3dd7207 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -205,6 +205,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
+ */
+ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+ {
++ memset(q, 0, sizeof(struct sctp_outq));
++
+ q->asoc = asoc;
+ INIT_LIST_HEAD(&q->out_chunk_list);
+ INIT_LIST_HEAD(&q->control_chunk_list);
+@@ -212,13 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+ INIT_LIST_HEAD(&q->sacked);
+ INIT_LIST_HEAD(&q->abandoned);
+
+- q->fast_rtx = 0;
+- q->outstanding_bytes = 0;
+ q->empty = 1;
+- q->cork = 0;
+-
+- q->malloced = 0;
+- q->out_qlen = 0;
+ }
+
+ /* Free the outqueue structure and any related pending chunks.
+diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+index 2763e3e..38f388c 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+@@ -82,9 +82,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
+ >>PAGE_CACHE_SHIFT;
+ unsigned int offset = (buf->page_base + len - 1)
+ & (PAGE_CACHE_SIZE - 1);
+- ptr = kmap_atomic(buf->pages[last], KM_USER0);
++ ptr = kmap_atomic(buf->pages[last]);
+ pad = *(ptr + offset);
+- kunmap_atomic(ptr, KM_USER0);
++ kunmap_atomic(ptr);
+ goto out;
+ } else
+ len -= buf->page_len;
+diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
+index 145e6784..0a648c5 100644
+--- a/net/sunrpc/socklib.c
++++ b/net/sunrpc/socklib.c
+@@ -114,7 +114,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
+ }
+
+ len = PAGE_CACHE_SIZE;
+- kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
++ kaddr = kmap_atomic(*ppage);
+ if (base) {
+ len -= base;
+ if (pglen < len)
+@@ -127,7 +127,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
+ ret = copy_actor(desc, kaddr, len);
+ }
+ flush_dcache_page(*ppage);
+- kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(kaddr);
+ copied += ret;
+ if (ret != len || !desc->count)
+ goto out;
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 593f4c6..6997cdd 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -122,9 +122,9 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
+ {
+ char *kaddr;
+
+- kaddr = kmap_atomic(buf->pages[0], KM_USER0);
++ kaddr = kmap_atomic(buf->pages[0]);
+ kaddr[buf->page_base + len] = '\0';
+- kunmap_atomic(kaddr, KM_USER0);
++ kunmap_atomic(kaddr);
+ }
+ EXPORT_SYMBOL_GPL(xdr_terminate_string);
+
+@@ -232,12 +232,15 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
+ pgto_base -= copy;
+ pgfrom_base -= copy;
+
+- vto = kmap_atomic(*pgto, KM_USER0);
+- vfrom = kmap_atomic(*pgfrom, KM_USER1);
+- memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
++ vto = kmap_atomic(*pgto);
++ if (*pgto != *pgfrom) {
++ vfrom = kmap_atomic(*pgfrom);
++ memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
++ kunmap_atomic(vfrom);
++ } else
++ memmove(vto + pgto_base, vto + pgfrom_base, copy);
+ flush_dcache_page(*pgto);
+- kunmap_atomic(vfrom, KM_USER1);
+- kunmap_atomic(vto, KM_USER0);
++ kunmap_atomic(vto);
+
+ } while ((len -= copy) != 0);
+ }
+@@ -267,9 +270,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+ if (copy > len)
+ copy = len;
+
+- vto = kmap_atomic(*pgto, KM_USER0);
++ vto = kmap_atomic(*pgto);
+ memcpy(vto + pgbase, p, copy);
+- kunmap_atomic(vto, KM_USER0);
++ kunmap_atomic(vto);
+
+ len -= copy;
+ if (len == 0)
+@@ -311,9 +314,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ if (copy > len)
+ copy = len;
+
+- vfrom = kmap_atomic(*pgfrom, KM_USER0);
++ vfrom = kmap_atomic(*pgfrom);
+ memcpy(p, vfrom + pgbase, copy);
+- kunmap_atomic(vfrom, KM_USER0);
++ kunmap_atomic(vfrom);
+
+ pgbase += copy;
+ if (pgbase == PAGE_CACHE_SIZE) {
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 554d081..1776e57 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -338,9 +338,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+ curlen = copy_len;
+ dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
+ __func__, i, destp, copy_len, curlen);
+- srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
++ srcp = kmap_atomic(ppages[i]);
+ memcpy(destp, srcp+page_base, curlen);
+- kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(srcp);
+ rqst->rq_svec[0].iov_len += curlen;
+ destp += curlen;
+ copy_len -= curlen;
+@@ -639,10 +639,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
+ dprintk("RPC: %s: page %d"
+ " srcp 0x%p len %d curlen %d\n",
+ __func__, i, srcp, copy_len, curlen);
+- destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
++ destp = kmap_atomic(ppages[i]);
+ memcpy(destp + page_base, srcp, curlen);
+ flush_dcache_page(ppages[i]);
+- kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(destp);
+ srcp += curlen;
+ copy_len -= curlen;
+ if (copy_len == 0)
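
The sunrpc and xprtrdma hunks here are part of the tree-wide move to the slotless kmap_atomic() API: the KM_USER0/KM_USER1/KM_SKB_SUNRPC_DATA slot arguments went away upstream, the slot is now managed implicitly, and mappings behave like a stack, so they must be unmapped in reverse order. Note also that the reworked _shift_data_right_pages avoids mapping the same page twice, falling back to memmove() only for the overlapping same-page case. A sketch of the new calling convention, using a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* hypothetical helper: copy 'len' bytes between two distinct pages */
static void copy_page_bytes(struct page *dst, struct page *src, size_t len)
{
        void *vdst, *vsrc;

        vdst = kmap_atomic(dst);        /* no KM_* slot argument */
        vsrc = kmap_atomic(src);
        memcpy(vdst, vsrc, len);
        kunmap_atomic(vsrc);            /* unmap in reverse order */
        kunmap_atomic(vdst);
}
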
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c06c365..6d4d263 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4826,12 +4826,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
+
+ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+ {
++ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
+ void *hdr = ((void **)skb->cb)[1];
+ struct nlattr *data = ((void **)skb->cb)[2];
+
+ nla_nest_end(skb, data);
+ genlmsg_end(skb, hdr);
+- genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
++ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
++ nl80211_testmode_mcgrp.id, gfp);
+ }
+ EXPORT_SYMBOL(cfg80211_testmode_event);
+ #endif
+@@ -7282,7 +7284,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
+ return;
+ }
+
+- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
++ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
++ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
+index cef813d..ed726d1 100644
+--- a/sound/i2c/other/ak4xxx-adda.c
++++ b/sound/i2c/other/ak4xxx-adda.c
+@@ -571,7 +571,7 @@ static int ak4xxx_capture_source_info(struct snd_kcontrol *kcontrol,
+ struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol);
+ int mixer_ch = AK_GET_SHIFT(kcontrol->private_value);
+ const char **input_names;
+- int num_names, idx;
++ unsigned int num_names, idx;
+
+ num_names = ak4xxx_capture_num_inputs(ak, mixer_ch);
+ if (!num_names)
+diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
+index 97871be..cba84ef 100644
+--- a/sound/isa/opti9xx/opti92x-ad1848.c
++++ b/sound/isa/opti9xx/opti92x-ad1848.c
+@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
+
+ #endif /* CONFIG_PNP */
+
+-#ifdef OPTi93X
+-#define DEV_NAME "opti93x"
+-#else
+-#define DEV_NAME "opti92x"
+-#endif
++#define DEV_NAME KBUILD_MODNAME
+
+ static char * snd_opti9xx_names[] = {
+ "unknown",
+@@ -1126,7 +1122,7 @@ static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard)
+
+ static struct pnp_card_driver opti9xx_pnpc_driver = {
+ .flags = PNP_DRIVER_RES_DISABLE,
+- .name = "opti9xx",
++ .name = DEV_NAME,
+ .id_table = snd_opti9xx_pnpids,
+ .probe = snd_opti9xx_pnp_probe,
+ .remove = __devexit_p(snd_opti9xx_pnp_remove),
+diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
+index 6c9e8e8..1fc28f5 100644
+--- a/sound/oss/Kconfig
++++ b/sound/oss/Kconfig
+@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
+ menuconfig SOUND_OSS
+ tristate "OSS sound modules"
+ depends on ISA_DMA_API && VIRT_TO_BUS
++ depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
+ help
+ OSS is the Open Sound System suite of sound card drivers. They make
+ sound programming easier since they provide a common API. Say Y or
+diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
+index c994daa..af6ec8d 100644
+--- a/sound/usb/6fire/comm.c
++++ b/sound/usb/6fire/comm.c
+@@ -111,19 +111,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
+ static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 value)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 vl, u8 vh)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+@@ -136,6 +154,12 @@ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
++ if (!rt->receiver_buffer) {
++ kfree(rt);
++ return -ENOMEM;
++ }
++
+ rt->serial = 1;
+ rt->chip = chip;
+ usb_init_urb(urb);
+@@ -153,6 +177,7 @@ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+ urb->interval = 1;
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret < 0) {
++ kfree(rt->receiver_buffer);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
+ return ret;
+@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
+
+ void usb6fire_comm_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->comm);
++ struct comm_runtime *rt = chip->comm;
++
++ kfree(rt->receiver_buffer);
++ kfree(rt);
+ chip->comm = NULL;
+ }
+diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
+index edc5dc8..19e2f92 100644
+--- a/sound/usb/6fire/comm.h
++++ b/sound/usb/6fire/comm.h
+@@ -25,7 +25,7 @@ struct comm_runtime {
+ struct sfire_chip *chip;
+
+ struct urb receiver;
+- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
++ u8 *receiver_buffer;
+
+ u8 serial; /* urb serial */
+
+diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
+index 13f4509..8283c5d 100644
+--- a/sound/usb/6fire/midi.c
++++ b/sound/usb/6fire/midi.c
+@@ -20,6 +20,10 @@
+ #include "chip.h"
+ #include "comm.h"
+
++enum {
++ MIDI_BUFSIZE = 64
++};
++
+ static void usb6fire_midi_out_handler(struct urb *urb)
+ {
+ struct midi_runtime *rt = urb->context;
+@@ -157,6 +161,12 @@ int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
++ if (!rt->out_buffer) {
++ kfree(rt);
++ return -ENOMEM;
++ }
++
+ rt->chip = chip;
+ rt->in_received = usb6fire_midi_in_received;
+ rt->out_buffer[0] = 0x80; /* 'send midi' command */
+@@ -170,6 +180,7 @@ int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+
+ ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
+ if (ret < 0) {
++ kfree(rt->out_buffer);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
+ return ret;
+@@ -198,6 +209,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
+
+ void usb6fire_midi_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->midi);
++ struct midi_runtime *rt = chip->midi;
++
++ kfree(rt->out_buffer);
++ kfree(rt);
+ chip->midi = NULL;
+ }
+diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
+index 97a7bf6..7f8f448 100644
+--- a/sound/usb/6fire/midi.h
++++ b/sound/usb/6fire/midi.h
+@@ -17,10 +17,6 @@
+
+ #include "common.h"
+
+-enum {
+- MIDI_BUFSIZE = 64
+-};
+-
+ struct midi_runtime {
+ struct sfire_chip *chip;
+ struct snd_rawmidi *instance;
+@@ -33,7 +29,7 @@ struct midi_runtime {
+ struct snd_rawmidi_substream *out;
+ struct urb out_urb;
+ u8 out_serial; /* serial number of out packet */
+- u8 out_buffer[MIDI_BUFSIZE];
++ u8 *out_buffer;
+ int buffer_offset;
+
+ void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index 888a7c7..8609c74 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -579,6 +579,33 @@ static void __devinit usb6fire_pcm_init_urb(struct pcm_urb *urb,
+ urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
+ }
+
++static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
++{
++ int i;
++
++ for (i = 0; i < PCM_N_URBS; i++) {
++ rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++ if (!rt->out_urbs[i].buffer)
++ return -ENOMEM;
++ rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++ if (!rt->in_urbs[i].buffer)
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
++{
++ int i;
++
++ for (i = 0; i < PCM_N_URBS; i++) {
++ kfree(rt->out_urbs[i].buffer);
++ kfree(rt->in_urbs[i].buffer);
++ }
++}
++
+ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ {
+ int i;
+@@ -590,6 +617,13 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ ret = usb6fire_pcm_buffers_init(rt);
++ if (ret) {
++ usb6fire_pcm_buffers_destroy(rt);
++ kfree(rt);
++ return ret;
++ }
++
+ rt->chip = chip;
+ rt->stream_state = STREAM_DISABLED;
+ rt->rate = ARRAY_SIZE(rates);
+@@ -611,6 +645,7 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+
+ ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
+ if (ret < 0) {
++ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
+ return ret;
+@@ -626,6 +661,7 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ snd_dma_continuous_data(GFP_KERNEL),
+ MAX_BUFSIZE, MAX_BUFSIZE);
+ if (ret) {
++ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX
+ "error preallocating pcm buffers.\n");
+@@ -670,6 +706,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
+
+ void usb6fire_pcm_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->pcm);
++ struct pcm_runtime *rt = chip->pcm;
++
++ usb6fire_pcm_buffers_destroy(rt);
++ kfree(rt);
+ chip->pcm = NULL;
+ }
+diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
+index 2bee813..a8e8899 100644
+--- a/sound/usb/6fire/pcm.h
++++ b/sound/usb/6fire/pcm.h
+@@ -33,7 +33,7 @@ struct pcm_urb {
+ struct urb instance;
+ struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
+ /* END DO NOT SEPARATE */
+- u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
++ u8 *buffer;
+
+ struct pcm_urb *peer;
+ };
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index aeb26eb..41b9fe0 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -720,8 +720,20 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
+ return 0;
+ }
+ case UAC1_PROCESSING_UNIT:
+- case UAC1_EXTENSION_UNIT: {
++ case UAC1_EXTENSION_UNIT:
++ /* UAC2_PROCESSING_UNIT_V2 */
++ /* UAC2_EFFECT_UNIT */
++ case UAC2_EXTENSION_UNIT_V2: {
+ struct uac_processing_unit_descriptor *d = p1;
++
++ if (state->mixer->protocol == UAC_VERSION_2 &&
++ hdr[2] == UAC2_EFFECT_UNIT) {
++ /* UAC2/UAC1 unit IDs overlap here in an
++ * incompatible way. Ignore this unit for now.
++ */
++ return 0;
++ }
++
+ if (d->bNrInPins) {
+ id = d->baSourceID[0];
+ break; /* continue to parse */
+@@ -1956,6 +1968,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
+ return parse_audio_extension_unit(state, unitid, p1);
+ else /* UAC_VERSION_2 */
+ return parse_audio_processing_unit(state, unitid, p1);
++ case UAC2_EXTENSION_UNIT_V2:
++ return parse_audio_extension_unit(state, unitid, p1);
+ default:
+ snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
+ return -EINVAL;
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 78284b1..42dffa0 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -15,7 +15,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
+
+ static inline int is_anon_memory(const char *filename)
+ {
+- return strcmp(filename, "//anon") == 0;
++ return !strcmp(filename, "//anon") ||
++ !strcmp(filename, "/anon_hugepage (deleted)");
+ }
+
+ static inline int is_no_dso_memory(const char *filename)
diff --git a/3.2.54/1051_linux-3.2.52.patch b/3.2.54/1051_linux-3.2.52.patch
new file mode 100644
index 0000000..94b9359
--- /dev/null
+++ b/3.2.54/1051_linux-3.2.52.patch
@@ -0,0 +1,5221 @@
+diff --git a/Makefile b/Makefile
+index 0f11936..1dd2c09 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
+index c898deb..189ed00 100644
+--- a/arch/arm/mach-versatile/pci.c
++++ b/arch/arm/mach-versatile/pci.c
+@@ -43,9 +43,9 @@
+ #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
+ #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
+ #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
+-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
+-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
+ #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
+
+ #define DEVICE_ID_OFFSET 0x00
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index fbdd12e..cc3f35d 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -98,6 +98,9 @@ void show_mem(unsigned int filter)
+ printk("Mem-info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_bank (i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
+index f114a3b..ce6e7a8 100644
+--- a/arch/ia64/mm/contig.c
++++ b/arch/ia64/mm/contig.c
+@@ -46,6 +46,8 @@ void show_mem(unsigned int filter)
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+ printk(KERN_INFO "Node memory in pages:\n");
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index c641333..2230817 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
+new file mode 100644
+index 0000000..40e02d9
+--- /dev/null
++++ b/arch/m68k/kernel/vmlinux-nommu.lds
+@@ -0,0 +1,93 @@
++/*
++ * vmlinux.lds.S -- master linker script for m68knommu arch
++ *
++ * (C) Copyright 2002-2012, Greg Ungerer <gerg@snapgear.com>
++ *
++ * This linker script is equipped to build either ROM loaded or RAM
++ * run kernels.
++ */
++
++#if defined(CONFIG_RAMKERNEL)
++#define KTEXT_ADDR CONFIG_KERNELBASE
++#endif
++#if defined(CONFIG_ROMKERNEL)
++#define KTEXT_ADDR CONFIG_ROMSTART
++#define KDATA_ADDR CONFIG_KERNELBASE
++#define LOAD_OFFSET KDATA_ADDR + (ADDR(.text) + SIZEOF(.text))
++#endif
++
++#include <asm/page.h>
++#include <asm/thread_info.h>
++#include <asm-generic/vmlinux.lds.h>
++
++OUTPUT_ARCH(m68k)
++ENTRY(_start)
++
++jiffies = jiffies_64 + 4;
++
++SECTIONS {
++
++#ifdef CONFIG_ROMVEC
++ . = CONFIG_ROMVEC;
++ .romvec : {
++ __rom_start = .;
++ _romvec = .;
++ *(.romvec)
++ *(.data..initvect)
++ }
++#endif
++
++ . = KTEXT_ADDR;
++
++ _text = .;
++ _stext = .;
++ .text : {
++ HEAD_TEXT
++ TEXT_TEXT
++ SCHED_TEXT
++ LOCK_TEXT
++ *(.fixup)
++ . = ALIGN(16);
++ }
++ _etext = .;
++
++#ifdef KDATA_ADDR
++ . = KDATA_ADDR;
++#endif
++
++ _sdata = .;
++ RO_DATA_SECTION(PAGE_SIZE)
++ RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
++ _edata = .;
++
++ EXCEPTION_TABLE(16)
++ NOTES
++
++ . = ALIGN(PAGE_SIZE);
++ __init_begin = .;
++ INIT_TEXT_SECTION(PAGE_SIZE)
++ INIT_DATA_SECTION(16)
++ PERCPU_SECTION(16)
++ .m68k_fixup : {
++ __start_fixup = .;
++ *(.m68k_fixup)
++ __stop_fixup = .;
++ }
++ .init.data : {
++ . = ALIGN(PAGE_SIZE);
++ __init_end = .;
++ }
++
++ _sbss = .;
++ BSS_SECTION(0, 0, 0)
++ _ebss = .;
++
++ _end = .;
++
++ STABS_DEBUG
++ .comment 0 : { *(.comment) }
++
++ /* Sections to be discarded */
++ DISCARDS
++}
++
+diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
+index 030dabf..69ec796 100644
+--- a/arch/m68k/kernel/vmlinux.lds.S
++++ b/arch/m68k/kernel/vmlinux.lds.S
+@@ -1,5 +1,14 @@
+-#ifdef CONFIG_MMU
+-#include "vmlinux.lds_mm.S"
++#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
++PHDRS
++{
++ text PT_LOAD FILEHDR PHDRS FLAGS (7);
++ data PT_LOAD FLAGS (7);
++}
++#ifdef CONFIG_SUN3
++#include "vmlinux-sun3.lds"
+ #else
+-#include "vmlinux.lds_no.S"
++#include "vmlinux-std.lds"
++#endif
++#else
++#include "vmlinux-nommu.lds"
+ #endif
+diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
+deleted file mode 100644
+index 99ba315..0000000
+--- a/arch/m68k/kernel/vmlinux.lds_mm.S
++++ /dev/null
+@@ -1,10 +0,0 @@
+-PHDRS
+-{
+- text PT_LOAD FILEHDR PHDRS FLAGS (7);
+- data PT_LOAD FLAGS (7);
+-}
+-#ifdef CONFIG_SUN3
+-#include "vmlinux-sun3.lds"
+-#else
+-#include "vmlinux-std.lds"
+-#endif
+diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
+deleted file mode 100644
+index 4e23893..0000000
+--- a/arch/m68k/kernel/vmlinux.lds_no.S
++++ /dev/null
+@@ -1,187 +0,0 @@
+-/*
+- * vmlinux.lds.S -- master linker script for m68knommu arch
+- *
+- * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
+- *
+- * This linker script is equipped to build either ROM loaded or RAM
+- * run kernels.
+- */
+-
+-#include <asm-generic/vmlinux.lds.h>
+-#include <asm/page.h>
+-#include <asm/thread_info.h>
+-
+-#if defined(CONFIG_RAMKERNEL)
+-#define RAM_START CONFIG_KERNELBASE
+-#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
+-#define TEXT ram
+-#define DATA ram
+-#define INIT ram
+-#define BSSS ram
+-#endif
+-#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
+-#define RAM_START CONFIG_RAMBASE
+-#define RAM_LENGTH CONFIG_RAMSIZE
+-#define ROMVEC_START CONFIG_ROMVEC
+-#define ROMVEC_LENGTH CONFIG_ROMVECSIZE
+-#define ROM_START CONFIG_ROMSTART
+-#define ROM_LENGTH CONFIG_ROMSIZE
+-#define TEXT rom
+-#define DATA ram
+-#define INIT ram
+-#define BSSS ram
+-#endif
+-
+-#ifndef DATA_ADDR
+-#define DATA_ADDR
+-#endif
+-
+-
+-OUTPUT_ARCH(m68k)
+-ENTRY(_start)
+-
+-MEMORY {
+- ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
+-#ifdef ROM_START
+- romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
+- rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
+-#endif
+-}
+-
+-jiffies = jiffies_64 + 4;
+-
+-SECTIONS {
+-
+-#ifdef ROMVEC_START
+- . = ROMVEC_START ;
+- .romvec : {
+- __rom_start = . ;
+- _romvec = .;
+- *(.data..initvect)
+- } > romvec
+-#endif
+-
+- .text : {
+- _text = .;
+- _stext = . ;
+- HEAD_TEXT
+- TEXT_TEXT
+- SCHED_TEXT
+- LOCK_TEXT
+- *(.text..lock)
+-
+- . = ALIGN(16); /* Exception table */
+- __start___ex_table = .;
+- *(__ex_table)
+- __stop___ex_table = .;
+-
+- *(.rodata) *(.rodata.*)
+- *(__vermagic) /* Kernel version magic */
+- *(.rodata1)
+- *(.rodata.str1.1)
+-
+- /* Kernel symbol table: Normal symbols */
+- . = ALIGN(4);
+- __start___ksymtab = .;
+- *(SORT(___ksymtab+*))
+- __stop___ksymtab = .;
+-
+- /* Kernel symbol table: GPL-only symbols */
+- __start___ksymtab_gpl = .;
+- *(SORT(___ksymtab_gpl+*))
+- __stop___ksymtab_gpl = .;
+-
+- /* Kernel symbol table: Normal unused symbols */
+- __start___ksymtab_unused = .;
+- *(SORT(___ksymtab_unused+*))
+- __stop___ksymtab_unused = .;
+-
+- /* Kernel symbol table: GPL-only unused symbols */
+- __start___ksymtab_unused_gpl = .;
+- *(SORT(___ksymtab_unused_gpl+*))
+- __stop___ksymtab_unused_gpl = .;
+-
+- /* Kernel symbol table: GPL-future symbols */
+- __start___ksymtab_gpl_future = .;
+- *(SORT(___ksymtab_gpl_future+*))
+- __stop___ksymtab_gpl_future = .;
+-
+- /* Kernel symbol table: Normal symbols */
+- __start___kcrctab = .;
+- *(SORT(___kcrctab+*))
+- __stop___kcrctab = .;
+-
+- /* Kernel symbol table: GPL-only symbols */
+- __start___kcrctab_gpl = .;
+- *(SORT(___kcrctab_gpl+*))
+- __stop___kcrctab_gpl = .;
+-
+- /* Kernel symbol table: Normal unused symbols */
+- __start___kcrctab_unused = .;
+- *(SORT(___kcrctab_unused+*))
+- __stop___kcrctab_unused = .;
+-
+- /* Kernel symbol table: GPL-only unused symbols */
+- __start___kcrctab_unused_gpl = .;
+- *(SORT(___kcrctab_unused_gpl+*))
+- __stop___kcrctab_unused_gpl = .;
+-
+- /* Kernel symbol table: GPL-future symbols */
+- __start___kcrctab_gpl_future = .;
+- *(SORT(___kcrctab_gpl_future+*))
+- __stop___kcrctab_gpl_future = .;
+-
+- /* Kernel symbol table: strings */
+- *(__ksymtab_strings)
+-
+- /* Built-in module parameters */
+- . = ALIGN(4) ;
+- __start___param = .;
+- *(__param)
+- __stop___param = .;
+-
+- /* Built-in module versions */
+- . = ALIGN(4) ;
+- __start___modver = .;
+- *(__modver)
+- __stop___modver = .;
+-
+- . = ALIGN(4) ;
+- _etext = . ;
+- } > TEXT
+-
+- .data DATA_ADDR : {
+- . = ALIGN(4);
+- _sdata = . ;
+- DATA_DATA
+- CACHELINE_ALIGNED_DATA(32)
+- PAGE_ALIGNED_DATA(PAGE_SIZE)
+- *(.data..shared_aligned)
+- INIT_TASK_DATA(THREAD_SIZE)
+- _edata = . ;
+- } > DATA
+-
+- .init.text : {
+- . = ALIGN(PAGE_SIZE);
+- __init_begin = .;
+- } > INIT
+- INIT_TEXT_SECTION(PAGE_SIZE) > INIT
+- INIT_DATA_SECTION(16) > INIT
+- .init.data : {
+- . = ALIGN(PAGE_SIZE);
+- __init_end = .;
+- } > INIT
+-
+- .bss : {
+- . = ALIGN(4);
+- _sbss = . ;
+- *(.bss)
+- *(COMMON)
+- . = ALIGN(4) ;
+- _ebss = . ;
+- _end = . ;
+- } > BSSS
+-
+- DISCARDS
+-}
+-
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 82f364e..0b62162 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -685,6 +685,8 @@ void show_mem(unsigned int filter)
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ #ifndef CONFIG_DISCONTIGMEM
+ i = max_mapnr;
+ while (i-- > 0) {
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 8184ee9..3fcbae0 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
+ nb = aligninfo[instr].len;
+ flags = aligninfo[instr].flags;
+
++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
++ nb = 8;
++ flags = LD+SW;
++ } else if (IS_XFORM(instruction) &&
++ ((instruction >> 1) & 0x3ff) == 660) {
++ nb = 8;
++ flags = ST+SW;
++ }
++
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 0cfcf98..d0b205c 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -495,7 +495,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+ /* number of bytes needed for the bitmap */
+ sz = (tbl->it_size + 7) >> 3;
+
+- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
++ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
+ if (!page)
+ panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+ tbl->it_map = page_address(page);
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index 826681d..26af24b 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -375,6 +375,7 @@ static void parse_system_parameter_string(struct seq_file *m)
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
++ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+
+ if (call_status != 0) {
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index 55be64d..ca683a1 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -18,6 +18,7 @@
+ #include <asm/machdep.h>
+ #include <asm/smp.h>
+ #include <asm/pmc.h>
++#include <asm/firmware.h>
+
+ #include "cacheinfo.h"
+
+@@ -178,14 +179,24 @@ SYSFS_PMCSETUP(purr, SPRN_PURR);
+ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+ SYSFS_PMCSETUP(dscr, SPRN_DSCR);
+
++/*
++ Lets only enable read for phyp resources and
++ enable write when needed with a separate function.
++ Lets be conservative and default to pseries.
++*/
+ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+ static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
+ static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
+-static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
++static SYSDEV_ATTR(purr, 0400, show_purr, store_purr);
+
+ unsigned long dscr_default = 0;
+ EXPORT_SYMBOL(dscr_default);
+
++static void add_write_permission_dev_attr(struct sysdev_attribute *attr)
++{
++ attr->attr.mode |= 0200;
++}
++
+ static ssize_t show_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+ {
+@@ -394,8 +405,11 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
+ if (cpu_has_feature(CPU_FTR_MMCRA))
+ sysdev_create_file(s, &attr_mmcra);
+
+- if (cpu_has_feature(CPU_FTR_PURR))
++ if (cpu_has_feature(CPU_FTR_PURR)) {
++ if (!firmware_has_feature(FW_FEATURE_LPAR))
++ add_write_permission_dev_attr(&attr_purr);
+ sysdev_create_file(s, &attr_purr);
++ }
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_create_file(s, &attr_spurr);
+diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
+index 18245af..3cdbc64 100644
+--- a/arch/powerpc/lib/checksum_64.S
++++ b/arch/powerpc/lib/checksum_64.S
+@@ -229,19 +229,35 @@ _GLOBAL(csum_partial)
+ blr
+
+
+- .macro source
++ .macro srcnr
+ 100:
+ .section __ex_table,"a"
+ .align 3
+- .llong 100b,.Lsrc_error
++ .llong 100b,.Lsrc_error_nr
+ .previous
+ .endm
+
+- .macro dest
++ .macro source
++150:
++ .section __ex_table,"a"
++ .align 3
++ .llong 150b,.Lsrc_error
++ .previous
++ .endm
++
++ .macro dstnr
+ 200:
+ .section __ex_table,"a"
+ .align 3
+- .llong 200b,.Ldest_error
++ .llong 200b,.Ldest_error_nr
++ .previous
++ .endm
++
++ .macro dest
++250:
++ .section __ex_table,"a"
++ .align 3
++ .llong 250b,.Ldest_error
+ .previous
+ .endm
+
+@@ -272,16 +288,16 @@ _GLOBAL(csum_partial_copy_generic)
+ rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
+ beq .Lcopy_aligned
+
+- li r7,4
+- sub r6,r7,r6
++ li r9,4
++ sub r6,r9,r6
+ mtctr r6
+
+ 1:
+-source; lhz r6,0(r3) /* align to doubleword */
++srcnr; lhz r6,0(r3) /* align to doubleword */
+ subi r5,r5,2
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ bdnz 1b
+
+@@ -395,10 +411,10 @@ dest; std r16,56(r4)
+
+ mtctr r6
+ 3:
+-source; ld r6,0(r3)
++srcnr; ld r6,0(r3)
+ addi r3,r3,8
+ adde r0,r0,r6
+-dest; std r6,0(r4)
++dstnr; std r6,0(r4)
+ addi r4,r4,8
+ bdnz 3b
+
+@@ -408,10 +424,10 @@ dest; std r6,0(r4)
+ srdi. r6,r5,2
+ beq .Lcopy_tail_halfword
+
+-source; lwz r6,0(r3)
++srcnr; lwz r6,0(r3)
+ addi r3,r3,4
+ adde r0,r0,r6
+-dest; stw r6,0(r4)
++dstnr; stw r6,0(r4)
+ addi r4,r4,4
+ subi r5,r5,4
+
+@@ -419,10 +435,10 @@ dest; stw r6,0(r4)
+ srdi. r6,r5,1
+ beq .Lcopy_tail_byte
+
+-source; lhz r6,0(r3)
++srcnr; lhz r6,0(r3)
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ subi r5,r5,2
+
+@@ -430,10 +446,10 @@ dest; sth r6,0(r4)
+ andi. r6,r5,1
+ beq .Lcopy_finish
+
+-source; lbz r6,0(r3)
++srcnr; lbz r6,0(r3)
+ sldi r9,r6,8 /* Pad the byte out to 16 bits */
+ adde r0,r0,r9
+-dest; stb r6,0(r4)
++dstnr; stb r6,0(r4)
+
+ .Lcopy_finish:
+ addze r0,r0 /* add in final carry */
+@@ -443,6 +459,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Lsrc_error:
++ ld r14,STK_REG(r14)(r1)
++ ld r15,STK_REG(r15)(r1)
++ ld r16,STK_REG(r16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Lsrc_error_nr:
+ cmpdi 0,r7,0
+ beqlr
+ li r6,-EFAULT
+@@ -450,6 +471,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Ldest_error:
++ ld r14,STK_REG(r14)(r1)
++ ld r15,STK_REG(r15)(r1)
++ ld r16,STK_REG(r16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Ldest_error_nr:
+ cmpdi 0,r8,0
+ beqlr
+ li r6,-EFAULT
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index f445e98..cfabc3d 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -1177,7 +1177,7 @@ sys_sigreturn:
+ nop
+
+ call syscall_trace
+- nop
++ mov 1, %o1
+
+ 1:
+ /* We don't want to muck with user registers like a
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index 79f3103..7c00735 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -25,11 +25,10 @@ kvmap_itlb:
+ */
+ kvmap_itlb_4v:
+
+-kvmap_itlb_nonlinear:
+ /* Catch kernel NULL pointer calls. */
+ sethi %hi(PAGE_SIZE), %g5
+ cmp %g4, %g5
+- bleu,pn %xcc, kvmap_dtlb_longpath
++ blu,pn %xcc, kvmap_itlb_longpath
+ nop
+
+ KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 7f5f65d..817187d 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -147,7 +147,7 @@ linux_syscall_trace32:
+ srl %i4, 0, %o4
+ srl %i1, 0, %o1
+ srl %i2, 0, %o2
+- ba,pt %xcc, 2f
++ ba,pt %xcc, 5f
+ srl %i3, 0, %o3
+
+ linux_syscall_trace:
+@@ -177,13 +177,13 @@ linux_sparc_syscall32:
+ srl %i1, 0, %o1 ! IEU0 Group
+ ldx [%g6 + TI_FLAGS], %l0 ! Load
+
+- srl %i5, 0, %o5 ! IEU1
++ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+- call %l7 ! CTI Group brk forced
+- srl %i3, 0, %o3 ! IEU0
++5: call %l7 ! CTI Group brk forced
++ srl %i5, 0, %o5 ! IEU1
+ ba,a,pt %xcc, 3f
+
+ /* Linux native system calls enter here... */
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index da1b781..8fa84a3 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -131,7 +131,6 @@ startup_continue:
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+- add %l6, 1, %l6
+
+ mov 15, %l7
+ BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+@@ -224,7 +223,6 @@ niagara_lock_tlb:
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+- add %l6, 1, %l6
+
+ 1:
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 1b30bb3..fbb8005 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -131,15 +131,6 @@ EXPORT_SYMBOL(___copy_from_user);
+ EXPORT_SYMBOL(___copy_in_user);
+ EXPORT_SYMBOL(__clear_user);
+
+-/* RW semaphores */
+-EXPORT_SYMBOL(__down_read);
+-EXPORT_SYMBOL(__down_read_trylock);
+-EXPORT_SYMBOL(__down_write);
+-EXPORT_SYMBOL(__down_write_trylock);
+-EXPORT_SYMBOL(__up_read);
+-EXPORT_SYMBOL(__up_write);
+-EXPORT_SYMBOL(__downgrade_write);
+-
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
+ EXPORT_SYMBOL(atomic_add_ret);
+diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
+index 3b379cd..d1af4ed 100644
+--- a/arch/unicore32/mm/init.c
++++ b/arch/unicore32/mm/init.c
+@@ -65,6 +65,9 @@ void show_mem(unsigned int filter)
+ printk(KERN_DEFAULT "Mem-info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 47f4e5f..a4e1b4b 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -468,6 +468,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ },
+ },
++ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
++ .callback = set_pci_reboot,
++ .ident = "Dell PowerEdge C6100",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ },
++ },
++ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
++ .callback = set_pci_reboot,
++ .ident = "Dell PowerEdge C6100",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ },
++ },
+ { }
+ };
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index f9537e3..a18d20d 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -703,10 +703,13 @@ void __init efi_enter_virtual_mode(void)
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+- md->type != EFI_BOOT_SERVICES_CODE &&
+- md->type != EFI_BOOT_SERVICES_DATA)
+- continue;
++ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
++#ifdef CONFIG_X86_64
++ if (md->type != EFI_BOOT_SERVICES_CODE &&
++ md->type != EFI_BOOT_SERVICES_DATA)
++#endif
++ continue;
++ }
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+diff --git a/crypto/api.c b/crypto/api.c
+index 033a714..cea3cf6 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+ BLOCKING_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
++
+ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+ {
+ atomic_inc(&alg->cra_refcnt);
+@@ -150,8 +152,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
+ }
+ up_write(&crypto_alg_sem);
+
+- if (alg != &larval->alg)
++ if (alg != &larval->alg) {
+ kfree(larval);
++ if (crypto_is_larval(alg))
++ alg = crypto_larval_wait(alg);
++ }
+
+ return alg;
+ }
+diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
+index f40acef..a6977e1 100644
+--- a/drivers/acpi/acpi_ipmi.c
++++ b/drivers/acpi/acpi_ipmi.c
+@@ -39,6 +39,7 @@
+ #include <linux/ipmi.h>
+ #include <linux/device.h>
+ #include <linux/pnp.h>
++#include <linux/spinlock.h>
+
+ MODULE_AUTHOR("Zhao Yakui");
+ MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
+ struct list_head head;
+ /* the IPMI request message list */
+ struct list_head tx_msg_list;
+- struct mutex tx_msg_lock;
++ spinlock_t tx_msg_lock;
+ acpi_handle handle;
+ struct pnp_dev *pnp_dev;
+ ipmi_user_t user_interface;
+@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+ struct kernel_ipmi_msg *msg;
+ struct acpi_ipmi_buffer *buffer;
+ struct acpi_ipmi_device *device;
++ unsigned long flags;
+
+ msg = &tx_msg->tx_message;
+ /*
+@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+
+ /* Get the msgid */
+ device = tx_msg->device;
+- mutex_lock(&device->tx_msg_lock);
++ spin_lock_irqsave(&device->tx_msg_lock, flags);
+ device->curr_msgid++;
+ tx_msg->tx_msgid = device->curr_msgid;
+- mutex_unlock(&device->tx_msg_lock);
++ spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+ }
+
+ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ int msg_found = 0;
+ struct acpi_ipmi_msg *tx_msg;
+ struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
++ unsigned long flags;
+
+ if (msg->user != ipmi_device->user_interface) {
+ dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ ipmi_free_recv_msg(msg);
+ return;
+ }
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ if (msg->msgid == tx_msg->tx_msgid) {
+ msg_found = 1;
+@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ }
+ }
+
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ if (!msg_found) {
+ dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+ "returned.\n", msg->msgid);
+@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ struct acpi_ipmi_device *ipmi_device = handler_context;
+ int err, rem_time;
+ acpi_status status;
++ unsigned long flags;
+ /*
+ * IPMI opregion message.
+ * IPMI message is firstly written to the BMC and system software
+@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ return AE_NO_MEMORY;
+
+ acpi_format_ipmi_msg(tx_msg, address, value);
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ err = ipmi_request_settime(ipmi_device->user_interface,
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ status = AE_OK;
+
+ end_label:
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_del(&tx_msg->head);
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ kfree(tx_msg);
+ return status;
+ }
+@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+
+ INIT_LIST_HEAD(&ipmi_device->head);
+
+- mutex_init(&ipmi_device->tx_msg_lock);
++ spin_lock_init(&ipmi_device->tx_msg_lock);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ ipmi_install_space_handler(ipmi_device);
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 51de186..8176b82 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -964,6 +964,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ ec_enlarge_storm_threshold, "CLEVO hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
++ {
++ ec_validate_ecdt, "ASUS hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {},
+ };
+
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index d3446f6..d7ad865 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1186,6 +1186,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ int err;
+ u32 cp;
+
++ memset(&arg64, 0, sizeof(arg64));
+ err = 0;
+ err |=
+ copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
+index 9125bbe..504bc16 100644
+--- a/drivers/block/cpqarray.c
++++ b/drivers/block/cpqarray.c
+@@ -1195,6 +1195,7 @@ out_passthru:
+ ida_pci_info_struct pciinfo;
+
+ if (!arg) return -EINVAL;
++ memset(&pciinfo, 0, sizeof(pciinfo));
+ pciinfo.bus = host->pci_dev->bus->number;
+ pciinfo.dev_fn = host->pci_dev->devfn;
+ pciinfo.board_id = host->board_id;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index bde72f7..3539f9b 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -84,6 +84,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
++ { USB_DEVICE(0x0CF3, 0xE005) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+@@ -125,6 +126,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1bd3924..f18b5a2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -108,6 +108,7 @@ static struct usb_device_id btusb_table[] = {
+
+ /* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0b05, 0x17b5) },
++ { USB_DEVICE(0x0b05, 0x17cb) },
+ { USB_DEVICE(0x04ca, 0x2003) },
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x413c, 0x8197) },
+@@ -154,6 +155,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 7211f67..72f460e 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -125,6 +125,9 @@ static struct edid_quirk {
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
++
++ /* Medion MD 30217 PG */
++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ };
+
+ /*** DDC fetch and block validation ***/
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index a07ccab..72163e8 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -621,7 +621,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+- udelay(100);
++ /*
++ * For now, just give more slack to branch devices. We
++ * could check the DPCD for I2C bit rate capabilities,
++ * and if available, adjust the interval. We could also
++ * be more careful with DP-to-Legacy adapters where a
++ * long legacy cable may force very low I2C bit rates.
++ */
++ if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
++ DP_DWN_STRM_PORT_PRESENT)
++ usleep_range(500, 600);
++ else
++ usleep_range(300, 400);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index cffb007..356a252 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -161,7 +161,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
+- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++ asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index f0dc04b..3171294 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1385,8 +1385,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
++ /* some dce3.x boards have a bug in their transmitter control table.
++ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
++ * does the same thing and more.
++ */
++ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
++ (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index f5962a0..a68057a 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -501,7 +501,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+ {
+- u32 tmp;
++ u32 tmp, buffer_alloc, i;
++ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+@@ -524,18 +525,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+- if (other_mode)
++ if (other_mode) {
+ tmp = 0; /* 1/2 */
+- else
++ buffer_alloc = 1;
++ } else {
+ tmp = 2; /* whole */
+- } else
++ buffer_alloc = 2;
++ }
++ } else {
+ tmp = 0;
++ buffer_alloc = 0;
++ }
+
+ /* second controller of the pair uses second half of the lb */
+ if (radeon_crtc->crtc_id % 2)
+ tmp += 4;
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
++ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
++ DMIF_BUFFERS_ALLOCATED_COMPLETED)
++ break;
++ udelay(1);
++ }
++ }
++
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index fe44a95..47f3bd2 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -459,6 +459,10 @@
+ # define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+ # define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
++#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
++# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
++# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
++
+ #define IH_RB_CNTL 0x3e00
+ # define IH_RB_ENABLE (1 << 0)
+ # define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 383b38e..cda89c6b 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -709,13 +709,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
++ u8 *num_dst_objs = (u8 *)
++ ((u8 *)router_src_dst_table + 1 +
++ (router_src_dst_table->ucNumberOfSrc * 2));
++ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+ int enum_id;
+
+ router.router_id = router_obj_id;
+- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
+- enum_id++) {
++ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
++ le16_to_cpu(dst_objs[enum_id]))
+ break;
+ }
+
+@@ -1616,7 +1619,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+ kfree(edid);
+ }
+ }
+- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
++ record += fake_edid_record->ucFakeEDIDLength ?
++ fake_edid_record->ucFakeEDIDLength + 2 :
++ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 6fd53b6..b101843 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1387,6 +1387,24 @@ struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .force = radeon_dvi_force,
+ };
+
++static const struct drm_connector_funcs radeon_edp_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = radeon_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = radeon_lvds_set_property,
++ .destroy = radeon_dp_connector_destroy,
++ .force = radeon_dvi_force,
++};
++
++static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = radeon_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = radeon_lvds_set_property,
++ .destroy = radeon_dp_connector_destroy,
++ .force = radeon_dvi_force,
++};
++
+ void
+ radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+@@ -1478,8 +1496,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+@@ -1496,6 +1512,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ default:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_dp_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ radeon_connector->dac_load_detect = true;
+@@ -1508,6 +1528,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ case DRM_MODE_CONNECTOR_DisplayPort:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_dp_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+@@ -1532,6 +1556,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_lvds_bridge_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ drm_connector_attach_property(&radeon_connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+@@ -1695,7 +1723,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
++ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index cd94abb..8cde84b 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -818,10 +818,16 @@ int radeon_device_init(struct radeon_device *rdev,
+ return r;
+ }
+ if (radeon_testing) {
+- radeon_test_moves(rdev);
++ if (rdev->accel_working)
++ radeon_test_moves(rdev);
++ else
++ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
+ }
+ if (radeon_benchmarking) {
+- radeon_benchmark(rdev, radeon_benchmarking);
++ if (rdev->accel_working)
++ radeon_benchmark(rdev, radeon_benchmarking);
++ else
++ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index 4dd9512..c087434 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
+ /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+ * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+- WREG32_MC(RS480_MC_MISC_CNTL,
+- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
++ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
++ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
++ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ } else {
+- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
++ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
++ tmp |= RS480_GART_INDEX_REG_EN;
++ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ }
+ /* Enable gart */
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 611aafc..9ac4389 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -59,6 +59,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
+ struct hid_report_enum *report_enum = device->report_enum + type;
+ struct hid_report *report;
+
++ if (id >= HID_MAX_IDS)
++ return NULL;
+ if (report_enum->report_id_hash[id])
+ return report_enum->report_id_hash[id];
+
+@@ -216,9 +218,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ {
+ struct hid_report *report;
+ struct hid_field *field;
+- int usages;
++ unsigned usages;
+ unsigned offset;
+- int i;
++ unsigned i;
+
+ report = hid_register_report(parser->device, report_type, parser->global.report_id);
+ if (!report) {
+@@ -237,7 +239,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ if (!parser->local.usage_index) /* Ignore padding fields */
+ return 0;
+
+- usages = max_t(int, parser->local.usage_index, parser->global.report_count);
++ usages = max_t(unsigned, parser->local.usage_index,
++ parser->global.report_count);
+
+ field = hid_register_field(report, usages, parser->global.report_count);
+ if (!field)
+@@ -248,7 +251,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
+
+ for (i = 0; i < usages; i++) {
+- int j = i;
++ unsigned j = i;
+ /* Duplicate the last usage we parsed if we have excess values */
+ if (i >= parser->local.usage_index)
+ j = parser->local.usage_index - 1;
+@@ -380,8 +383,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+
+ case HID_GLOBAL_ITEM_TAG_REPORT_ID:
+ parser->global.report_id = item_udata(item);
+- if (parser->global.report_id == 0) {
+- dbg_hid("report_id 0 is invalid\n");
++ if (parser->global.report_id == 0 ||
++ parser->global.report_id >= HID_MAX_IDS) {
++ dbg_hid("report_id %u is invalid\n",
++ parser->global.report_id);
+ return -1;
+ }
+ return 0;
+@@ -552,7 +557,7 @@ static void hid_device_release(struct device *dev)
+ for (i = 0; i < HID_REPORT_TYPES; i++) {
+ struct hid_report_enum *report_enum = device->report_enum + i;
+
+- for (j = 0; j < 256; j++) {
++ for (j = 0; j < HID_MAX_IDS; j++) {
+ struct hid_report *report = report_enum->report_id_hash[j];
+ if (report)
+ hid_free_report(report);
+@@ -710,6 +715,64 @@ err:
+ }
+ EXPORT_SYMBOL_GPL(hid_parse_report);
+
++static const char * const hid_report_names[] = {
++ "HID_INPUT_REPORT",
++ "HID_OUTPUT_REPORT",
++ "HID_FEATURE_REPORT",
++};
++/**
++ * hid_validate_values - validate existing device report's value indexes
++ *
++ * @hid: hid device
++ * @type: which report type to examine
++ * @id: which report ID to examine (0 for first)
++ * @field_index: which report field to examine
++ * @report_counts: expected number of values
++ *
++ * Validate the number of values in a given field of a given report, after
++ * parsing.
++ */
++struct hid_report *hid_validate_values(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int field_index,
++ unsigned int report_counts)
++{
++ struct hid_report *report;
++
++ if (type > HID_FEATURE_REPORT) {
++ hid_err(hid, "invalid HID report type %u\n", type);
++ return NULL;
++ }
++
++ if (id >= HID_MAX_IDS) {
++ hid_err(hid, "invalid HID report id %u\n", id);
++ return NULL;
++ }
++
++ /*
++ * Explicitly not using hid_get_report() here since it depends on
++ * ->numbered being checked, which may not always be the case when
++ * drivers go to access report values.
++ */
++ report = hid->report_enum[type].report_id_hash[id];
++ if (!report) {
++ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->maxfield <= field_index) {
++ hid_err(hid, "not enough fields in %s %u\n",
++ hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->field[field_index]->report_count < report_counts) {
++ hid_err(hid, "not enough values in %s %u field %u\n",
++ hid_report_names[type], id, field_index);
++ return NULL;
++ }
++ return report;
++}
++EXPORT_SYMBOL_GPL(hid_validate_values);
++
+ /*
+ * Convert a signed n-bit integer to signed 32-bit integer. Common
+ * cases are done through the compiler, the screwed things has to be
+@@ -990,7 +1053,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
+
+ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
+ {
+- unsigned size = field->report_size;
++ unsigned size;
++
++ if (!field)
++ return -1;
++
++ size = field->report_size;
+
+ hid_dump_input(field->report->device, field->usage + offset, value);
+
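
The new hid_validate_values() above centralizes the report-shape checks that the force-feedback drivers converted later in this patch (lg2ff, lg3ff, lg4ff, lgff, zpff) used to open-code. A hedged sketch of the call pattern a hypothetical driver init would use; my_ff_init() and the value count 7 are illustrative only, and the prototype is assumed to be exported alongside the function above:

#include <linux/errno.h>
#include <linux/hid.h>

static int my_ff_init(struct hid_device *hid)
{
	struct hid_report *report;

	/* Output report 0, field 0 must carry at least 7 values. */
	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
	if (!report)
		return -ENODEV;

	/* Now field[0]->value[0..6] are known to exist. */
	report->field[0]->value[0] = 0x00;
	return 0;
}
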
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 08075f2..ca2b3e6 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -581,6 +581,7 @@
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
+
+ #define USB_VENDOR_ID_ONTRAK 0x0a07
+ #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index f333139..95c79a3 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -284,6 +284,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ if (field->flags & HID_MAIN_ITEM_CONSTANT)
+ goto ignore;
+
++ /* Ignore if report count is out of bounds. */
++ if (field->report_count < 1)
++ goto ignore;
++
+ /* only LED usages are supported in output fields */
+ if (field->report_type == HID_OUTPUT_REPORT &&
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
+@@ -887,10 +891,15 @@ static void report_features(struct hid_device *hid)
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list)
+- for (i = 0; i < rep->maxfield; i++)
++ for (i = 0; i < rep->maxfield; i++) {
++ /* Ignore if report count is out of bounds. */
++ if (rep->field[i]->report_count < 1)
++ continue;
++
+ for (j = 0; j < rep->field[i]->maxusage; j++)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
++ }
+ }
+
+ /*
+diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
+index 3c31bc6..128f011 100644
+--- a/drivers/hid/hid-lg2ff.c
++++ b/drivers/hid/hid-lg2ff.c
+@@ -66,26 +66,13 @@ int lg2ff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
++ /* Check that the report looks ok */
++ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
++ if (!report)
+ return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 1) {
+- hid_err(hid, "output report is empty\n");
+- return -ENODEV;
+- }
+- if (report->field[0]->report_count < 7) {
+- hid_err(hid, "not enough values in the field\n");
+- return -ENODEV;
+- }
+
+ lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
+ if (!lg2ff)
+diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
+index f98644c..91f981f 100644
+--- a/drivers/hid/hid-lg3ff.c
++++ b/drivers/hid/hid-lg3ff.c
+@@ -68,10 +68,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
+ int x, y;
+
+ /*
+- * Maxusage should always be 63 (maximum fields)
+- * likely a better way to ensure this data is clean
++ * Available values in the field should always be 63, but we only use up to
++ * 35. Instead, clear the entire area, however big it is.
+ */
+- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
++ memset(report->field[0]->value, 0,
++ sizeof(__s32) * report->field[0]->report_count);
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+@@ -131,32 +132,14 @@ static const signed short ff3_joystick_ac[] = {
+ int lg3ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff3_joystick_ac;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
+- return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
++ return -ENODEV;
+
+ /* Assume single fixed device G940 */
+ for (i = 0; ff_bits[i] >= 0; i++)
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 103f30d..5c6bf4b 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -339,33 +339,15 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
+ int lg4ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ struct lg4ff_device_entry *entry;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
+
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
+index 27bc54f..1d978daa 100644
+--- a/drivers/hid/hid-lgff.c
++++ b/drivers/hid/hid-lgff.c
+@@ -130,27 +130,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
+ int lgff_init(struct hid_device* hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff_joystick;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
++ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(devices); i++) {
+ if (dev->id.vendor == devices[i].idVendor &&
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 8821ecc..828a0dd 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
+ goto hid_parse_fail;
+ }
+
++ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
++ 0, DJREPORT_SHORT_LENGTH - 1)) {
++ retval = -ENODEV;
++ goto hid_parse_fail;
++ }
++
+ /* Starts the usb device and connects to upper interfaces hiddev and
+ * hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
+index 9fae2eb..48cba85 100644
+--- a/drivers/hid/hid-ntrig.c
++++ b/drivers/hid/hid-ntrig.c
+@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
+ struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
+ report_id_hash[0x0d];
+
+- if (!report)
++ if (!report || report->maxfield < 1 ||
++ report->field[0]->report_count < 1)
+ return -EINVAL;
+
+ usbhid_submit_report(hdev, report, USB_DIR_IN);
+diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
+index 01e7d2c..1daeaca 100644
+--- a/drivers/hid/hid-picolcd.c
++++ b/drivers/hid/hid-picolcd.c
+@@ -1424,7 +1424,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
+ buf += 10;
+ cnt -= 10;
+ }
+- if (!report)
++ if (!report || report->maxfield != 1)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
+index 070f93a..12786cd 100644
+--- a/drivers/hid/hid-pl.c
++++ b/drivers/hid/hid-pl.c
+@@ -129,8 +129,14 @@ static int plff_init(struct hid_device *hid)
+ strong = &report->field[0]->value[2];
+ weak = &report->field[0]->value[3];
+ debug("detected single-field device");
+- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
+- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
++ } else if (report->field[0]->maxusage == 1 &&
++ report->field[0]->usage[0].hid ==
++ (HID_UP_LED | 0x43) &&
++ report->maxfield >= 4 &&
++ report->field[0]->report_count >= 1 &&
++ report->field[1]->report_count >= 1 &&
++ report->field[2]->report_count >= 1 &&
++ report->field[3]->report_count >= 1) {
+ report->field[0]->value[0] = 0x00;
+ report->field[1]->value[0] = 0x00;
+ strong = &report->field[2]->value[0];
+diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
+index 6020137..2b03c9b 100644
+--- a/drivers/hid/hid-speedlink.c
++++ b/drivers/hid/hid-speedlink.c
+@@ -3,7 +3,7 @@
+ * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
+ * the HID descriptor.
+ *
+- * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
+ */
+
+ /*
+@@ -48,8 +48,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+ {
+ /* No other conditions due to usage_table. */
+- /* Fix "jumpy" cursor (invalid events sent by device). */
+- if (value == 256)
++
++	/* This fixes the "jumpy" cursor occurring due to invalid events sent
++	 * by the device. Some devices only send them with value==+256, others
++	 * don't. However, catching abs(value)>=256 is restrictive enough not
++	 * to interfere with devices that were bug-free (this has been tested).
++ */
++ if (abs(value) >= 256)
+ return 1;
+ /* Drop useless distance 0 events (on button clicks etc.) as well */
+ if (value == 0)
+diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
+index f6ba81d..f348f7f 100644
+--- a/drivers/hid/hid-zpff.c
++++ b/drivers/hid/hid-zpff.c
+@@ -70,21 +70,13 @@ static int zpff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- int error;
++ int i, error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
+- return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 4) {
+- hid_err(hid, "not enough fields in report\n");
+- return -ENODEV;
++ for (i = 0; i < 4; i++) {
++ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
++ if (!report)
++ return -ENODEV;
+ }
+
+ zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 17d15bb..9e50f61 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -42,7 +42,6 @@ static struct cdev hidraw_cdev;
+ static struct class *hidraw_class;
+ static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
+ static DEFINE_MUTEX(minors_lock);
+-static void drop_ref(struct hidraw *hid, int exists_bit);
+
+ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+ {
+@@ -296,14 +295,37 @@ out:
+
+ }
+
++static void drop_ref(struct hidraw *hidraw, int exists_bit)
++{
++ if (exists_bit) {
++ hid_hw_close(hidraw->hid);
++ hidraw->exist = 0;
++ if (hidraw->open)
++ wake_up_interruptible(&hidraw->wait);
++ } else {
++ --hidraw->open;
++ }
++
++ if (!hidraw->open && !hidraw->exist) {
++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
++ hidraw_table[hidraw->minor] = NULL;
++ kfree(hidraw);
++ }
++}
++
+ static int hidraw_release(struct inode * inode, struct file * file)
+ {
+ unsigned int minor = iminor(inode);
+ struct hidraw_list *list = file->private_data;
+
+- drop_ref(hidraw_table[minor], 0);
++ mutex_lock(&minors_lock);
++
+ list_del(&list->node);
+ kfree(list);
++
++ drop_ref(hidraw_table[minor], 0);
++
++ mutex_unlock(&minors_lock);
+ return 0;
+ }
+
+@@ -506,7 +528,12 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
+ void hidraw_disconnect(struct hid_device *hid)
+ {
+ struct hidraw *hidraw = hid->hidraw;
++
++ mutex_lock(&minors_lock);
++
+ drop_ref(hidraw, 1);
++
++ mutex_unlock(&minors_lock);
+ }
+ EXPORT_SYMBOL_GPL(hidraw_disconnect);
+
+@@ -555,23 +582,3 @@ void hidraw_exit(void)
+ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
+
+ }
+-
+-static void drop_ref(struct hidraw *hidraw, int exists_bit)
+-{
+- mutex_lock(&minors_lock);
+- if (exists_bit) {
+- hid_hw_close(hidraw->hid);
+- hidraw->exist = 0;
+- if (hidraw->open)
+- wake_up_interruptible(&hidraw->wait);
+- } else {
+- --hidraw->open;
+- }
+-
+- if (!hidraw->open && !hidraw->exist) {
+- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+- hidraw_table[hidraw->minor] = NULL;
+- kfree(hidraw);
+- }
+- mutex_unlock(&minors_lock);
+-}
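
The hidraw rework above hoists minors_lock out of drop_ref() into its callers, so release() can unlink its per-file state and drop the reference in one critical section, and disconnect() serializes against it. A standalone sketch of the caller-holds-the-lock convention, with a pthread mutex playing the role of minors_lock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct dev { int open; int exist; };

/* Caller must hold table_lock, like the reworked drop_ref(). */
static void drop_ref_locked(struct dev *d, int disconnect)
{
	if (disconnect)
		d->exist = 0;
	else
		d->open--;
	if (!d->open && !d->exist)
		free(d);                /* last user gone and device gone */
}

static void release(struct dev *d)
{
	pthread_mutex_lock(&table_lock);
	/* ...unlink per-file state here, still under the lock... */
	drop_ref_locked(d, 0);
	pthread_mutex_unlock(&table_lock);
}

static void disconnect(struct dev *d)
{
	pthread_mutex_lock(&table_lock);
	drop_ref_locked(d, 1);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));
	d->open = 1;
	d->exist = 1;
	disconnect(d);          /* device gone, but a file is still open */
	release(d);             /* last close frees it */
	return 0;
}
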
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 96a1e0f..f98fbad 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -99,6 +99,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
++
+ { 0, 0 }
+ };
+
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index d99aa84..30cac58 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -344,8 +344,10 @@ static int applesmc_get_lower_bound(unsigned int *lo, const char *key)
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *lo = 0;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(entry->key, key) < 0)
+ begin = middle + 1;
+ else
+@@ -364,8 +366,10 @@ static int applesmc_get_upper_bound(unsigned int *hi, const char *key)
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *hi = smcreg.key_count;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(key, entry->key) < 0)
+ end = middle;
+ else
+@@ -485,16 +489,25 @@ static int applesmc_init_smcreg_try(void)
+ {
+ struct applesmc_registers *s = &smcreg;
+ bool left_light_sensor, right_light_sensor;
++ unsigned int count;
+ u8 tmp[1];
+ int ret;
+
+ if (s->init_complete)
+ return 0;
+
+- ret = read_register_count(&s->key_count);
++ ret = read_register_count(&count);
+ if (ret)
+ return ret;
+
++ if (s->cache && s->key_count != count) {
++ pr_warn("key count changed from %d to %d\n",
++ s->key_count, count);
++ kfree(s->cache);
++ s->cache = NULL;
++ }
++ s->key_count = count;
++
+ if (!s->cache)
+ s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
+ if (!s->cache)
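
Two applesmc fixes sit in this hunk: on a failed entry lookup the search bound is reset to a safe extreme (0 for the lower bound, key_count for the upper) instead of being left stale, and a key count that changed invalidates the cache. A runnable sketch of the lower-bound search with the fail-safe, over a hypothetical sorted key table:

#include <stdio.h>
#include <string.h>

static const char *keys[] = { "FAN0", "TC0D", "TC0H", "TG0D" }; /* sorted */
#define NKEYS 4

/* Returns 0 and sets *lo to the first index with key >= target;
 * on a (simulated) read failure, *lo falls back to 0, never stale. */
static int get_lower_bound(unsigned *lo, const char *target)
{
	unsigned begin = 0, end = NKEYS;

	while (begin != end) {
		unsigned mid = begin + (end - begin) / 2;
		const char *entry = keys[mid];

		if (!entry) {           /* I/O error path */
			*lo = 0;        /* fail safe */
			return -1;
		}
		if (strcmp(entry, target) < 0)
			begin = mid + 1;
		else
			end = mid;
	}
	*lo = begin;
	return 0;
}

int main(void)
{
	unsigned lo;

	get_lower_bound(&lo, "TC0H");
	printf("lower bound: %u\n", lo);        /* prints 2 */
	return 0;
}
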
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index f44a067..b4a4aaf 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -861,56 +861,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
+ return order;
+ }
+
++static void dma_pte_free_level(struct dmar_domain *domain, int level,
++ struct dma_pte *pte, unsigned long pfn,
++ unsigned long start_pfn, unsigned long last_pfn)
++{
++ pfn = max(start_pfn, pfn);
++ pte = &pte[pfn_level_offset(pfn, level)];
++
++ do {
++ unsigned long level_pfn;
++ struct dma_pte *level_pte;
++
++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
++ goto next;
++
++ level_pfn = pfn & level_mask(level - 1);
++ level_pte = phys_to_virt(dma_pte_addr(pte));
++
++ if (level > 2)
++ dma_pte_free_level(domain, level - 1, level_pte,
++ level_pfn, start_pfn, last_pfn);
++
++ /* If range covers entire pagetable, free it */
++ if (!(start_pfn > level_pfn ||
++ last_pfn < level_pfn + level_size(level))) {
++ dma_clear_pte(pte);
++ domain_flush_cache(domain, pte, sizeof(*pte));
++ free_pgtable_page(level_pte);
++ }
++next:
++ pfn += level_size(level);
++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
++}
++
+ /* free page table pages. last level pte should already be cleared */
+ static void dma_pte_free_pagetable(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
+ {
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+- struct dma_pte *first_pte, *pte;
+- int total = agaw_to_level(domain->agaw);
+- int level;
+- unsigned long tmp;
+- int large_page = 2;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
+
+ /* We don't need lock here; nobody else touches the iova range */
+- level = 2;
+- while (level <= total) {
+- tmp = align_to_level(start_pfn, level);
+-
+- /* If we can't even clear one PTE at this level, we're done */
+- if (tmp + level_size(level) - 1 > last_pfn)
+- return;
+-
+- do {
+- large_page = level;
+- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+- if (large_page > level)
+- level = large_page + 1;
+- if (!pte) {
+- tmp = align_to_level(tmp + 1, level + 1);
+- continue;
+- }
+- do {
+- if (dma_pte_present(pte)) {
+- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+- dma_clear_pte(pte);
+- }
+- pte++;
+- tmp += level_size(level);
+- } while (!first_pte_in_page(pte) &&
+- tmp + level_size(level) - 1 <= last_pfn);
++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
++ domain->pgd, 0, start_pfn, last_pfn);
+
+- domain_flush_cache(domain, first_pte,
+- (void *)pte - (void *)first_pte);
+-
+- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
+- level++;
+- }
+ /* free pgd */
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+ free_pgtable_page(domain->pgd);
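
The intel-iommu rewrite replaces the fragile iterative level walk with dma_pte_free_level(), which recurses depth-first and frees a level's page only when [start_pfn, last_pfn] covers everything mapped below it. A toy standalone sketch of the same shape, using a fanout of 4 instead of a real 512-entry page table:

#include <stdlib.h>

#define FANOUT 4

struct node { struct node *child[FANOUT]; };

/* Depth-first: recurse into children first, then free a child page
 * only if the requested range covers its entire span. */
static void free_level(struct node *n, int level,
		       unsigned long pfn, unsigned long span,
		       unsigned long start, unsigned long last)
{
	unsigned long step = span / FANOUT;
	unsigned long i;

	for (i = 0; i < FANOUT; i++, pfn += step) {
		struct node *c = n->child[i];

		if (!c || pfn > last || pfn + step - 1 < start)
			continue;
		if (level > 2)
			free_level(c, level - 1, pfn, step, start, last);
		if (start <= pfn && pfn + step - 1 <= last) {
			free(c);
			n->child[i] = NULL;
		}
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));

	root->child[1] = calloc(1, sizeof(struct node));
	/* Free pfns 4..7: exactly child 1's span (total span 16, step 4). */
	free_level(root, 2, 0, 16, 4, 7);
	free(root);
	return 0;
}
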
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index b4aaa7b..5c30316 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -721,17 +721,16 @@ static int calc_max_buckets(void)
+ */
+ static int init_hash_tables(struct dm_snapshot *s)
+ {
+- sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
++ sector_t hash_size, cow_dev_size, max_buckets;
+
+ /*
+ * Calculate based on the size of the original volume or
+ * the COW volume...
+ */
+ cow_dev_size = get_dev_size(s->cow->bdev);
+- origin_dev_size = get_dev_size(s->origin->bdev);
+ max_buckets = calc_max_buckets();
+
+- hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
++ hash_size = cow_dev_size >> s->store->chunk_shift;
+ hash_size = min(hash_size, max_buckets);
+
+ if (hash_size < 64)
+diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
+index 441dacf..060353e 100644
+--- a/drivers/media/video/hdpvr/hdpvr-core.c
++++ b/drivers/media/video/hdpvr/hdpvr-core.c
+@@ -297,6 +297,11 @@ static int hdpvr_probe(struct usb_interface *interface,
+
+ dev->workqueue = 0;
+
++ /* init video transfer queues first of all */
++ /* to prevent oops in hdpvr_delete() on error paths */
++ INIT_LIST_HEAD(&dev->free_buff_list);
++ INIT_LIST_HEAD(&dev->rec_buff_list);
++
+ /* register v4l2_device early so it can be used for printks */
+ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
+ err("v4l2_device_register failed");
+@@ -319,10 +324,6 @@ static int hdpvr_probe(struct usb_interface *interface,
+ if (!dev->workqueue)
+ goto error;
+
+- /* init video transfer queues */
+- INIT_LIST_HEAD(&dev->free_buff_list);
+- INIT_LIST_HEAD(&dev->rec_buff_list);
+-
+ dev->options = hdpvr_default_options;
+
+ if (default_video_input < HDPVR_VIDEO_INPUTS)
+@@ -373,12 +374,6 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ mutex_unlock(&dev->io_mutex);
+
+- if (hdpvr_register_videodev(dev, &interface->dev,
+- video_nr[atomic_inc_return(&dev_nr)])) {
+- v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
+- goto error;
+- }
+-
+ #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ retval = hdpvr_register_i2c_adapter(dev);
+ if (retval < 0) {
+@@ -399,6 +394,13 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ #endif
+
++ retval = hdpvr_register_videodev(dev, &interface->dev,
++ video_nr[atomic_inc_return(&dev_nr)]);
++ if (retval < 0) {
++ v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
++ goto reg_fail;
++ }
++
+ /* let the user know what node this device is now attached to */
+ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
+ video_device_node_name(dev->video_dev));
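
The hdpvr probe fix is purely about ordering: the buffer lists are initialized before the first failure can reach hdpvr_delete(), and registering the video node, the user-visible step, moves to the very end so the device cannot be opened half-initialized. A standalone sketch of the rule (initialize whatever the error path walks before the first failure can occur):

#include <stdlib.h>

struct list { struct list *next, *prev; };
static void list_init(struct list *l) { l->next = l->prev = l; }

struct priv { struct list free_list, busy_list; };

static void cleanup(struct priv *p)
{
	/* A real cleanup would walk free_list/busy_list here; they must
	 * be valid list heads even when probe fails early. */
	free(p);
}

static int setup_hw(struct priv *p)       { (void)p; return -1; /* simulated */ }
static int register_node(struct priv *p)  { (void)p; return 0; }

static int probe(void)
{
	struct priv *p = malloc(sizeof(*p));

	if (!p)
		return -1;
	/* Init everything cleanup() touches before any error can occur. */
	list_init(&p->free_list);
	list_init(&p->busy_list);
	if (setup_hw(p)) {
		cleanup(p);             /* safe: lists already initialized */
		return -1;
	}
	return register_node(p);        /* user-visible registration last */
}

int main(void) { probe(); return 0; }
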
+diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
+index 86f259c..be9e74d 100644
+--- a/drivers/mmc/host/tmio_mmc_dma.c
++++ b/drivers/mmc/host/tmio_mmc_dma.c
+@@ -92,6 +92,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+ pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
++ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+@@ -104,7 +105,6 @@ pio:
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+- tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+@@ -173,6 +173,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+ pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
++ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+@@ -185,7 +186,6 @@ pio:
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+- tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b436b84..1bf36ac 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1911,6 +1911,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave, *oldcurrent;
+ struct sockaddr addr;
++ int old_flags = bond_dev->flags;
+ u32 old_features = bond_dev->features;
+
+ /* slave is not a slave or master is not master of this slave */
+@@ -2041,12 +2042,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ * already taken care of above when we detached the slave
+ */
+ if (!USES_PRIMARY(bond->params.mode)) {
+- /* unset promiscuity level from slave */
+- if (bond_dev->flags & IFF_PROMISC)
++ /* unset promiscuity level from slave
++ * NOTE: The NETDEV_CHANGEADDR call above may change the value
++ * of the IFF_PROMISC flag in the bond_dev, but we need the
++ * value of that flag before that change, as that was the value
++ * when this slave was attached, so we cache at the start of the
++ * function and use it here. Same goes for ALLMULTI below
++ */
++ if (old_flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+
+ /* unset allmulti level from slave */
+- if (bond_dev->flags & IFF_ALLMULTI)
++ if (old_flags & IFF_ALLMULTI)
+ dev_set_allmulti(slave_dev, -1);
+
+ /* flush master's mc_list from slave */
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e59d006..bb828c2 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -60,7 +60,7 @@
+ #define FLEXCAN_MCR_BCC BIT(16)
+ #define FLEXCAN_MCR_LPRIO_EN BIT(13)
+ #define FLEXCAN_MCR_AEN BIT(12)
+-#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
++#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
+ #define FLEXCAN_MCR_IDAM_A (0 << 8)
+ #define FLEXCAN_MCR_IDAM_B (1 << 8)
+ #define FLEXCAN_MCR_IDAM_C (2 << 8)
+@@ -666,7 +666,6 @@ static int flexcan_chip_start(struct net_device *dev)
+ {
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->base;
+- unsigned int i;
+ int err;
+ u32 reg_mcr, reg_ctrl;
+
+@@ -700,9 +699,11 @@ static int flexcan_chip_start(struct net_device *dev)
+ *
+ */
+ reg_mcr = flexcan_read(&regs->mcr);
++ reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+ reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
+ FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
+- FLEXCAN_MCR_IDAM_C;
++ FLEXCAN_MCR_IDAM_C |
++ FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+ dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+ flexcan_write(reg_mcr, &regs->mcr);
+
+@@ -732,16 +733,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+ flexcan_write(reg_ctrl, &regs->ctrl);
+
+- for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
+- flexcan_write(0, &regs->cantxfg[i].can_ctrl);
+- flexcan_write(0, &regs->cantxfg[i].can_id);
+- flexcan_write(0, &regs->cantxfg[i].data[0]);
+- flexcan_write(0, &regs->cantxfg[i].data[1]);
+-
+- /* put MB into rx queue */
+- flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+- &regs->cantxfg[i].can_ctrl);
+- }
++ /* Abort any pending TX, mark Mailbox as INACTIVE */
++ flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
++ &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+ /* acceptance mask/acceptance code (accept everything) */
+ flexcan_write(0x0, &regs->rxgmask);
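
The flexcan change widens the MAXMB mask from 0xf to 0x1f and, just as importantly, clears the field before OR-ing in the new value, so whatever the reset default left there cannot leak into the result. A runnable sketch of the clear-then-set field update:

#include <assert.h>
#include <stdint.h>

#define FIELD_MASK 0x1fu             /* MAXMB occupies bits [4:0] */

/* Clear a register field before inserting the new value, instead of
 * OR-ing into whatever was there (the bug this hunk fixes). */
static uint32_t set_maxmb(uint32_t mcr, uint32_t maxmb)
{
	mcr &= ~FIELD_MASK;           /* clear old field contents */
	mcr |= maxmb & FIELD_MASK;    /* insert new value */
	return mcr;
}

int main(void)
{
	/* An OR-only update of 0x1b with 0x08 would read back 0x1b. */
	uint32_t mcr = set_maxmb(0x0000001b, 0x08);

	assert((mcr & FIELD_MASK) == 0x08);
	return 0;
}
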
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index d0722a7..fb9e7d3 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -397,7 +397,13 @@ static void gfar_init_mac(struct net_device *ndev)
+ if (ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+- tctrl |= TCTRL_TXSCHED_PRIO;
++ if (priv->prio_sched_en)
++ tctrl |= TCTRL_TXSCHED_PRIO;
++ else {
++ tctrl |= TCTRL_TXSCHED_WRRS;
++ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
++ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
++ }
+
+ gfar_write(&regs->tctrl, tctrl);
+
+@@ -1157,6 +1163,9 @@ static int gfar_probe(struct platform_device *ofdev)
+ priv->rx_filer_enable = 1;
+ /* Enable most messages by default */
+ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
++	/* use priority h/w tx queue scheduling for single queue devices */
++ if (priv->num_tx_queues == 1)
++ priv->prio_sched_en = 1;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
+index 9aa4377..abeb79a 100644
+--- a/drivers/net/ethernet/freescale/gianfar.h
++++ b/drivers/net/ethernet/freescale/gianfar.h
+@@ -304,8 +304,16 @@ extern const char gfar_driver_version[];
+ #define TCTRL_TFCPAUSE 0x00000008
+ #define TCTRL_TXSCHED_MASK 0x00000006
+ #define TCTRL_TXSCHED_INIT 0x00000000
++/* priority scheduling */
+ #define TCTRL_TXSCHED_PRIO 0x00000002
++/* weighted round-robin scheduling (WRRS) */
+ #define TCTRL_TXSCHED_WRRS 0x00000004
++/* default WRRS weight and policy setting,
++ * tailored to the tr03wt and tr47wt registers:
++ * equal weight for all Tx Qs, measured in 64byte units
++ */
++#define DEFAULT_WRRS_WEIGHT 0x18181818
++
+ #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
+
+ #define IEVENT_INIT_CLEAR 0xffffffff
+@@ -1101,7 +1109,8 @@ struct gfar_private {
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+- wol_en:1; /* Wake-on-LAN enabled */
++ wol_en:1, /* Wake-on-LAN enabled */
++		prio_sched_en:1; /* Enable priority based Tx scheduling in Hw */
+ unsigned short padding;
+
+ /* PHY stuff */
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 8f47907..4236b82 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -478,7 +478,7 @@ rx_status_loop:
+
+ while (1) {
+ u32 status, len;
+- dma_addr_t mapping;
++ dma_addr_t mapping, new_mapping;
+ struct sk_buff *skb, *new_skb;
+ struct cp_desc *desc;
+ const unsigned buflen = cp->rx_buf_sz;
+@@ -520,6 +520,14 @@ rx_status_loop:
+ goto rx_next;
+ }
+
++ new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
++ PCI_DMA_FROMDEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
++ dev->stats.rx_dropped++;
++ kfree_skb(new_skb);
++ goto rx_next;
++ }
++
+ dma_unmap_single(&cp->pdev->dev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+
+@@ -531,12 +539,11 @@ rx_status_loop:
+
+ skb_put(skb, len);
+
+- mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+- PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail] = new_skb;
+
+ cp_rx_skb(cp, skb, desc);
+ rx++;
++ mapping = new_mapping;
+
+ rx_next:
+ cp->rx_ring[rx_tail].opts2 = 0;
+@@ -704,6 +711,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ }
+
++static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
++ int first, int entry_last)
++{
++ int frag, index;
++ struct cp_desc *txd;
++ skb_frag_t *this_frag;
++ for (frag = 0; frag+first < entry_last; frag++) {
++ index = first+frag;
++ cp->tx_skb[index] = NULL;
++ txd = &cp->tx_ring[index];
++ this_frag = &skb_shinfo(skb)->frags[frag];
++ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
++ skb_frag_size(this_frag), PCI_DMA_TODEVICE);
++ }
++}
++
+ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
+ {
+@@ -737,6 +760,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+
+ len = skb->len;
+ mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping))
++ goto out_dma_error;
++
+ txd->opts2 = opts2;
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
+@@ -774,6 +800,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ first_len = skb_headlen(skb);
+ first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ first_len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, first_mapping))
++ goto out_dma_error;
++
+ cp->tx_skb[entry] = skb;
+ entry = NEXT_TX(entry);
+
+@@ -787,6 +816,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ mapping = dma_map_single(&cp->pdev->dev,
+ skb_frag_address(this_frag),
+ len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
++ unwind_tx_frag_mapping(cp, skb, first_entry, entry);
++ goto out_dma_error;
++ }
++
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+
+ ctrl = eor | len | DescOwn;
+@@ -845,11 +879,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
++out_unlock:
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
+
+ cpw8(TxPoll, NormalTxPoll);
+
+ return NETDEV_TX_OK;
++out_dma_error:
++ kfree_skb(skb);
++ cp->dev->stats.tx_dropped++;
++ goto out_unlock;
+ }
+
+ /* Set or clear the multicast filter for this adaptor.
+@@ -1023,6 +1062,10 @@ static int cp_refill_rx(struct cp_private *cp)
+
+ mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
++ kfree_skb(skb);
++ goto err_out;
++ }
+ cp->rx_skb[i] = skb;
+
+ cp->rx_ring[i].opts2 = 0;
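
Every dma_map_single() in the 8139cp tx/rx paths now pairs with a dma_mapping_error() check, and a partially mapped fragmented skb is unwound before the packet is dropped. A hedged kernel-style sketch of the map/check/unwind shape; map_frags() and its arguments are illustrative, not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map an array of buffers for TX, unwinding on a mid-array failure. */
static int map_frags(struct device *dev, void **buf, dma_addr_t *addr,
		     size_t *len, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		addr[i] = dma_map_single(dev, buf[i], len[i], DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr[i]))
			goto unwind;
	}
	return 0;
unwind:
	while (--i >= 0)
		dma_unmap_single(dev, addr[i], len[i], DMA_TO_DEVICE);
	return -ENOMEM;
}
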
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 9ce8665..c231b3f 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -312,8 +312,9 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ new_buf = efx_rx_buffer(rx_queue, index);
+- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->u.page = rx_buf->u.page;
++ new_buf->page_offset = rx_buf->page_offset ^ (PAGE_SIZE >> 1);
++ new_buf->dma_addr = state->dma_addr + new_buf->page_offset;
+ new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
+ ++rx_queue->added_count;
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index f34dd99..f37e0ae 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -32,7 +32,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #define DRV_NAME "via-rhine"
+-#define DRV_VERSION "1.5.0"
++#define DRV_VERSION "1.5.1"
+ #define DRV_RELDATE "2010-10-09"
+
+
+@@ -1518,7 +1518,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+ cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+ if (unlikely(vlan_tx_tag_present(skb))) {
+- rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
++ u16 vid_pcp = vlan_tx_tag_get(skb);
++
++ /* drop CFI/DEI bit, register needs VID and PCP */
++ vid_pcp = (vid_pcp & VLAN_VID_MASK) |
++ ((vid_pcp & VLAN_PRIO_MASK) >> 1);
++ rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
+ /* request tagging */
+ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+ }
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 2681b53..e26945d 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -308,6 +308,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
++ /* Init descriptor indexes */
++ lp->tx_bd_ci = 0;
++ lp->tx_bd_next = 0;
++ lp->tx_bd_tail = 0;
++ lp->rx_bd_ci = 0;
++
+ return 0;
+
+ out:
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 96b9e3c..b0f9015 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -641,6 +641,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ return 0;
+ }
+
++static unsigned long iov_pages(const struct iovec *iv, int offset,
++ unsigned long nr_segs)
++{
++ unsigned long seg, base;
++ int pages = 0, len, size;
++
++ while (nr_segs && (offset >= iv->iov_len)) {
++ offset -= iv->iov_len;
++ ++iv;
++ --nr_segs;
++ }
++
++ for (seg = 0; seg < nr_segs; seg++) {
++ base = (unsigned long)iv[seg].iov_base + offset;
++ len = iv[seg].iov_len - offset;
++ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
++ pages += size;
++ offset = 0;
++ }
++
++ return pages;
++}
+
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+@@ -687,31 +709,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (unlikely(count > UIO_MAXIOV))
+ goto err;
+
+- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+- zerocopy = true;
+-
+- if (zerocopy) {
+- /* Userspace may produce vectors with count greater than
+- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
+- * to let the rest of data to be fit in the frags.
+- */
+- if (count > MAX_SKB_FRAGS) {
+- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+- if (copylen < vnet_hdr_len)
+- copylen = 0;
+- else
+- copylen -= vnet_hdr_len;
+- }
+- /* There are 256 bytes to be copied in skb, so there is enough
+- * room for skb expand head in case it is used.
+- * The rest buffer is mapped from userspace.
+- */
+- if (copylen < vnet_hdr.hdr_len)
+- copylen = vnet_hdr.hdr_len;
+- if (!copylen)
+- copylen = GOODCOPY_LEN;
++ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
++ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ linear = copylen;
+- } else {
++ if (iov_pages(iv, vnet_hdr_len + copylen, count)
++ <= MAX_SKB_FRAGS)
++ zerocopy = true;
++ }
++
++ if (!zerocopy) {
+ copylen = len;
+ linear = vnet_hdr.hdr_len;
+ }
+@@ -723,9 +729,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+
+ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+- else
++ else {
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
++ if (!err && m && m->msg_control) {
++ struct ubuf_info *uarg = m->msg_control;
++ uarg->callback(uarg);
++ }
++ }
++
+ if (err)
+ goto err_kfree;
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index ad6a9d9..2b349d3 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ nf_reset(skb);
+
+ skb->ip_summed = CHECKSUM_NONE;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ip_send_check(iph);
+
+ ip_local_out(skb);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index f4c5de6..ee1aab0 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -614,8 +614,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ int offset = 0;
+
+ if (!(tun->flags & TUN_NO_PI)) {
+- if ((len -= sizeof(pi)) > count)
++ if (len < sizeof(pi))
+ return -EINVAL;
++ len -= sizeof(pi);
+
+ if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+ return -EFAULT;
+@@ -623,8 +624,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ }
+
+ if (tun->flags & TUN_VNET_HDR) {
+- if ((len -= tun->vnet_hdr_sz) > count)
++ if (len < tun->vnet_hdr_sz)
+ return -EINVAL;
++ len -= tun->vnet_hdr_sz;
+
+ if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ return -EFAULT;
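
The tun fix replaces `(len -= sizeof(pi)) > count` with a bounds check before the subtraction: subtracting first can wrap and let a too-short buffer through. A runnable sketch with a hypothetical 4-byte header:

#include <assert.h>
#include <stddef.h>

struct pi_hdr { unsigned short flags, proto; };

/* Reject short buffers before subtracting the header size; subtracting
 * first can wrap a size_t (or go negative for a signed length) and
 * then sneak past a later comparison. */
static int consume_header(size_t *len)
{
	if (*len < sizeof(struct pi_hdr))
		return -1;
	*len -= sizeof(struct pi_hdr);
	return 0;
}

int main(void)
{
	size_t len = 2;                  /* shorter than the 4-byte header */

	assert(consume_header(&len) < 0);
	return 0;
}
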
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 2ba40cf..43aa06b 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -615,6 +615,11 @@ static const struct usb_device_id products [] = {
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long)&wwan_info,
+ }, {
++ /* Telit modules */
++ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++ .driver_info = (kernel_ulong_t) &wwan_info,
++}, {
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index fbc0e4d..136ecf3 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -384,7 +384,7 @@ static void dm9601_set_multicast(struct net_device *net)
+ rx_ctl |= 0x02;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
+- rx_ctl |= 0x04;
++ rx_ctl |= 0x08;
+ } else if (!netdev_mc_empty(net)) {
+ struct netdev_hw_addr *ha;
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 73be7ff..f146824 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -1016,6 +1016,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ * is_on == 0 means MRC CCK is OFF (more noise imm)
+ */
+ bool is_on = param ? 1 : 0;
++
++ if (ah->caps.rx_chainmask == 1)
++ break;
++
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_ENABLE, is_on);
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
+index 1c269f5..7c70cf2 100644
+--- a/drivers/net/wireless/ath/ath9k/ath9k.h
++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
+@@ -77,10 +77,6 @@ struct ath_config {
+ sizeof(struct ath_buf_state)); \
+ } while (0)
+
+-#define ATH_RXBUF_RESET(_bf) do { \
+- (_bf)->bf_stale = false; \
+- } while (0)
+-
+ /**
+ * enum buffer_type - Buffer type flags
+ *
+@@ -308,6 +304,7 @@ struct ath_rx {
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
++ struct ath_buf *buf_hold;
+ struct sk_buff *frag;
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index d171a72..8326c14 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -78,8 +78,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+ struct ath_desc *ds;
+ struct sk_buff *skb;
+
+- ATH_RXBUF_RESET(bf);
+-
+ ds = bf->bf_desc;
+ ds->ds_link = 0; /* link to null */
+ ds->ds_data = bf->bf_buf_addr;
+@@ -106,6 +104,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+ sc->rx.rxlink = &ds->ds_link;
+ }
+
++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
++{
++ if (sc->rx.buf_hold)
++ ath_rx_buf_link(sc, sc->rx.buf_hold);
++
++ sc->rx.buf_hold = bf;
++}
++
+ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
+ {
+ /* XXX block beacon interrupts */
+@@ -153,7 +159,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+
+ skb = bf->bf_mpdu;
+
+- ATH_RXBUF_RESET(bf);
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
+@@ -492,6 +497,7 @@ int ath_startrecv(struct ath_softc *sc)
+ if (list_empty(&sc->rx.rxbuf))
+ goto start_recv;
+
++ sc->rx.buf_hold = NULL;
+ sc->rx.rxlink = NULL;
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
+ ath_rx_buf_link(sc, bf);
+@@ -742,6 +748,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
++ if (bf == sc->rx.buf_hold)
++ return NULL;
++
+ ds = bf->bf_desc;
+
+ /*
+@@ -1974,7 +1983,7 @@ requeue:
+ if (edma) {
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+- ath_rx_buf_link(sc, bf);
++ ath_rx_buf_relink(sc, bf);
+ ath9k_hw_rxena(ah);
+ }
+ } while (1);
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 18da100..126ed31 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2423,6 +2423,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+ for (acno = 0, ac = &an->ac[acno];
+ acno < WME_NUM_AC; acno++, ac++) {
+ ac->sched = false;
++ ac->clear_ps_filter = true;
+ ac->txq = sc->tx.txq_map[acno];
+ INIT_LIST_HEAD(&ac->tid_q);
+ }
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 564218c..0784493 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
++ {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 67cbe5a..8fb8c9e 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -2067,6 +2067,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
+ int i;
+
+ /*
++ * First check if temperature compensation is supported.
++ */
++ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
++ if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
++ return 0;
++
++ /*
+ * Read TSSI boundaries for temperature compensation from
+ * the EEPROM.
+ *
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index deb87e9..82baaa2 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1630,7 +1630,7 @@ struct rtl_priv {
+ that it points to the data allocated
+ beyond this structure like:
+ rtl_pci_priv or rtl_usb_priv */
+- u8 priv[0];
++ u8 priv[0] __aligned(sizeof(void *));
+ };
+
+ #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 9b6588e..37639a6 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1189,6 +1189,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
+ ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ if (!ap)
+ continue;
++ memset(ap, 0, sizeof(*ap) + len + 1);
+ ap->alias = start;
+ of_alias_add(ap, np, id, start, len);
+ }
+diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
+index 394ed9e..4aa30d8 100644
+--- a/drivers/scsi/esp_scsi.c
++++ b/drivers/scsi/esp_scsi.c
+@@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
+ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+ {
+- if (!ent->tag[0]) {
++ if (!ent->orig_tag[0]) {
+ /* Non-tagged, slot already taken? */
+ if (lp->non_tagged_cmd)
+ return -EBUSY;
+@@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ return -EBUSY;
+ }
+
+- BUG_ON(lp->tagged_cmds[ent->tag[1]]);
++ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
+
+- lp->tagged_cmds[ent->tag[1]] = ent;
++ lp->tagged_cmds[ent->orig_tag[1]] = ent;
+ lp->num_tagged++;
+
+ return 0;
+@@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+ {
+- if (ent->tag[0]) {
+- BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
+- lp->tagged_cmds[ent->tag[1]] = NULL;
++ if (ent->orig_tag[0]) {
++ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
++ lp->tagged_cmds[ent->orig_tag[1]] = NULL;
+ lp->num_tagged--;
+ } else {
+ BUG_ON(lp->non_tagged_cmd != ent);
+@@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
+ ent->tag[0] = 0;
+ ent->tag[1] = 0;
+ }
++ ent->orig_tag[0] = ent->tag[0];
++ ent->orig_tag[1] = ent->tag[1];
+
+ if (esp_alloc_lun_tag(ent, lp) < 0)
+ continue;
+diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
+index 28e22ac..cd68805 100644
+--- a/drivers/scsi/esp_scsi.h
++++ b/drivers/scsi/esp_scsi.h
+@@ -271,6 +271,7 @@ struct esp_cmd_entry {
+ #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
+
+ u8 tag[2];
++ u8 orig_tag[2];
+
+ u8 status;
+ u8 message;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 96029e6..c874458 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2105,7 +2105,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ break;
+ err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
++ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
+ skb_pull(skb, rlen);
+ }
+ mutex_unlock(&rx_queue_mutex);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 17603da..f6d2b62 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2136,14 +2136,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ }
+ }
+
+- if (modepage == 0x3F) {
+- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+- "present\n");
+- goto defaults;
+- } else if ((buffer[offset] & 0x3f) != modepage) {
+- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
+- goto defaults;
+- }
++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
++ goto defaults;
++
+ Page_found:
+ if (modepage == 8) {
+ sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
+diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
+index 95ebc26..e3adb38 100644
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -407,8 +407,9 @@ struct dt282x_private {
+ } \
+ udelay(5); \
+ } \
+- if (_i) \
++ if (_i) { \
+ b \
++ } \
+ } while (0)
+
+ static int dt282x_attach(struct comedi_device *dev,
+diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
+index 403fc09..8b564ad 100644
+--- a/drivers/staging/comedi/drivers/ni_65xx.c
++++ b/drivers/staging/comedi/drivers/ni_65xx.c
+@@ -411,29 +411,25 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+ {
+- unsigned base_bitfield_channel;
+- const unsigned max_ports_per_bitfield = 5;
++ int base_bitfield_channel;
+ unsigned read_bits = 0;
+- unsigned j;
++ int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
++ int port_offset;
++
+ if (insn->n != 2)
+ return -EINVAL;
+ base_bitfield_channel = CR_CHAN(insn->chanspec);
+- for (j = 0; j < max_ports_per_bitfield; ++j) {
+- const unsigned port_offset =
+- ni_65xx_port_by_channel(base_bitfield_channel) + j;
+- const unsigned port =
+- sprivate(s)->base_port + port_offset;
+- unsigned base_port_channel;
++ for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
++ port_offset <= last_port_offset; port_offset++) {
++ unsigned port = sprivate(s)->base_port + port_offset;
++ int base_port_channel = port_offset * ni_65xx_channels_per_port;
+ unsigned port_mask, port_data, port_read_bits;
+- int bitshift;
+- if (port >= ni_65xx_total_num_ports(board(dev)))
++ int bitshift = base_port_channel - base_bitfield_channel;
++
++ if (bitshift >= 32)
+ break;
+- base_port_channel = port_offset * ni_65xx_channels_per_port;
+ port_mask = data[0];
+ port_data = data[1];
+- bitshift = base_port_channel - base_bitfield_channel;
+- if (bitshift >= 32 || bitshift <= -32)
+- break;
+ if (bitshift > 0) {
+ port_mask >>= bitshift;
+ port_data >>= bitshift;
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 754d54e..f680766 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -1221,6 +1221,8 @@ device_release_WPADEV(pDevice);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+
++ pDevice->flags &= ~DEVICE_FLAGS_OPENED;
++
+ device_free_tx_bufs(pDevice);
+ device_free_rx_bufs(pDevice);
+ device_free_int_bufs(pDevice);
+@@ -1232,7 +1234,6 @@ device_release_WPADEV(pDevice);
+ usb_free_urb(pDevice->pInterruptURB);
+
+ BSSvClearNodeDBTable(pDevice, 0);
+- pDevice->flags &=(~DEVICE_FLAGS_OPENED);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
+
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 926d483..d197b3e 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -709,9 +709,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
+- down_write(&zram->lock);
+ zram_free_page(zram, index);
+- up_write(&zram->lock);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+ }
+
+diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
+index 87f2fec..e5cd246 100644
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -107,9 +107,8 @@ struct zram {
+ void *compress_buffer;
+ struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
+- struct rw_semaphore lock; /* protect compression buffers, table,
+- * 32bit stat counters against concurrent
+- * notifications, reads and writes */
++ struct rw_semaphore lock; /* protect compression buffers and table
++ * against concurrent read and writes */
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index c0b4872..f5440a7 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -552,11 +552,12 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
+ dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ size - room);
+ if (!room)
+- return room;
++ goto out;
+
+ tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+
+ port->icount.rx += room;
++out:
+ tty_kref_put(tty);
+
+ return room;
+@@ -970,6 +971,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+ if (tty == NULL) {
+ for (i = 0; error_msg[i] != NULL; i++)
+ dev_err(&priv->pdev->dev, error_msg[i]);
++ } else {
++ tty_kref_put(tty);
+ }
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index fe8c04b..06dfb4f 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -187,6 +187,7 @@ skip_error:
+ static void wdm_int_callback(struct urb *urb)
+ {
+ int rv = 0;
++ int responding;
+ int status = urb->status;
+ struct wdm_device *desc;
+ struct usb_ctrlrequest *req;
+@@ -260,8 +261,8 @@ static void wdm_int_callback(struct urb *urb)
+ desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ spin_lock(&desc->iuspin);
+ clear_bit(WDM_READ, &desc->flags);
+- set_bit(WDM_RESPONDING, &desc->flags);
+- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
+ && !test_bit(WDM_SUSPENDING, &desc->flags)) {
+ rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
+@@ -658,16 +659,20 @@ static void wdm_rxwork(struct work_struct *work)
+ {
+ struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
+ unsigned long flags;
+- int rv;
++ int rv = 0;
++ int responding;
+
+ spin_lock_irqsave(&desc->iuspin, flags);
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+ } else {
++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+- rv = usb_submit_urb(desc->response, GFP_KERNEL);
++ if (!responding)
++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ if (rv < 0 && rv != -EPERM) {
+ spin_lock_irqsave(&desc->iuspin, flags);
++ clear_bit(WDM_RESPONDING, &desc->flags);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ schedule_work(&desc->rxwork);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
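
The cdc-wdm hunk above turns a bare set_bit(WDM_RESPONDING) into test_and_set_bit() in both
submission paths, so whichever path runs first claims the response URB and the other backs
off instead of double-submitting it. A minimal userspace sketch of the same claim-before-submit
idiom, with C11 atomic_flag standing in for the kernel's bitops (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag responding = ATOMIC_FLAG_INIT;

/* Returns 1 if this caller won the right to submit, 0 if someone already did.
 * atomic_flag_test_and_set() mirrors test_and_set_bit(): it returns the
 * previous value, so only the first caller sees "was clear". */
static int try_claim_response(void)
{
        return !atomic_flag_test_and_set(&responding);
}

int main(void)
{
        printf("first claim:  %d\n", try_claim_response());  /* 1 */
        printf("second claim: %d\n", try_claim_response());  /* 0 */
        atomic_flag_clear(&responding);  /* clear_bit(WDM_RESPONDING, ...) analogue */
        printf("after clear:  %d\n", try_claim_response());  /* 1 */
        return 0;
}
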
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index f4bdd0c..78609d3 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+ if (config->desc.bDescriptorType != USB_DT_CONFIG ||
+- config->desc.bLength < USB_DT_CONFIG_SIZE) {
++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
++ config->desc.bLength > size) {
+ dev_err(ddev, "invalid descriptor for config index %d: "
+ "type = 0x%X, length = %d\n", cfgidx,
+ config->desc.bDescriptorType, config->desc.bLength);
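
The config.c change adds "config->desc.bLength > size" to the sanity check, rejecting
descriptors whose self-declared length exceeds the buffer actually read from the device.
A sketch of that invariant on a toy descriptor (the structures here are illustrative,
not the real USB ones):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct desc { uint8_t bLength; uint8_t bDescriptorType; };

static int parse_desc(const uint8_t *buf, size_t size, struct desc *out)
{
        if (size < sizeof(*out))
                return -1;                 /* too short to even read */
        memcpy(out, buf, sizeof(*out));
        if (out->bLength < sizeof(*out) || /* claims to be impossibly small */
            out->bLength > size)           /* claims more than we have */
                return -1;
        return 0;
}

int main(void)
{
        uint8_t evil[2] = { 0xff, 0x02 };  /* bLength=255 in a 2-byte buffer */
        struct desc d;
        printf("parse: %d\n", parse_desc(evil, sizeof(evil), &d)); /* -1 */
        return 0;
}
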
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 22f770a..49257b3 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -646,6 +646,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+ if ((index & ~USB_DIR_IN) == 0)
+ return 0;
+ ret = findintfep(ps->dev, index);
++ if (ret < 0) {
++ /*
++ * Some not fully compliant Win apps seem to get
++ * index wrong and have the endpoint number here
++ * rather than the endpoint address (with the
++ * correct direction). Win does let this through,
++ * so we'll not reject it here but leave it to
++ * the device to not break KVM. But we warn.
++ */
++ ret = findintfep(ps->dev, index ^ 0x80);
++ if (ret >= 0)
++ dev_info(&ps->dev->dev,
++ "%s: process %i (%s) requesting ep %02x but needs %02x\n",
++ __func__, task_pid_nr(current),
++ current->comm, index, index ^ 0x80);
++ }
+ if (ret >= 0)
+ ret = checkintf(ps, ret);
+ break;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2768a7e..a5ea85f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3749,7 +3749,8 @@ static void hub_events(void)
+ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- if (!udev) {
++ if (!udev || !(portstatus &
++ USB_PORT_STAT_CONNECTION)) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+@@ -3759,8 +3760,8 @@ static void hub_events(void)
+ usb_lock_device(udev);
+ status = usb_reset_device(udev);
+ usb_unlock_device(udev);
++ connect_change = 0;
+ }
+- connect_change = 0;
+ }
+
+ if (connect_change)
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index f77c000..9edc582 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -45,6 +45,8 @@
+ /* FIXME define these in <linux/pci_ids.h> */
+ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
++#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
++#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
+
+ #define DWC3_PCI_DEVS_POSSIBLE 32
+
+@@ -191,6 +193,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { } /* Terminating Entry */
+ };
+ MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
+index 55978fc..0874473 100644
+--- a/drivers/usb/host/ehci-mxc.c
++++ b/drivers/usb/host/ehci-mxc.c
+@@ -296,7 +296,7 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+
+- if (pdata->otg)
++ if (pdata && pdata->otg)
+ otg_shutdown(pdata->otg);
+
+ usb_remove_hcd(hcd);
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index b71e22e..29c0421 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -543,7 +543,7 @@ static struct pci_driver ehci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index bc01b06..839cb64 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -413,7 +413,7 @@ static struct pci_driver ohci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index c300bd2f7..0f228c4 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = uhci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 79d2720..61b0668 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -330,7 +330,7 @@ static struct pci_driver xhci_pci_driver = {
+ /* suspend and resume implemented later */
+
+ .shutdown = usb_hcd_pci_shutdown,
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 633476e..2b4f42b 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -879,8 +879,12 @@ remove_finished_td:
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+- ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
++
++ /* Clear stopped_td and stopped_trb if endpoint is not halted */
++ if (!(ep->ep_state & EP_HALTED)) {
++ ep->stopped_td = NULL;
++ ep->stopped_trb = NULL;
++ }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 6e1c92a..629aa74 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3484,10 +3484,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev;
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ u32 state;
+ int i, ret;
+
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * We called pm_runtime_get_noresume when the device was attached.
++ * Decrement the counter here to allow controller to runtime suspend
++ * if no devices remain.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_put_noidle(dev);
++#endif
++
+ ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+@@ -3559,6 +3570,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ int timeleft;
+ int ret;
+@@ -3611,6 +3623,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ goto disable_slot;
+ }
+ udev->slot_id = xhci->slot_id;
++
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * If resetting upon resume, we can't put the controller into runtime
++ * suspend if there is a device attached.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_get_noresume(dev);
++#endif
++
+ /* Is this a LS or FS device under a HS hub? */
+ /* Hub or peripheral? */
+ return 1;
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 9270d5c..8e02ff2 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -383,7 +383,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
+ if (!urbtrack->setup) {
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+@@ -391,8 +391,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ }
+ urbtrack->setup->bRequestType = (__u8)0x40;
+ urbtrack->setup->bRequest = (__u8)0x0e;
+- urbtrack->setup->wValue = get_reg_value(reg, dummy);
+- urbtrack->setup->wIndex = get_reg_index(reg);
++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
+ urbtrack->setup->wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c2103f4..536c4ad 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
+
+ #define HUAWEI_VENDOR_ID 0x12D1
+ #define HUAWEI_PRODUCT_E173 0x140C
++#define HUAWEI_PRODUCT_E1750 0x1406
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+@@ -581,6 +582,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index bf1c094..b657de6 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -355,9 +355,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+ void (*fn)(void *), void *arg, u16 count)
+ {
+ unsigned long flags;
++ struct gnttab_free_callback *cb;
++
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+- if (callback->next)
+- goto out;
++
++ /* Check if the callback is already on the list */
++ cb = gnttab_free_callback_list;
++ while (cb) {
++ if (cb == callback)
++ goto out;
++ cb = cb->next;
++ }
++
+ callback->fn = fn;
+ callback->arg = arg;
+ callback->count = count;
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index f3a257d..fb001cd 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -380,8 +380,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
+ */
+ void debugfs_remove_recursive(struct dentry *dentry)
+ {
+- struct dentry *child;
+- struct dentry *parent;
++ struct dentry *child, *next, *parent;
+
+ if (!dentry)
+ return;
+@@ -391,61 +390,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
+ return;
+
+ parent = dentry;
++ down:
+ mutex_lock(&parent->d_inode->i_mutex);
++ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
++ if (!debugfs_positive(child))
++ continue;
+
+- while (1) {
+- /*
+- * When all dentries under "parent" has been removed,
+- * walk up the tree until we reach our starting point.
+- */
+- if (list_empty(&parent->d_subdirs)) {
+- mutex_unlock(&parent->d_inode->i_mutex);
+- if (parent == dentry)
+- break;
+- parent = parent->d_parent;
+- mutex_lock(&parent->d_inode->i_mutex);
+- }
+- child = list_entry(parent->d_subdirs.next, struct dentry,
+- d_u.d_child);
+- next_sibling:
+-
+- /*
+- * If "child" isn't empty, walk down the tree and
+- * remove all its descendants first.
+- */
++ /* perhaps simple_empty(child) makes more sense */
+ if (!list_empty(&child->d_subdirs)) {
+ mutex_unlock(&parent->d_inode->i_mutex);
+ parent = child;
+- mutex_lock(&parent->d_inode->i_mutex);
+- continue;
+- }
+- __debugfs_remove(child, parent);
+- if (parent->d_subdirs.next == &child->d_u.d_child) {
+- /*
+- * Try the next sibling.
+- */
+- if (child->d_u.d_child.next != &parent->d_subdirs) {
+- child = list_entry(child->d_u.d_child.next,
+- struct dentry,
+- d_u.d_child);
+- goto next_sibling;
+- }
+-
+- /*
+- * Avoid infinite loop if we fail to remove
+- * one dentry.
+- */
+- mutex_unlock(&parent->d_inode->i_mutex);
+- break;
++ goto down;
+ }
+- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++ up:
++ if (!__debugfs_remove(child, parent))
++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+
+- parent = dentry->d_parent;
++ mutex_unlock(&parent->d_inode->i_mutex);
++ child = parent;
++ parent = parent->d_parent;
+ mutex_lock(&parent->d_inode->i_mutex);
+- __debugfs_remove(dentry, parent);
++
++ if (child != dentry) {
++ next = list_entry(child->d_u.d_child.next, struct dentry,
++ d_u.d_child);
++ goto up;
++ }
++
++ if (!__debugfs_remove(child, parent))
++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ mutex_unlock(&parent->d_inode->i_mutex);
+- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+ EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
+
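
The rewritten debugfs_remove_recursive() above drops the fragile sibling-walking loop for a
plain iterative depth-first traversal: descend via "goto down" while a child still has
children, remove entries at the current level, and climb back through the parent pointer.
The sketch below mirrors that shape on a toy first-child/next-sibling tree (not real
dentries; all locking omitted):

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *parent, *child, *next;   /* first child / next sibling */
        const char *name;
};

static struct node *mk(const char *name, struct node *parent)
{
        struct node *n = calloc(1, sizeof(*n));
        n->name = name;
        n->parent = parent;
        if (parent) {
                n->next = parent->child;      /* push onto the child list */
                parent->child = n;
        }
        return n;
}

static void remove_subtree(struct node *root)
{
        struct node *n = root;
down:
        while (n->child)                      /* descend to a leaf first */
                n = n->child;
        while (n != root) {
                struct node *parent = n->parent;
                parent->child = n->next;      /* unlink and free the leaf */
                printf("removed %s\n", n->name);
                free(n);
                if (parent->child) {          /* more children: go back down */
                        n = parent->child;
                        goto down;
                }
                n = parent;                   /* subtree empty: climb up */
        }
        printf("removed %s\n", root->name);
        free(root);
}

int main(void)
{
        struct node *root = mk("root", NULL);
        mk("a1", mk("a", root));
        mk("b", root);
        remove_subtree(root);                 /* b, a1, a, root */
        return 0;
}
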
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 3ca3b7f..2e0e34f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2054,7 +2054,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
+ int err = 0;
+
+ /* ext4_handle_valid() assumes a valid handle_t pointer */
+- if (handle && !ext4_handle_valid(handle))
++ if (handle && !ext4_handle_valid(handle) &&
++ !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
+ return 0;
+
+ mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 5ef7afb..06e2f73 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1063,6 +1063,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
+ return -EIO;
+ if (reclen > nbytes)
+ break;
++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
++ return -EIO;
+
+ over = filldir(dstbuf, dirent->name, dirent->namelen,
+ file->f_pos, dirent->ino, dirent->type);
+@@ -1282,6 +1284,7 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ {
+ struct inode *inode = entry->d_inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
++ struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_req *req;
+ struct fuse_setattr_in inarg;
+ struct fuse_attr_out outarg;
+@@ -1312,8 +1315,10 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+- if (is_truncate)
++ if (is_truncate) {
+ fuse_set_nowrite(inode);
++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++ }
+
+ memset(&inarg, 0, sizeof(inarg));
+ memset(&outarg, 0, sizeof(outarg));
+@@ -1375,12 +1380,14 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ invalidate_inode_pages2(inode->i_mapping);
+ }
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ return 0;
+
+ error:
+ if (is_truncate)
+ fuse_release_nowrite(inode);
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ return err;
+ }
+
+@@ -1439,6 +1446,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
+ fc->no_setxattr = 1;
+ err = -EOPNOTSUPP;
+ }
++ if (!err)
++ fuse_invalidate_attr(inode);
+ return err;
+ }
+
+@@ -1568,6 +1577,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
+ fc->no_removexattr = 1;
+ err = -EOPNOTSUPP;
+ }
++ if (!err)
++ fuse_invalidate_attr(inode);
+ return err;
+ }
+
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 5242006..510d4aa 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -519,7 +519,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fc->lock);
+- if (attr_ver == fi->attr_version && size < inode->i_size) {
++ if (attr_ver == fi->attr_version && size < inode->i_size &&
++ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
+ fi->attr_version = ++fc->attr_version;
+ i_size_write(inode, size);
+ }
+@@ -881,12 +882,16 @@ static ssize_t fuse_perform_write(struct file *file,
+ {
+ struct inode *inode = mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
++ struct fuse_inode *fi = get_fuse_inode(inode);
+ int err = 0;
+ ssize_t res = 0;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
++ if (inode->i_size < pos + iov_iter_count(ii))
++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++
+ do {
+ struct fuse_req *req;
+ ssize_t count;
+@@ -921,6 +926,7 @@ static ssize_t fuse_perform_write(struct file *file,
+ if (res > 0)
+ fuse_write_update_size(inode, pos);
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ fuse_invalidate_attr(inode);
+
+ return res > 0 ? res : err;
+@@ -1251,7 +1257,6 @@ static int fuse_writepage_locked(struct page *page)
+
+ inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+ inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+- end_page_writeback(page);
+
+ spin_lock(&fc->lock);
+ list_add(&req->writepages_entry, &fi->writepages);
+@@ -1259,6 +1264,8 @@ static int fuse_writepage_locked(struct page *page)
+ fuse_flush_writepages(inode);
+ spin_unlock(&fc->lock);
+
++ end_page_writeback(page);
++
+ return 0;
+
+ err_free:
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 89c4a58..52ffd24 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -103,6 +103,15 @@ struct fuse_inode {
+
+ /** List of writepage requests (pending or sent) */
+ struct list_head writepages;
++
++ /** Miscellaneous bits describing inode state */
++ unsigned long state;
++};
++
++/** FUSE inode state bits */
++enum {
++ /** An operation changing file size is in progress */
++ FUSE_I_SIZE_UNSTABLE,
+ };
+
+ struct fuse_conn;
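
The new FUSE_I_SIZE_UNSTABLE bit, set around truncates and extending writes in dir.c and
file.c and tested in fuse_read_update_size() and fuse_change_attributes(), keeps a stale
attribute reply from rolling i_size back while a size-changing operation is still in
flight. A simplified single-threaded sketch of the pattern (a plain flag stands in for
the atomic bitops; names are illustrative):

#include <stdio.h>

struct toy_inode {
        long long size;
        int size_unstable;              /* FUSE_I_SIZE_UNSTABLE analogue */
};

/* An attribute reply may carry a size snapshot taken before the truncate;
 * applying it blindly would undo the truncate. */
static void attr_reply_update(struct toy_inode *i, long long seen_size)
{
        if (i->size_unstable)
                return;                 /* size is in flux: ignore the reply */
        i->size = seen_size;
}

static void truncate_op(struct toy_inode *i, long long new_size)
{
        i->size_unstable = 1;           /* set_bit(FUSE_I_SIZE_UNSTABLE, ...) */
        attr_reply_update(i, 4096);     /* stale reply arriving mid-truncate */
        i->size = new_size;
        i->size_unstable = 0;           /* clear_bit(...) */
}

int main(void)
{
        struct toy_inode ino = { .size = 4096 };
        truncate_op(&ino, 0);
        printf("size after truncate: %lld\n", ino.size);   /* 0, not 4096 */
        return 0;
}
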
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 1f82d95..912c250 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -92,6 +92,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ fi->attr_version = 0;
+ fi->writectr = 0;
+ fi->orig_ino = 0;
++ fi->state = 0;
+ INIT_LIST_HEAD(&fi->write_files);
+ INIT_LIST_HEAD(&fi->queued_writes);
+ INIT_LIST_HEAD(&fi->writepages);
+@@ -200,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ loff_t oldsize;
+
+ spin_lock(&fc->lock);
+- if (attr_version != 0 && fi->attr_version > attr_version) {
++ if ((attr_version != 0 && fi->attr_version > attr_version) ||
++ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index f950059..a5f25a7 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -120,8 +120,8 @@ static void destroy_inodecache(void)
+
+ static int isofs_remount(struct super_block *sb, int *flags, char *data)
+ {
+- /* we probably want a lot more here */
+- *flags |= MS_RDONLY;
++ if (!(*flags & MS_RDONLY))
++ return -EROFS;
+ return 0;
+ }
+
+@@ -770,15 +770,6 @@ root_found:
+ */
+ s->s_maxbytes = 0x80000000000LL;
+
+- /*
+- * The CDROM is read-only, has no nodes (devices) on it, and since
+- * all of the files appear to be owned by root, we really do not want
+- * to allow suid. (suid or devices will not show up unless we have
+- * Rock Ridge extensions)
+- */
+-
+- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
+-
+ /* Set this for reference. It's not currently used except on write
+ which we don't have .. */
+
+@@ -1535,6 +1526,9 @@ struct inode *isofs_iget(struct super_block *sb,
+ static struct dentry *isofs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
++ /* We don't support read-write mounts */
++ if (!(flags & MS_RDONLY))
++ return ERR_PTR(-EACCES);
+ return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ }
+
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 65221a0..16eacec 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+ clear_buffer_nilfs_redirected(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ if (nilfs_page_buffers_clean(page))
+ __nilfs_clear_page_dirty(page);
+@@ -390,6 +391,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
+ bh = head = page_buffers(page);
+ do {
+ lock_buffer(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 6f24e67..233d3ed 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -662,7 +662,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+
+ bh = head = page_buffers(page);
+ do {
+- if (!buffer_dirty(bh))
++ if (!buffer_dirty(bh) || buffer_async_write(bh))
+ continue;
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers, listp);
+@@ -696,7 +696,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ bh = head = page_buffers(pvec.pages[i]);
+ do {
+- if (buffer_dirty(bh)) {
++ if (buffer_dirty(bh) &&
++ !buffer_async_write(bh)) {
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers,
+ listp);
+@@ -1576,6 +1577,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page) {
+ lock_page(bd_page);
+@@ -1589,6 +1591,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ lock_page(bd_page);
+@@ -1674,6 +1677,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ list_for_each_entry(segbuf, logs, sb_list) {
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1683,6 +1687,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+@@ -1752,6 +1757,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1773,6 +1779,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_redirected(bh);
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index a506360..0c2f912 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
++#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
++ /* don't merge two permission events */
++ if ((old->mask & FAN_ALL_PERM_EVENTS) &&
++ (new->mask & FAN_ALL_PERM_EVENTS))
++ return false;
++#endif
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 7eb1c0c..cf22847 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ cpos = map_start >> osb->s_clustersize_bits;
+ mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
+ map_start + map_len);
+- mapping_end -= cpos;
+ is_last = 0;
+ while (cpos < mapping_end && !is_last) {
+ u32 fe_flags;
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 331e2ef..19fe719 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -416,10 +416,12 @@ struct hid_report {
+ struct hid_device *device; /* associated device */
+ };
+
++#define HID_MAX_IDS 256
++
+ struct hid_report_enum {
+ unsigned numbered;
+ struct list_head report_list;
+- struct hid_report *report_id_hash[256];
++ struct hid_report *report_id_hash[HID_MAX_IDS];
+ };
+
+ #define HID_REPORT_TYPES 3
+@@ -716,6 +718,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
+ struct hid_device *hid_allocate_device(void);
+ struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
++struct hid_report *hid_validate_values(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int field_index,
++ unsigned int report_counts);
+ int hid_check_keys_pressed(struct hid_device *hid);
+ int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+ void hid_disconnect(struct hid_device *hid);
+diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
+index ba45e6b..f5a21d0 100644
+--- a/include/linux/icmpv6.h
++++ b/include/linux/icmpv6.h
+@@ -123,6 +123,8 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+ #define ICMPV6_NOT_NEIGHBOUR 2
+ #define ICMPV6_ADDR_UNREACH 3
+ #define ICMPV6_PORT_UNREACH 4
++#define ICMPV6_POLICY_FAIL 5
++#define ICMPV6_REJECT_ROUTE 6
+
+ /*
+ * Codes for Time Exceeded
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 0c99776..84b1447 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -255,6 +255,7 @@ struct inet6_skb_parm {
+ #define IP6SKB_XFRM_TRANSFORMED 1
+ #define IP6SKB_FORWARDED 2
+ #define IP6SKB_REROUTED 4
++#define IP6SKB_FRAGMENTED 16
+ };
+
+ #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index d0493f6..305fd75 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -865,7 +865,8 @@ extern void pagefault_out_of_memory(void);
+ * Flags passed to show_mem() and show_free_areas() to suppress output in
+ * various contexts.
+ */
+-#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
++#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
++#define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */
+
+ extern void show_free_areas(unsigned int flags);
+ extern bool skip_free_areas_node(unsigned int flags, int nid);
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 3cfcfea..eeb6a29 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -927,7 +927,7 @@ struct perf_cpu_context {
+ int exclusive;
+ struct list_head rotation_list;
+ int jiffies_interval;
+- struct pmu *active_pmu;
++ struct pmu *unique_pmu;
+ struct perf_cgroup *cgrp;
+ };
+
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index 6f95e24..3863352 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -254,8 +254,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
+ */
+ #define list_first_or_null_rcu(ptr, type, member) \
+ ({struct list_head *__ptr = (ptr); \
+- struct list_head __rcu *__next = list_next_rcu(__ptr); \
+- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
++ likely(__ptr != __next) ? \
++ list_entry_rcu(__next, type, member) : NULL; \
+ })
+
+ /**
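
The list_first_or_null_rcu() fix above is about reading head->next exactly once: the old
body could fetch the pointer once for the emptiness test and again when building the return
value, and see two different values under concurrent insertion. A userspace sketch of the
single-read form (a volatile access stands in for ACCESS_ONCE()/rcu_dereference(); no real
RCU here):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };
struct item { struct list_head link; int val; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct item *first_or_null(struct list_head *head)
{
        /* one volatile load; every later use sees this same snapshot */
        struct list_head *next = *(struct list_head * volatile *)&head->next;

        return next != head ? container_of(next, struct item, link) : NULL;
}

int main(void)
{
        struct list_head head = { &head };               /* empty list */
        printf("empty: %p\n", (void *)first_or_null(&head));
        struct item it = { .link = { &head }, .val = 7 };
        head.next = &it.link;
        printf("first: %d\n", first_or_null(&head)->val); /* 7 */
        return 0;
}
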
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 03354d5..0daa46b 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -395,7 +395,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif
+ #endif /* CONFIG_PCI */
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index e9ff3fc..34b06da 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,6 +41,10 @@ struct inet_peer {
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
++ union {
++ struct list_head gc_list;
++ struct rcu_head gc_rcu;
++ };
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+@@ -96,6 +100,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
+ extern void inet_putpeer(struct inet_peer *p);
+ extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
++extern void inetpeer_invalidate_tree(int family);
++
+ /*
+ * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+diff --git a/include/net/ip.h b/include/net/ip.h
+index eca0ef7..06aed72 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -266,9 +266,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+
+ extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+
+-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
++static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
+ {
+- if (iph->frag_off & htons(IP_DF)) {
++ struct iphdr *iph = ip_hdr(skb);
++
++ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ /* This is only to work around buggy Windows95/2000
+ * VJ compression implementations. If the ID field
+ * does not change, they drop every other packet in
+@@ -280,9 +282,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
+ __ip_select_ident(iph, dst, 0);
+ }
+
+-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
++static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
+ {
+- if (iph->frag_off & htons(IP_DF)) {
++ struct iphdr *iph = ip_hdr(skb);
++
++ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ if (sk && inet_sk(sk)->inet_daddr) {
+ iph->id = htons(inet_sk(sk)->inet_id);
+ inet_sk(sk)->inet_id += 1 + more;
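
The ip.h change passes the skb (not just the header) into ip_select_ident*() so the helpers
can consult skb->local_df: with local_df set, a DF-marked packet may still be fragmented
locally, so it must get a real IP ID rather than the constant one. The predicate, reduced
to a sketch (struct and names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct pkt { bool df; bool local_df; };

/* True when the packet can still be fragmented and therefore needs an
 * unpredictable identifier; the old code tested only the DF bit. */
static bool needs_unpredictable_id(const struct pkt *p)
{
        return !p->df || p->local_df;
}

int main(void)
{
        struct pkt plain  = { .df = true, .local_df = false };
        struct pkt tunnel = { .df = true, .local_df = true  };
        printf("plain DF packet: %d\n", needs_unpredictable_id(&plain));  /* 0 */
        printf("DF + local_df:   %d\n", needs_unpredictable_id(&tunnel)); /* 1 */
        return 0;
}
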
+diff --git a/include/net/ipip.h b/include/net/ipip.h
+index a32654d..4dccfe3 100644
+--- a/include/net/ipip.h
++++ b/include/net/ipip.h
+@@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry {
+ int pkt_len = skb->len - skb_transport_offset(skb); \
+ \
+ skb->ip_summed = CHECKSUM_NONE; \
+- ip_select_ident(iph, &rt->dst, NULL); \
++ ip_select_ident(skb, &rt->dst, NULL); \
+ \
+ err = ip_local_out(skb); \
+ if (likely(net_xmit_eval(err) == 0)) { \
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d2a01fe..2a1ffb7 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -3504,6 +3504,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+ {
+ struct cgroup_event *event = NULL;
++ struct cgroup *cgrp_cfile;
+ unsigned int efd, cfd;
+ struct file *efile = NULL;
+ struct file *cfile = NULL;
+@@ -3559,6 +3560,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
+ goto fail;
+ }
+
++ /*
++ * The file to be monitored must be in the same cgroup as
++ * cgroup.event_control is.
++ */
++ cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
++ if (cgrp_cfile != cgrp) {
++ ret = -EINVAL;
++ goto fail;
++ }
++
+ if (!event->cft->register_event || !event->cft->unregister_event) {
+ ret = -EINVAL;
+ goto fail;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 5bbe443..83d5621 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -242,9 +242,9 @@ perf_cgroup_match(struct perf_event *event)
+ return !event->cgrp || event->cgrp == cpuctx->cgrp;
+ }
+
+-static inline void perf_get_cgroup(struct perf_event *event)
++static inline bool perf_tryget_cgroup(struct perf_event *event)
+ {
+- css_get(&event->cgrp->css);
++ return css_tryget(&event->cgrp->css);
+ }
+
+ static inline void perf_put_cgroup(struct perf_event *event)
+@@ -360,6 +360,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
+
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++ if (cpuctx->unique_pmu != pmu)
++ continue; /* ensure we process each cpuctx once */
+
+ /*
+ * perf_cgroup_events says at least one
+@@ -383,9 +385,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
+
+ if (mode & PERF_CGROUP_SWIN) {
+ WARN_ON_ONCE(cpuctx->cgrp);
+- /* set cgrp before ctxsw in to
+- * allow event_filter_match() to not
+- * have to pass task around
++ /*
++ * set cgrp before ctxsw in to allow
++ * event_filter_match() to not have to pass
++ * task around
+ */
+ cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+@@ -473,7 +476,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
+ event->cgrp = cgrp;
+
+ /* must be done before we fput() the file */
+- perf_get_cgroup(event);
++ if (!perf_tryget_cgroup(event)) {
++ event->cgrp = NULL;
++ ret = -ENOENT;
++ goto out;
++ }
+
+ /*
+ * all events in a group must monitor
+@@ -4377,7 +4384,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+@@ -4523,7 +4530,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+@@ -4719,7 +4726,7 @@ got_name:
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+ vma->vm_flags & VM_EXEC);
+@@ -5741,8 +5748,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+- if (cpuctx->active_pmu == old_pmu)
+- cpuctx->active_pmu = pmu;
++ if (cpuctx->unique_pmu == old_pmu)
++ cpuctx->unique_pmu = pmu;
+ }
+ }
+
+@@ -5877,7 +5884,7 @@ skip_type:
+ cpuctx->ctx.pmu = pmu;
+ cpuctx->jiffies_interval = 1;
+ INIT_LIST_HEAD(&cpuctx->rotation_list);
+- cpuctx->active_pmu = pmu;
++ cpuctx->unique_pmu = pmu;
+ }
+
+ got_cpu_context:
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 59474c5..c261da7 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4890,11 +4890,15 @@ static void task_fork_fair(struct task_struct *p)
+
+ update_rq_clock(rq);
+
+- if (unlikely(task_cpu(p) != this_cpu)) {
+- rcu_read_lock();
+- __set_task_cpu(p, this_cpu);
+- rcu_read_unlock();
+- }
++ /*
++ * Not only the cpu but also the task_group of the parent might have
++ * been changed after parent->se.parent,cfs_rq were copied to
++ * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
++ * of child point to valid ones.
++ */
++ rcu_read_lock();
++ __set_task_cpu(p, this_cpu);
++ rcu_read_unlock();
+
+ update_curr(cfs_rq);
+
+diff --git a/lib/show_mem.c b/lib/show_mem.c
+index 4407f8c..b7c7231 100644
+--- a/lib/show_mem.c
++++ b/lib/show_mem.c
+@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
+ printk("Mem-Info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index d80ac4b..ed0ed8a 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1882,6 +1882,8 @@ static void collapse_huge_page(struct mm_struct *mm,
+ goto out;
+
+ vma = find_vma(mm, address);
++ if (!vma)
++ goto out;
+ hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ hend = vma->vm_end & HPAGE_PMD_MASK;
+ if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d027a24..204de6a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4385,7 +4385,13 @@ static int compare_thresholds(const void *a, const void *b)
+ const struct mem_cgroup_threshold *_a = a;
+ const struct mem_cgroup_threshold *_b = b;
+
+- return _a->threshold - _b->threshold;
++ if (_a->threshold > _b->threshold)
++ return 1;
++
++ if (_a->threshold < _b->threshold)
++ return -1;
++
++ return 0;
+ }
+
+ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
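
compare_thresholds() previously returned "_a->threshold - _b->threshold"; the thresholds
are u64, so the difference truncated to the comparator's int return value can flip sign
and mis-sort the array. The explicit compares in the fix cannot overflow. The same bug
in miniature:

#include <stdio.h>
#include <stdint.h>

static int cmp_sub(uint64_t a, uint64_t b)      /* the broken form */
{
        return a - b;           /* u64 difference truncated to int */
}

static int cmp_safe(uint64_t a, uint64_t b)     /* the fixed form */
{
        if (a > b) return 1;
        if (a < b) return -1;
        return 0;
}

int main(void)
{
        uint64_t a = 1ULL << 32, b = 1;         /* clearly a > b */
        /* a - b = 0xFFFFFFFF, which typically converts to -1 as an int,
         * reporting the wrong order; the safe form returns 1 */
        printf("broken: %d  fixed: %d\n", cmp_sub(a, b), cmp_safe(a, b));
        return 0;
}
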
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index b5afea2..d8762b2 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1760,6 +1760,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+ return;
+
+ /*
++ * Walking all memory to count page types is very expensive and should
++ * be inhibited in non-blockable contexts.
++ */
++ if (!(gfp_mask & __GFP_WAIT))
++ filter |= SHOW_MEM_FILTER_PAGE_COUNT;
++
++ /*
+ * This documents exceptions given to allocations in certain
+ * contexts that are allowed to allocate outside current's set
+ * of allowed nodes.
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index b81500c..a06deca 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1155,7 +1155,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ if (!mld2q->mld2q_nsrcs)
+ group = &mld2q->mld2q_mca;
+- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
++
++ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
+ }
+
+ if (!group)
+diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
+index dd147d7..8ac946f 100644
+--- a/net/bridge/br_stp.c
++++ b/net/bridge/br_stp.c
+@@ -189,7 +189,7 @@ static void br_record_config_information(struct net_bridge_port *p,
+ p->designated_age = jiffies + bpdu->message_age;
+
+ mod_timer(&p->message_age_timer, jiffies
+- + (p->br->max_age - bpdu->message_age));
++ + (bpdu->max_age - bpdu->message_age));
+ }
+
+ /* called under bridge lock */
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 5cf5222..84efbe4 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -288,9 +288,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
+
+ count = cfctrl_cancel_req(&cfctrl->serv.layer,
+ user_layer);
+- if (count != 1)
++ if (count != 1) {
+ pr_err("Could not remove request (%d)", count);
+ return -ENODEV;
++ }
+ }
+ return 0;
+ }
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f4f3f58..a70f426 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1719,6 +1719,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ dout("osdc_start_request failed map, "
+ " will retry %lld\n", req->r_tid);
+ rc = 0;
++ } else {
++ __unregister_request(osdc, req);
+ }
+ goto out_unlock;
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index db4bb7a..9649cea 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -923,15 +923,14 @@ EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+ void netpoll_cleanup(struct netpoll *np)
+ {
+- if (!np->dev)
+- return;
+-
+ rtnl_lock();
++ if (!np->dev)
++ goto out;
+ __netpoll_cleanup(np);
+- rtnl_unlock();
+-
+ dev_put(np->dev);
+ np->dev = NULL;
++out:
++ rtnl_unlock();
+ }
+ EXPORT_SYMBOL(netpoll_cleanup);
+
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 77a65f0..f0bdd36 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -19,6 +19,9 @@
+ #include <net/sock.h>
+ #include <net/net_ratelimit.h>
+
++static int zero = 0;
++static int ushort_max = USHRT_MAX;
++
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+@@ -192,7 +195,9 @@ static struct ctl_table netns_core_table[] = {
+ .data = &init_net.core.sysctl_somaxconn,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .extra1 = &zero,
++ .extra2 = &ushort_max,
++ .proc_handler = proc_dointvec_minmax
+ },
+ { }
+ };
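
Switching somaxconn from proc_dointvec to proc_dointvec_minmax with extra1/extra2 bounds
means the parsed value is validated against [0, USHRT_MAX] at write time, instead of being
stored raw and silently truncated later. The validation it buys, sketched:

#include <stdio.h>

static int store_bounded(int val, int min, int max, int *out)
{
        if (val < min || val > max)
                return -1;      /* proc_dointvec_minmax rejects out-of-range */
        *out = val;
        return 0;
}

int main(void)
{
        int somaxconn = 128;
        printf("%d\n", store_bounded(70000, 0, 65535, &somaxconn)); /* -1 */
        printf("%d\n", somaxconn);                                  /* still 128 */
        return 0;
}
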
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index cd2d639..c7c6724 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -72,7 +72,6 @@
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+-#include <linux/prefetch.h>
+ #include <linux/export.h>
+ #include <net/net_namespace.h>
+ #include <net/ip.h>
+@@ -1773,10 +1772,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
+ if (!c)
+ continue;
+
+- if (IS_LEAF(c)) {
+- prefetch(rcu_dereference_rtnl(p->child[idx]));
++ if (IS_LEAF(c))
+ return (struct leaf *) c;
+- }
+
+ /* Rescan start scanning in new node */
+ p = (struct tnode *) c;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index c8989a7..75b0860 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -342,7 +342,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ pip->saddr = fl4.saddr;
+ pip->protocol = IPPROTO_IGMP;
+ pip->tot_len = 0; /* filled in later */
+- ip_select_ident(pip, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ((u8*)&pip[1])[0] = IPOPT_RA;
+ ((u8*)&pip[1])[1] = 4;
+ ((u8*)&pip[1])[2] = 0;
+@@ -683,7 +683,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ iph->daddr = dst;
+ iph->saddr = fl4.saddr;
+ iph->protocol = IPPROTO_IGMP;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ((u8*)&iph[1])[0] = IPOPT_RA;
+ ((u8*)&iph[1])[1] = 4;
+ ((u8*)&iph[1])[2] = 0;
+@@ -705,7 +705,7 @@ static void igmp_gq_timer_expire(unsigned long data)
+
+ in_dev->mr_gq_running = 0;
+ igmpv3_send_report(in_dev, NULL);
+- __in_dev_put(in_dev);
++ in_dev_put(in_dev);
+ }
+
+ static void igmp_ifc_timer_expire(unsigned long data)
+@@ -717,7 +717,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
+ in_dev->mr_ifc_count--;
+ igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+ }
+- __in_dev_put(in_dev);
++ in_dev_put(in_dev);
+ }
+
+ static void igmp_ifc_event(struct in_device *in_dev)
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 86f13c67..58c4e696 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/net.h>
++#include <linux/workqueue.h>
+ #include <net/ip.h>
+ #include <net/inetpeer.h>
+ #include <net/secure_seq.h>
+@@ -31,8 +32,8 @@
+ * At the moment of writing this notes identifier of IP packets is generated
+ * to be unpredictable using this code only for packets subjected
+ * (actually or potentially) to defragmentation. I.e. DF packets less than
+- * PMTU in size uses a constant ID and do not use this code (see
+- * ip_select_ident() in include/net/ip.h).
++ * PMTU in size when local fragmentation is disabled use a constant ID and do
++ * not use this code (see ip_select_ident() in include/net/ip.h).
+ *
+ * Route cache entries hold references to our nodes.
+ * New cache entries get references via lookup by destination IP address in
+@@ -66,6 +67,11 @@
+
+ static struct kmem_cache *peer_cachep __read_mostly;
+
++static LIST_HEAD(gc_list);
++static const int gc_delay = 60 * HZ;
++static struct delayed_work gc_work;
++static DEFINE_SPINLOCK(gc_lock);
++
+ #define node_height(x) x->avl_height
+
+ #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
+ int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+
++static void inetpeer_gc_worker(struct work_struct *work)
++{
++ struct inet_peer *p, *n;
++ LIST_HEAD(list);
++
++ spin_lock_bh(&gc_lock);
++ list_replace_init(&gc_list, &list);
++ spin_unlock_bh(&gc_lock);
++
++ if (list_empty(&list))
++ return;
++
++ list_for_each_entry_safe(p, n, &list, gc_list) {
++
++ if (need_resched())
++ cond_resched();
++
++ if (p->avl_left != peer_avl_empty) {
++ list_add_tail(&p->avl_left->gc_list, &list);
++ p->avl_left = peer_avl_empty;
++ }
++
++ if (p->avl_right != peer_avl_empty) {
++ list_add_tail(&p->avl_right->gc_list, &list);
++ p->avl_right = peer_avl_empty;
++ }
++
++ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
++
++ if (!atomic_read(&p->refcnt)) {
++ list_del(&p->gc_list);
++ kmem_cache_free(peer_cachep, p);
++ }
++ }
++
++ if (list_empty(&list))
++ return;
++
++ spin_lock_bh(&gc_lock);
++ list_splice(&list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
+
+ /* Called from ip_output.c:ip_init */
+ void __init inet_initpeers(void)
+@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ NULL);
+
++ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+ }
+
+ static int addr_compare(const struct inetpeer_addr *a,
+@@ -448,7 +499,7 @@ relookup:
+ p->pmtu_expires = 0;
+ p->pmtu_orig = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
+-
++ INIT_LIST_HEAD(&p->gc_list);
+
+ /* Link the node. */
+ link_to_pool(p, base);
+@@ -508,3 +559,38 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+ return rc;
+ }
+ EXPORT_SYMBOL(inet_peer_xrlim_allow);
++
++static void inetpeer_inval_rcu(struct rcu_head *head)
++{
++ struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
++
++ spin_lock_bh(&gc_lock);
++ list_add_tail(&p->gc_list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
++
++void inetpeer_invalidate_tree(int family)
++{
++ struct inet_peer *old, *new, *prev;
++ struct inet_peer_base *base = family_to_base(family);
++
++ write_seqlock_bh(&base->lock);
++
++ old = base->root;
++ if (old == peer_avl_empty_rcu)
++ goto out;
++
++ new = peer_avl_empty_rcu;
++
++ prev = cmpxchg(&base->root, old, new);
++ if (prev == old) {
++ base->total = 0;
++ call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
++ }
++
++out:
++ write_sequnlock_bh(&base->lock);
++}
++EXPORT_SYMBOL(inetpeer_invalidate_tree);
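
inetpeer_invalidate_tree() above detaches the whole AVL tree in O(1) with a cmpxchg on the
root, then hands the detached subtree to an RCU callback, which queues it for the deferred
workqueue walk (inetpeer_gc_worker) that frees nodes only once their refcounts drop. A
sketch of just the detach step, with GCC atomic builtins standing in for cmpxchg() and the
deferred-free machinery omitted:

#include <stdio.h>
#include <stdlib.h>

struct peer { struct peer *avl_left, *avl_right; };
static struct peer *root;

/* Swap the shared root for an empty tree; the caller becomes sole owner of
 * the old tree and can free it at leisure (the kernel defers that through
 * call_rcu + a workqueue rather than freeing inline). */
static struct peer *detach_tree(void)
{
        struct peer *old = __atomic_load_n(&root, __ATOMIC_ACQUIRE);

        while (old) {
                if (__atomic_compare_exchange_n(&root, &old, NULL, 0,
                                                __ATOMIC_ACQ_REL,
                                                __ATOMIC_ACQUIRE))
                        return old;     /* we won the swap */
                /* old was refreshed with the current root; retry */
        }
        return NULL;                    /* tree was already empty */
}

int main(void)
{
        root = calloc(1, sizeof(*root));
        struct peer *mine = detach_tree();
        printf("detached: %p, root now: %p\n", (void *)mine, (void *)root);
        free(mine);     /* a real walk would free the whole subtree */
        return 0;
}
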
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 0bc95f3..daf408e 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -162,7 +162,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+ iph->saddr = saddr;
+ iph->protocol = sk->sk_protocol;
+- ip_select_ident(iph, &rt->dst, sk);
++ ip_select_ident(skb, &rt->dst, sk);
+
+ if (opt && opt->opt.optlen) {
+ iph->ihl += opt->opt.optlen>>2;
+@@ -390,7 +390,7 @@ packet_routed:
+ ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
+ }
+
+- ip_select_ident_more(iph, &rt->dst, sk,
++ ip_select_ident_more(skb, &rt->dst, sk,
+ (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+
+ skb->priority = sk->sk_priority;
+@@ -1334,7 +1334,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ iph->ihl = 5;
+ iph->tos = inet->tos;
+ iph->frag_off = df;
+- ip_select_ident(iph, &rt->dst, sk);
++ ip_select_ident(skb, &rt->dst, sk);
+ iph->ttl = ttl;
+ iph->protocol = sk->sk_protocol;
+ iph->saddr = fl4->saddr;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 0064394..b5e64e4 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1576,7 +1576,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+ iph->protocol = IPPROTO_IPIP;
+ iph->ihl = 5;
+ iph->tot_len = htons(skb->len);
+- ip_select_ident(iph, skb_dst(skb), NULL);
++ ip_select_ident(skb, skb_dst(skb), NULL);
+ ip_send_check(iph);
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index e1d4f30..2815014 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -380,7 +380,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ iph->check = 0;
+ iph->tot_len = htons(length);
+ if (!iph->id)
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 94cdbc5..c45a155a3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -939,6 +939,7 @@ static void rt_cache_invalidate(struct net *net)
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
++ inetpeer_invalidate_tree(AF_INET);
+ }
+
+ /*
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index f376b05..b78eac2 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -204,8 +204,8 @@ static u32 cubic_root(u64 a)
+ */
+ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+ {
+- u64 offs;
+- u32 delta, t, bic_target, max_cnt;
++ u32 delta, bic_target, max_cnt;
++ u64 offs, t;
+
+ ca->ack_cnt++; /* count the number of ACKs */
+
+@@ -248,9 +248,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+ * if the cwnd < 1 million packets !!!
+ */
+
++ t = (s32)(tcp_time_stamp - ca->epoch_start);
++ t += msecs_to_jiffies(ca->delay_min >> 3);
+ /* change the unit from HZ to bictcp_HZ */
+- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
+- - ca->epoch_start) << BICTCP_HZ) / HZ;
++ t <<= BICTCP_HZ;
++ do_div(t, HZ);
+
+ if (t < ca->bic_K) /* t - K */
+ offs = ca->bic_K - t;
+@@ -412,7 +414,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+ return;
+
+ /* Discard delay samples right after fast recovery */
+- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
++ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ return;
+
+ delay = (rtt_us << 3) / USEC_PER_MSEC;
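
The bictcp_update() hunk exists because shifting a 32-bit elapsed-time value left by
BICTCP_HZ (10) overflows once the connection has been up long enough; the fix widens to
64 bits before the shift and divides with do_div(). The overflow, demonstrated with
illustrative constants:

#include <stdio.h>
#include <stdint.h>

#define BICTCP_HZ 10
#define HZ 1000

int main(void)
{
        uint32_t elapsed = 5UL * 24 * 3600 * HZ;        /* ~5 days in jiffies */
        uint32_t narrow = (elapsed << BICTCP_HZ) / HZ;  /* wraps at 2^32 */
        uint64_t wide = ((uint64_t)elapsed << BICTCP_HZ) / HZ;
        printf("32-bit: %u, 64-bit: %llu\n", narrow,
               (unsigned long long)wide);
        return 0;
}
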
+diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
+index ed4bf11..938553e 100644
+--- a/net/ipv4/xfrm4_mode_tunnel.c
++++ b/net/ipv4/xfrm4_mode_tunnel.c
+@@ -54,7 +54,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
+- ip_select_ident(top_iph, dst->child, NULL);
++ ip_select_ident(skb, dst->child, NULL);
+
+ top_iph->ttl = ip4_dst_hoplimit(dst->child);
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 314bda2..5d41293 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -913,12 +913,10 @@ retry:
+ if (ifp->flags & IFA_F_OPTIMISTIC)
+ addr_flags |= IFA_F_OPTIMISTIC;
+
+- ift = !max_addresses ||
+- ipv6_count_addresses(idev) < max_addresses ?
+- ipv6_add_addr(idev, &addr, tmp_plen,
+- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+- addr_flags) : NULL;
+- if (!ift || IS_ERR(ift)) {
++ ift = ipv6_add_addr(idev, &addr, tmp_plen,
++ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
++ addr_flags);
++ if (IS_ERR(ift)) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ printk(KERN_INFO
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 90868fb..d505453 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -911,6 +911,14 @@ static const struct icmp6_err {
+ .err = ECONNREFUSED,
+ .fatal = 1,
+ },
++ { /* POLICY_FAIL */
++ .err = EACCES,
++ .fatal = 1,
++ },
++ { /* REJECT_ROUTE */
++ .err = EACCES,
++ .fatal = 1,
++ },
+ };
+
+ int icmpv6_err_convert(u8 type, u8 code, int *err)
+@@ -922,7 +930,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ fatal = 1;
+- if (code <= ICMPV6_PORT_UNREACH) {
++ if (code < ARRAY_SIZE(tab_unreach)) {
+ *err = tab_unreach[code].err;
+ fatal = tab_unreach[code].fatal;
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 93718f3..443724f 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -862,14 +862,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
+
+ if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
+ #ifdef CONFIG_IPV6_SUBTREES
+- if (fn->subtree)
+- fn = fib6_lookup_1(fn->subtree, args + 1);
++ if (fn->subtree) {
++ struct fib6_node *sfn;
++ sfn = fib6_lookup_1(fn->subtree,
++ args + 1);
++ if (!sfn)
++ goto backtrack;
++ fn = sfn;
++ }
+ #endif
+- if (!fn || fn->fn_flags & RTN_RTINFO)
++ if (fn->fn_flags & RTN_RTINFO)
+ return fn;
+ }
+ }
+-
++#ifdef CONFIG_IPV6_SUBTREES
++backtrack:
++#endif
+ if (fn->fn_flags & RTN_ROOT)
+ break;
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index db60043..91d0711 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1125,6 +1125,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ * udp datagram
+ */
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
++ struct frag_hdr fhdr;
++
+ skb = sock_alloc_send_skb(sk,
+ hh_len + fragheaderlen + transhdrlen + 20,
+ (flags & MSG_DONTWAIT), &err);
+@@ -1145,12 +1147,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+- }
+-
+- err = skb_append_datato_frags(sk,skb, getfrag, from,
+- (length - transhdrlen));
+- if (!err) {
+- struct frag_hdr fhdr;
+
+ /* Specify the length of each IPv6 datagram fragment.
+ * It has to be a multiple of 8.
+@@ -1161,15 +1157,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ ipv6_select_ident(&fhdr, rt);
+ skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+-
+- return 0;
+ }
+- /* There is not enough support do UPD LSO,
+- * so follow normal path
+- */
+- kfree_skb(skb);
+
+- return err;
++ return skb_append_datato_frags(sk, skb, getfrag, from,
++ (length - transhdrlen));
+ }
+
+ static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
+@@ -1342,27 +1333,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ * --yoshfuji
+ */
+
+- cork->length += length;
+- if (length > mtu) {
+- int proto = sk->sk_protocol;
+- if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
+- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+- return -EMSGSIZE;
+- }
+-
+- if (proto == IPPROTO_UDP &&
+- (rt->dst.dev->features & NETIF_F_UFO)) {
++ if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
++ sk->sk_protocol == IPPROTO_RAW)) {
++ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
++ return -EMSGSIZE;
++ }
+
+- err = ip6_ufo_append_data(sk, getfrag, from, length,
+- hh_len, fragheaderlen,
+- transhdrlen, mtu, flags, rt);
+- if (err)
+- goto error;
+- return 0;
+- }
++ skb = skb_peek_tail(&sk->sk_write_queue);
++ cork->length += length;
++ if (((length > mtu) ||
++ (skb && skb_is_gso(skb))) &&
++ (sk->sk_protocol == IPPROTO_UDP) &&
++ (rt->dst.dev->features & NETIF_F_UFO)) {
++ err = ip6_ufo_append_data(sk, getfrag, from, length,
++ hh_len, fragheaderlen,
++ transhdrlen, mtu, flags, rt);
++ if (err)
++ goto error;
++ return 0;
+ }
+
+- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
++ if (!skb)
+ goto alloc_new_skb;
+
+ while (length > 0) {
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index c7ec4bb..d20a9be 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2159,7 +2159,7 @@ static void mld_gq_timer_expire(unsigned long data)
+
+ idev->mc_gq_running = 0;
+ mld_send_report(idev, NULL);
+- __in6_dev_put(idev);
++ in6_dev_put(idev);
+ }
+
+ static void mld_ifc_timer_expire(unsigned long data)
+@@ -2172,7 +2172,7 @@ static void mld_ifc_timer_expire(unsigned long data)
+ if (idev->mc_ifc_count)
+ mld_ifc_start_timer(idev, idev->mc_maxdelay);
+ }
+- __in6_dev_put(idev);
++ in6_dev_put(idev);
+ }
+
+ static void mld_ifc_event(struct inet6_dev *idev)
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 9ffc37f..bc55358 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -447,7 +447,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ struct sk_buff *skb;
+ struct icmp6hdr *hdr;
+ int len;
+- int err;
+ u8 *opt;
+
+ if (!dev->addr_len)
+@@ -457,14 +456,12 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ if (llinfo)
+ len += ndisc_opt_addr_space(dev);
+
+- skb = sock_alloc_send_skb(sk,
+- (MAX_HEADER + sizeof(struct ipv6hdr) +
+- len + LL_ALLOCATED_SPACE(dev)),
+- 1, &err);
++ skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) +
++ len + LL_ALLOCATED_SPACE(dev)), GFP_ATOMIC);
+ if (!skb) {
+ ND_PRINTK0(KERN_ERR
+- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
+- __func__, err);
++ "ICMPv6 ND: %s() failed to allocate an skb.\n",
++ __func__);
+ return NULL;
+ }
+
+@@ -492,6 +489,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ csum_partial(hdr,
+ len, 0));
+
++ /* Manually assign socket ownership as we avoid calling
++ * sock_alloc_send_pskb() to bypass wmem buffer limits
++ */
++ skb_set_owner_w(skb, sk);
++
+ return skb;
+ }
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 411fe2c..eba5deb 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -517,6 +517,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ head->tstamp = fq->q.stamp;
+ ipv6_hdr(head)->payload_len = htons(payload_len);
+ IP6CB(head)->nhoff = nhoff;
++ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+
+ /* Yes, and fold redundant checksum back. 8) */
+ if (head->ip_summed == CHECKSUM_COMPLETE)
+@@ -552,6 +553,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct net *net = dev_net(skb_dst(skb)->dev);
+
++ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
++ goto fail_hdr;
++
+ IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+
+ /* Jumbo payload inhibits frag. header */
+@@ -572,6 +576,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
+
+ IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
++ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ return 1;
+ }
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index aa2d720..38c0813 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -853,7 +853,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ iph->daddr = cp->daddr.ip;
+ iph->saddr = saddr;
+ iph->ttl = old_iph->ttl;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+
+ /* Another hack: avoid icmp_send in ip_fragment */
+ skb->local_df = 1;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index f08b9166..caa5aff 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -86,7 +86,7 @@ struct htb_class {
+ unsigned int children;
+ struct htb_class *parent; /* parent class */
+
+- int prio; /* these two are used only by leaves... */
++ u32 prio; /* these two are used only by leaves... */
+ int quantum; /* but stored for parent-to-leaf return */
+
+ union {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 8104278..0b6a391 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -205,45 +205,24 @@ out:
+ in6_dev_put(idev);
+ }
+
+-/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
+ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
+ {
+ struct sock *sk = skb->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct flowi6 fl6;
+-
+- memset(&fl6, 0, sizeof(fl6));
+-
+- fl6.flowi6_proto = sk->sk_protocol;
+-
+- /* Fill in the dest address from the route entry passed with the skb
+- * and the source address from the transport.
+- */
+- ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
+- ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+-
+- fl6.flowlabel = np->flow_label;
+- IP6_ECN_flow_xmit(sk, fl6.flowlabel);
+- if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
+- fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
+- else
+- fl6.flowi6_oif = sk->sk_bound_dev_if;
+-
+- if (np->opt && np->opt->srcrt) {
+- struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
+- ipv6_addr_copy(&fl6.daddr, rt0->addr);
+- }
++ struct flowi6 *fl6 = &transport->fl.u.ip6;
+
+ SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
+ __func__, skb, skb->len,
+- &fl6.saddr, &fl6.daddr);
++ &fl6->saddr, &fl6->daddr);
+
+- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
++ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+
+ if (!(transport->param_flags & SPP_PMTUD_ENABLE))
+ skb->local_df = 1;
+
+- return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++ SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
++
++ return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ }
+
+ /* Returns the dst cache entry for the given source and destination ip
+@@ -256,10 +235,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ struct dst_entry *dst = NULL;
+ struct flowi6 *fl6 = &fl->u.ip6;
+ struct sctp_bind_addr *bp;
++ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sctp_sockaddr_entry *laddr;
+ union sctp_addr *baddr = NULL;
+ union sctp_addr *daddr = &t->ipaddr;
+ union sctp_addr dst_saddr;
++ struct in6_addr *final_p, final;
+ __u8 matchlen = 0;
+ __u8 bmatchlen;
+ sctp_scope_t scope;
+@@ -282,7 +263,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
+ }
+
+- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
++ final_p = fl6_update_dst(fl6, np->opt, &final);
++ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ if (!asoc || saddr)
+ goto out;
+
+@@ -333,10 +315,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ }
+ }
+ rcu_read_unlock();
++
+ if (baddr) {
+ ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+ fl6->fl6_sport = baddr->v6.sin6_port;
+- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
++ final_p = fl6_update_dst(fl6, np->opt, &final);
++ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ }
+
+ out:
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 9032d50..76388b0 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1604,9 +1604,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
+ asoc->outqueue.outstanding_bytes;
+ sackh.num_gap_ack_blocks = 0;
+ sackh.num_dup_tsns = 0;
+- chunk->subh.sack_hdr = &sackh;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
+- SCTP_CHUNK(chunk));
++ SCTP_SACKH(&sackh));
+ break;
+
+ case SCTP_CMD_DISCARD_PACKET:
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ba0108f..c53d01e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -814,6 +814,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
+ goto skip_mkasconf;
+ }
+
++ if (laddr == NULL)
++ return -EINVAL;
++
+ /* We do not need RCU protection throughout this loop
+ * because this is done under a socket lock from the
+ * setsockopt call.
+diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
+index e728d4c..a224a38 100644
+--- a/net/tipc/eth_media.c
++++ b/net/tipc/eth_media.c
+@@ -53,6 +53,7 @@ struct eth_bearer {
+ struct tipc_bearer *bearer;
+ struct net_device *dev;
+ struct packet_type tipc_packet_type;
++ struct work_struct setup;
+ };
+
+ static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+@@ -121,6 +122,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
+ }
+
+ /**
++ * setup_bearer - setup association between Ethernet bearer and interface
++ */
++static void setup_bearer(struct work_struct *work)
++{
++ struct eth_bearer *eb_ptr =
++ container_of(work, struct eth_bearer, setup);
++
++ dev_add_pack(&eb_ptr->tipc_packet_type);
++}
++
++/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface
+ */
+
+@@ -164,7 +176,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
+ eb_ptr->tipc_packet_type.func = recv_msg;
+ eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+ INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+- dev_add_pack(&eb_ptr->tipc_packet_type);
++ INIT_WORK(&eb_ptr->setup, setup_bearer);
++ schedule_work(&eb_ptr->setup);
+
+ /* Associate TIPC bearer with Ethernet bearer */
+
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index d793001..ba3d9df 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -2044,6 +2044,9 @@ sub process_file($) {
+
+ $section_counter = 0;
+ while (<IN>) {
++ while (s/\\\s*$//) {
++ $_ .= <IN>;
++ }
+ if ($state == 0) {
+ if (/$doc_start/o) {
+ $state = 1; # next line is always the function name
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a166a85..7ebe4b7 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2621,6 +2621,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
+ {}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 55d9b30..05f097a 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -512,6 +512,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
+ }
+ }
+
++ if (!ca) {
++ /* if there was no match, select the regular ALSA channel
++ * allocation with the matching number of channels */
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ if (channels == channel_allocations[i].channels) {
++ ca = channel_allocations[i].ca_index;
++ break;
++ }
++ }
++ }
++
+ snd_print_channel_allocation(eld->spk_alloc, buf, sizeof(buf));
+ snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
+ ca, channels, buf);
+diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
+index 5ca122e..290f4d3 100644
+--- a/sound/soc/codecs/88pm860x-codec.c
++++ b/sound/soc/codecs/88pm860x-codec.c
+@@ -351,6 +351,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
+ val = ucontrol->value.integer.value[0];
+ val2 = ucontrol->value.integer.value[1];
+
++ if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
++ return -EINVAL;
++
+ err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
+ if (err < 0)
+ return err;
+diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
+index 26d7b08..a52c15b 100644
+--- a/sound/soc/codecs/max98095.c
++++ b/sound/soc/codecs/max98095.c
+@@ -1861,7 +1861,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_eq_channel(kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_eq_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
+@@ -2014,7 +2014,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_biquad_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 2df253c..ef96ca6 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -805,9 +805,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+ if (pll_div.k) {
+ reg |= 0x20;
+
+- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
+- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
+- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
+ }
+ snd_soc_write(codec, WM8960_PLL1, reg);
+
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 42dffa0..f7a7b9d 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -16,6 +16,7 @@ const char *map_type__name[MAP__NR_TYPES] = {
+ static inline int is_anon_memory(const char *filename)
+ {
+ return !strcmp(filename, "//anon") ||
++ !strcmp(filename, "/dev/zero (deleted)") ||
+ !strcmp(filename, "/anon_hugepage (deleted)");
+ }
+
diff --git a/3.2.54/1052_linux-3.2.53.patch b/3.2.54/1052_linux-3.2.53.patch
new file mode 100644
index 0000000..986d714
--- /dev/null
+++ b/3.2.54/1052_linux-3.2.53.patch
@@ -0,0 +1,3357 @@
+diff --git a/Makefile b/Makefile
+index 1dd2c09..90f57dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index 1881b31..b19559c 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\tnop\n\t"
++ asm_volatile_goto("1:\tnop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index 37aabd7..d2d5825 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -195,6 +195,8 @@ common_stext:
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
++ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
++
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+@@ -203,6 +205,8 @@ common_stext:
+ copy %r0,%arg3
+
+ stext_pdc_ret:
++ mtctl %r6,%cr30 /* restore task thread info */
++
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index f19e660..cd8b02f 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -811,14 +811,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ else {
+
+ /*
+- * The kernel should never fault on its own address space.
++ * The kernel should never fault on its own address space,
++ * unless pagefault_disable() was called before.
+ */
+
+- if (fault_space == 0)
++ if (fault_space == 0 && !in_atomic())
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+-
+ }
+ }
+
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index 938986e..ee33888 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -19,7 +19,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 5e8dc08..e3b3cf9 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -922,7 +922,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+- std r8, VCPU_DSCR(r7)
++ std r8, VCPU_DSCR(r9)
+ mtspr SPRN_DSCR, r7
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 95a6cf2..8512d0a 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -15,7 +15,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("0: brcl 0,0\n"
++ asm_volatile_goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index fc73a82..e17b65b 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -9,7 +9,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
+index 63294f5..4f7ae39 100644
+--- a/arch/tile/include/asm/percpu.h
++++ b/arch/tile/include/asm/percpu.h
+@@ -15,9 +15,37 @@
+ #ifndef _ASM_TILE_PERCPU_H
+ #define _ASM_TILE_PERCPU_H
+
+-register unsigned long __my_cpu_offset __asm__("tp");
+-#define __my_cpu_offset __my_cpu_offset
+-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
++register unsigned long my_cpu_offset_reg asm("tp");
++
++#ifdef CONFIG_PREEMPT
++/*
++ * For full preemption, we can't just use the register variable
++ * directly, since we need barrier() to hazard against it, causing the
++ * compiler to reload anything computed from a previous "tp" value.
++ * But we also don't want to use volatile asm, since we'd like the
++ * compiler to be able to cache the value across multiple percpu reads.
++ * So we use a fake stack read as a hazard against barrier().
++ * The 'U' constraint is like 'm' but disallows postincrement.
++ */
++static inline unsigned long __my_cpu_offset(void)
++{
++ unsigned long tp;
++ register unsigned long *sp asm("sp");
++ asm("move %0, tp" : "=r" (tp) : "U" (*sp));
++ return tp;
++}
++#define __my_cpu_offset __my_cpu_offset()
++#else
++/*
++ * We don't need to hazard against barrier() since "tp" doesn't ever
++ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
++ * changes at function call points, at which we are already re-reading
++ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
++ */
++#define __my_cpu_offset my_cpu_offset_reg
++#endif
++
++#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
+
+ #include <asm-generic/percpu.h>
+
+diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
+index 829df49..41ebbfe 100644
+--- a/arch/um/kernel/exitcode.c
++++ b/arch/um/kernel/exitcode.c
+@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+ {
+ char *end, buf[sizeof("nnnnn\0")];
++ size_t size;
+ int tmp;
+
+- if (copy_from_user(buf, buffer, count))
++ size = min(count, sizeof(buf));
++ if (copy_from_user(buf, buffer, size))
+ return -EFAULT;
+
+ tmp = simple_strtol(buf, &end, 0);
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 0c3b775..a315f1c 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -334,7 +334,7 @@ extern const char * const x86_power_flags[32];
+ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ {
+ #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+- asm goto("1: jmp %l[t_no]\n"
++ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index a32b18c..e12c1bc 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:"
++ asm_volatile_goto("1:"
+ JUMP_LABEL_INITIAL_NOP
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
+index f2220b5..cf3e9cb 100644
+--- a/arch/xtensa/kernel/signal.c
++++ b/arch/xtensa/kernel/signal.c
+@@ -346,7 +346,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ sp = regs->areg[1];
+
+- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
++ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index aea627e..7d1a478 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1286,14 +1286,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+- * scmd->retries is decremented for commands which get retried
++ * scmd->allowed is incremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *scmd = qc->scsicmd;
+- if (!qc->err_mask && scmd->retries)
+- scmd->retries--;
++ if (!qc->err_mask)
++ scmd->allowed++;
+ __ata_eh_qc_complete(qc);
+ }
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index b651733..c244f0e 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -668,7 +668,7 @@ static void set_timer_rand_state(unsigned int irq,
+ */
+ void add_device_randomness(const void *buf, unsigned int size)
+ {
+- unsigned long time = get_cycles() ^ jiffies;
++ unsigned long time = random_get_entropy() ^ jiffies;
+
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+@@ -705,7 +705,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ goto out;
+
+ sample.jiffies = jiffies;
+- sample.cycles = get_cycles();
++ sample.cycles = random_get_entropy();
+ sample.num = num;
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+@@ -772,7 +772,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+- __u32 input[4], cycles = get_cycles();
++ __u32 input[4], cycles = random_get_entropy();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+@@ -1480,12 +1480,11 @@ ctl_table random_table[] = {
+
+ static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+-static int __init random_int_secret_init(void)
++int random_int_secret_init(void)
+ {
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ return 0;
+ }
+-late_initcall(random_int_secret_init);
+
+ /*
+ * Get a random word for internal kernel use only. Similar to urandom but
+@@ -1504,7 +1503,7 @@ unsigned int get_random_int(void)
+
+ hash = get_cpu_var(get_random_int_hash);
+
+- hash[0] += current->pid + jiffies + get_cycles();
++ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
+ put_cpu_var(get_random_int_hash);
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 46bbf43..66d5384 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -64,6 +64,7 @@ void proc_fork_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -79,6 +80,7 @@ void proc_fork_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ /* If cn_netlink_send() failed, the data is not sent */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+@@ -95,6 +97,7 @@ void proc_exec_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -105,6 +108,7 @@ void proc_exec_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -121,6 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+ ev->event_data.id.process_pid = task->pid;
+ ev->event_data.id.process_tgid = task->tgid;
+@@ -144,6 +149,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -159,6 +165,7 @@ void proc_sid_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -169,6 +176,7 @@ void proc_sid_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -184,6 +192,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -202,6 +211,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -217,6 +227,7 @@ void proc_comm_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -228,6 +239,7 @@ void proc_comm_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -243,6 +255,7 @@ void proc_exit_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -255,6 +268,7 @@ void proc_exit_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -278,6 +292,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -287,6 +302,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = rcvd_ack + 1;
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index dde6a0f..ea6efe8 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
+ static void cn_rx_skb(struct sk_buff *__skb)
+ {
+ struct nlmsghdr *nlh;
+- int err;
+ struct sk_buff *skb;
++ int len, err;
+
+ skb = skb_get(__skb);
+
+ if (skb->len >= NLMSG_SPACE(0)) {
+ nlh = nlmsg_hdr(skb);
++ len = nlmsg_len(nlh);
+
+- if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
++ if (len < (int)sizeof(struct cn_msg) ||
+ skb->len < nlh->nlmsg_len ||
+- nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
++ len > CONNECTOR_MAX_MSG_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 40c187c..acfe567 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -408,9 +408,16 @@ long drm_ioctl(struct file *filp,
+ asize = drv_size;
+ }
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++ u32 drv_size;
++
+ ioctl = &drm_ioctls[nr];
+- cmd = ioctl->cmd;
++
++ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
++ if (drv_size > asize)
++ asize = drv_size;
++
++ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 3171294..475a275 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1390,7 +1390,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+- (rdev->family != CHIP_RS880))
++ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index a68057a..5efba47 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1797,7 +1797,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+- rdev->config.evergreen.max_hw_contexts = 8;
++ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 30cac58..0b86d47 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -212,6 +212,7 @@ static int send_argument(const char *key)
+
+ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
++ u8 status, data = 0;
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+@@ -219,6 +220,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ return -EIO;
+ }
+
++ /* This has no effect on newer (2012) SMCs */
+ outb(len, APPLESMC_DATA_PORT);
+
+ for (i = 0; i < len; i++) {
+@@ -229,6 +231,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+ }
+
++ /* Read the data port until bit0 is cleared */
++ for (i = 0; i < 16; i++) {
++ udelay(APPLESMC_MIN_WAIT);
++ status = inb(APPLESMC_CMD_PORT);
++ if (!(status & 0x01))
++ break;
++ data = inb(APPLESMC_DATA_PORT);
++ }
++ if (i)
++ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
++
+ return 0;
+ }
+
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 3ac4156..75c182b 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
+ return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
+ }
+
++static void skip_metadata(struct pstore *ps)
++{
++ uint32_t stride = ps->exceptions_per_area + 1;
++ chunk_t next_free = ps->next_free;
++ if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
++ ps->next_free++;
++}
++
+ /*
+ * Read or write a metadata area. Remembering to skip the first
+ * chunk which holds the header.
+@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
+
+ ps->current_area--;
+
++ skip_metadata(ps);
++
+ return 0;
+ }
+
+@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ struct dm_exception *e)
+ {
+ struct pstore *ps = get_info(store);
+- uint32_t stride;
+- chunk_t next_free;
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+ /* Is there enough room ? */
+@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ * Move onto the next free pending, making sure to take
+ * into account the location of the metadata chunks.
+ */
+- stride = (ps->exceptions_per_area + 1);
+- next_free = ++ps->next_free;
+- if (sector_div(next_free, stride) == 1)
+- ps->next_free++;
++ ps->next_free++;
++ skip_metadata(ps);
+
+ atomic_inc(&ps->pending_count);
+ return 0;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index a319057..de87f82 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -658,14 +658,14 @@ static size_t can_get_size(const struct net_device *dev)
+ size_t size;
+
+ size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
++ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
+- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
++ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
++ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+- size += sizeof(struct can_berr_counter);
++ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+- size += sizeof(struct can_bittiming_const);
++ size += nla_total_size(sizeof(struct can_bittiming_const));
+
+ return size;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4c50ac0..bbb6692 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -516,6 +516,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
++ skb_record_rx_queue(skb, fp->index);
+ napi_gro_receive(&fp->napi, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 4236b82..4aa830f 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -1234,6 +1234,7 @@ static void cp_tx_timeout(struct net_device *dev)
+ cp_clean_rings(cp);
+ rc = cp_init_rings(cp);
+ cp_start_hw(cp);
++ cp_enable_irq(cp);
+
+ netif_wake_queue(dev);
+
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index fd8115e..10668eb 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -873,8 +873,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+- }
+- if (!netdev_mc_empty(ndev)) {
++ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index ebb9f24..7a4c491 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+ }
+
+ i = port->index;
++ memset(&sync, 0, sizeof(sync));
+ sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+ /* Lucky card and linux use same encoding here */
+ sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
+index 44b7071..c643d77 100644
+--- a/drivers/net/wan/wanxl.c
++++ b/drivers/net/wan/wanxl.c
+@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
++ memset(&line, 0, sizeof(line));
+ line.clock_type = get_status(port)->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index a97a52a..408477d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -270,11 +270,6 @@ struct iwl_cfg iwl2000_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2000_2bg_cfg = {
+- .name = "2000 Series 2x2 BG",
+- IWL_DEVICE_2000,
+-};
+-
+ struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+@@ -304,11 +299,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2030_2bg_cfg = {
+- .name = "2000 Series 2x2 BG/BT",
+- IWL_DEVICE_2030,
+-};
+-
+ #define IWL_DEVICE_105 \
+ .fw_name_pre = IWL105_FW_PRE, \
+ .ucode_api_max = IWL105_UCODE_API_MAX, \
+@@ -326,11 +316,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl105_bg_cfg = {
+- .name = "105 Series 1x1 BG",
+- IWL_DEVICE_105,
+-};
+-
+ struct iwl_cfg iwl105_bgn_cfg = {
+ .name = "105 Series 1x1 BGN",
+ IWL_DEVICE_105,
+@@ -361,11 +346,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl135_bg_cfg = {
+- .name = "135 Series 1x1 BG/BT",
+- IWL_DEVICE_135,
+-};
+-
+ struct iwl_cfg iwl135_bgn_cfg = {
+ .name = "135 Series 1x1 BGN/BT",
+ IWL_DEVICE_135,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 4ac4ef0..e1a43c4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -411,6 +411,17 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
++struct iwl_cfg iwl6005_2agn_mow1_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++struct iwl_cfg iwl6005_2agn_mow2_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++
+ #define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+@@ -469,14 +480,10 @@ struct iwl_cfg iwl6035_2agn_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
+-struct iwl_cfg iwl6035_2abg_cfg = {
+- .name = "6035 Series 2x2 ABG/BT",
+- IWL_DEVICE_6030,
+-};
+-
+-struct iwl_cfg iwl6035_2bg_cfg = {
+- .name = "6035 Series 2x2 BG/BT",
+- IWL_DEVICE_6030,
++struct iwl_cfg iwl6035_2agn_sff_cfg = {
++ .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
++ IWL_DEVICE_6035,
++ .ht_params = &iwl6000_ht_params,
+ };
+
+ struct iwl_cfg iwl1030_bgn_cfg = {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+index 2a2dc45..e786497 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
++++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+@@ -80,6 +80,8 @@ extern struct iwl_cfg iwl6005_2abg_cfg;
+ extern struct iwl_cfg iwl6005_2bg_cfg;
+ extern struct iwl_cfg iwl6005_2agn_sff_cfg;
+ extern struct iwl_cfg iwl6005_2agn_d_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow1_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow2_cfg;
+ extern struct iwl_cfg iwl1030_bgn_cfg;
+ extern struct iwl_cfg iwl1030_bg_cfg;
+ extern struct iwl_cfg iwl6030_2agn_cfg;
+@@ -101,17 +103,12 @@ extern struct iwl_cfg iwl100_bg_cfg;
+ extern struct iwl_cfg iwl130_bgn_cfg;
+ extern struct iwl_cfg iwl130_bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_cfg;
+-extern struct iwl_cfg iwl2000_2bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_d_cfg;
+ extern struct iwl_cfg iwl2030_2bgn_cfg;
+-extern struct iwl_cfg iwl2030_2bg_cfg;
+ extern struct iwl_cfg iwl6035_2agn_cfg;
+-extern struct iwl_cfg iwl6035_2abg_cfg;
+-extern struct iwl_cfg iwl6035_2bg_cfg;
+-extern struct iwl_cfg iwl105_bg_cfg;
++extern struct iwl_cfg iwl6035_2agn_sff_cfg;
+ extern struct iwl_cfg iwl105_bgn_cfg;
+ extern struct iwl_cfg iwl105_bgn_d_cfg;
+-extern struct iwl_cfg iwl135_bg_cfg;
+ extern struct iwl_cfg iwl135_bgn_cfg;
+
+ #endif /* __iwl_pci_h__ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
+index 346dc9b..62a0f81 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
++++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
+@@ -236,13 +236,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+
+ /* 6x00 Series */
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+@@ -250,13 +253,19 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+- {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
++ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+ /* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+@@ -326,46 +335,33 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+ /* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+ /* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+
+ /* 105 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+ /* 135 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
+
+ {0}
+ };
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index bc33b14..a7e1a2c 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+- p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
++ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
++ stats->rx_bufshift);
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 9d7f172..093bf0a 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -88,6 +88,7 @@ struct xenvif {
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
++ u64 credit_window_start;
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 8eaf0e2..2cb9c92 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -272,8 +272,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+- /* Initialize 'expires' now: it's used to track the credit window. */
+- vif->credit_timeout.expires = jiffies;
++ vif->credit_window_start = get_jiffies_64();
+
+ dev->netdev_ops = &xenvif_netdev_ops;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index fd2b92d..9a4626c 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1365,9 +1365,8 @@ out:
+
+ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ {
+- unsigned long now = jiffies;
+- unsigned long next_credit =
+- vif->credit_timeout.expires +
++ u64 now = get_jiffies_64();
++ u64 next_credit = vif->credit_window_start +
+ msecs_to_jiffies(vif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+@@ -1375,8 +1374,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ return true;
+
+ /* Passed the point where we can replenish credit? */
+- if (time_after_eq(now, next_credit)) {
+- vif->credit_timeout.expires = now;
++ if (time_after_eq64(now, next_credit)) {
++ vif->credit_window_start = now;
+ tx_add_credit(vif);
+ }
+
+@@ -1388,6 +1387,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ tx_credit_callback;
+ mod_timer(&vif->credit_timeout,
+ next_credit);
++ vif->credit_window_start = next_credit;
+
+ return true;
+ }
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 8b25f9c..41f08e5 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -188,7 +188,8 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ return ret;
+ }
+
+-static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
++static int _pci_assign_resource(struct pci_dev *dev, int resno,
++ resource_size_t size, resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
+ struct pci_bus *bus;
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 705e13e..2e658d2 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
+ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+ {
+ struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
+ return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+ }
+
+diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
+index 2fa658e..391b768 100644
+--- a/drivers/staging/bcm/Bcmchar.c
++++ b/drivers/staging/bcm/Bcmchar.c
+@@ -1932,6 +1932,7 @@ cntrlEnd:
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+
++ memset(&DevInfo, 0, sizeof(DevInfo));
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
+index 260d4f0..b3d2e17 100644
+--- a/drivers/staging/wlags49_h2/wl_priv.c
++++ b/drivers/staging/wlags49_h2/wl_priv.c
+@@ -570,6 +570,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ ltv_t *pLtv;
+ bool_t ltvAllocated = FALSE;
+ ENCSTRCT sEncryption;
++ size_t len;
+
+ #ifdef USE_WDS
+ hcf_16 hcfPort = HCF_PORT_0;
+@@ -686,7 +687,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ break;
+ case CFG_CNF_OWN_NAME:
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+- memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
++ len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
++ strlcpy(lp->StationName, &pLtv->u.u8[2], len);
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ break;
+ case CFG_CNF_LOAD_BALANCING:
+@@ -1800,6 +1802,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ {
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
++ size_t len;
+ int ret = 0;
+ /*------------------------------------------------------------------------*/
+
+@@ -1810,8 +1813,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ wl_lock(lp, &flags);
+
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+-
+- memcpy( lp->StationName, extra, wrqu->data.length);
++ len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
++ strlcpy(lp->StationName, extra, len);
+
+ /* Commit the adapter parameters */
+ wl_apply( lp );
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d197b3e..e1a4994 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -553,7 +553,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+ /* out of range range */
+- if (unlikely(start >= bound || end >= bound || start > end))
++ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 5c12137..e813227 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -129,10 +129,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+ * pSCSI Host ID and enable for phba mode
+ */
+ sh = scsi_host_lookup(phv->phv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
+ " phv_host_id: %d\n", phv->phv_host_id);
+- return PTR_ERR(sh);
++ return -EINVAL;
+ }
+
+ phv->phv_lld_host = sh;
+@@ -564,10 +564,10 @@ static struct se_device *pscsi_create_virtdevice(
+ sh = phv->phv_lld_host;
+ } else {
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate"
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+- return ERR_CAST(sh);
++ return ERR_PTR(-EINVAL);
+ }
+ }
+ } else {
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index a783d53..af57648 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -650,16 +650,28 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+ int mi = uio_find_mem_index(vma);
++ struct uio_mem *mem;
+ if (mi < 0)
+ return -EINVAL;
++ mem = idev->info->mem + mi;
+
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++ if (vma->vm_end - vma->vm_start > mem->size)
++ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
++ /*
++ * We cannot use the vm_iomap_memory() helper here,
++ * because vma->vm_pgoff is the map index we looked
++ * up above in uio_find_mem_index(), rather than an
++ * actual page offset into the mmap.
++ *
++ * So we just do the physical mmap without a page
++ * offset.
++ */
+ return remap_pfn_range(vma,
+ vma->vm_start,
+- idev->info->mem[mi].addr >> PAGE_SHIFT,
++ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f52182d..bcde6f6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MicroTouch Systems touchscreen */
++ { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Broadcom BCM92035DGROM BT dongle */
+ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MAYA44USB sound device */
++ { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Action Semiconductor flash disk */
+ { USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 24107a7..107e6b4 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1007,20 +1007,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ t1 = xhci_port_state_to_neutral(t1);
+ if (t1 != t2)
+ xhci_writel(xhci, t2, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* enable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp |= PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+ hcd->state = HC_STATE_SUSPENDED;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+@@ -1099,20 +1085,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ xhci_ring_device(xhci, slot_id);
+ } else
+ xhci_writel(xhci, temp, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* disable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp &= ~PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+
+ (void) xhci_readl(xhci, &xhci->op_regs->command);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 61b0668..827f933 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -34,6 +34,9 @@
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++
+ static const char hcd_name[] = "xhci_hcd";
+
+ /* called after powerup, by probe or system-pm "wakeup" */
+@@ -67,6 +70,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+ }
++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
++ pdev->revision == 0x4) {
++ xhci->quirks |= XHCI_SLOW_SUSPEND;
++ xhci_dbg(xhci,
++ "QUIRK: Fresco Logic xHC revision %u"
++ "must be suspended extra slowly",
++ pdev->revision);
++ }
+ /* Fresco Logic confirms: all revisions of this chip do not
+ * support MSI, even though some of them claim to in their PCI
+ * capabilities.
+@@ -103,6 +114,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_AVOID_BEI;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
++ /* Workaround for occasional spurious wakeups from S5 (or
++ * any other sleep) on Haswell machines with LPT and LPT-LP
++ * with the new Intel BIOS
++ */
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -202,6 +222,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ usb_put_hcd(xhci->shared_hcd);
+ }
+ usb_hcd_pci_remove(dev);
++
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(dev, PCI_D3hot);
++
+ kfree(xhci);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 629aa74..03c35da 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -763,12 +763,19 @@ void xhci_shutdown(struct usb_hcd *hcd)
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+ xhci_cleanup_msix(xhci);
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
++
++ /* Yet another workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+ }
+
+ #ifdef CONFIG_PM
+@@ -869,6 +876,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ int xhci_suspend(struct xhci_hcd *xhci)
+ {
+ int rc = 0;
++ unsigned int delay = XHCI_MAX_HALT_USEC;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
+@@ -887,8 +895,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
++
++ /* Some chips from Fresco Logic need an extraordinary delay */
++ delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
++
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
++ STS_HALT, STS_HALT, delay)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8b4cce45..cf4fd24 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1493,6 +1493,8 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
+ #define XHCI_AVOID_BEI (1 << 15)
++#define XHCI_SLOW_SUSPEND (1 << 17)
++#define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
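The xhci hunks above all hang off one mechanism: quirk bits ORed into xhci->quirks at PCI probe time and tested at the affected code paths. XHCI_SLOW_SUSPEND stretches the suspend halt timeout tenfold for Fresco Logic PDK revision 0x4, while XHCI_SPURIOUS_WAKEUP forces the controller into PCI D3hot at shutdown and remove on Lynx Point and Lynx Point-LP. A minimal standalone sketch of the pattern, assuming the kernel's XHCI_MAX_HALT_USEC of 16*1000 microseconds; the struct and helper are illustrative stand-ins, not kernel code:

/* Standalone model of the quirk-bit pattern (illustrative only). */
#include <stdio.h>

#define XHCI_SLOW_SUSPEND    (1 << 17)
#define XHCI_SPURIOUS_WAKEUP (1 << 18)
#define XHCI_MAX_HALT_USEC   (16 * 1000)

struct fake_xhci { unsigned int quirks; };

static unsigned int suspend_halt_delay(const struct fake_xhci *x)
{
	/* XHCI_SLOW_SUSPEND stretches the halt timeout tenfold */
	return XHCI_MAX_HALT_USEC * ((x->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1);
}

int main(void)
{
	struct fake_xhci x = { .quirks = XHCI_SLOW_SUSPEND };

	printf("halt timeout: %u usec\n", suspend_halt_delay(&x));
	return 0;
}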
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 536c4ad..d8ace82 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -457,6 +457,10 @@ static void option_instat_callback(struct urb *urb);
+ #define CHANGHONG_VENDOR_ID 0x2077
+ #define CHANGHONG_PRODUCT_CH690 0x7001
+
++/* Inovia */
++#define INOVIA_VENDOR_ID 0x20a6
++#define INOVIA_SEW858 0x1105
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -703,6 +707,222 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+
+
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+@@ -1279,7 +1499,9 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+- { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist
++ },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+@@ -1367,6 +1589,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
+index 649cb35..1be8b5d 100644
+--- a/drivers/video/au1100fb.c
++++ b/drivers/video/au1100fb.c
+@@ -387,39 +387,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
+ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+ {
+ struct au1100fb_device *fbdev;
+- unsigned int len;
+- unsigned long start=0, off;
+
+ fbdev = to_au1100fb_device(fbi);
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
+
+- vma->vm_flags |= VM_IO;
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot)) {
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static struct fb_ops au1100fb_ops =
+diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
+index 7200559..5bd7d88 100644
+--- a/drivers/video/au1200fb.c
++++ b/drivers/video/au1200fb.c
+@@ -1216,38 +1216,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
+ * method mainly to allow the use of the TLB streaming flag (CCA=6)
+ */
+ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+-
+ {
+- unsigned int len;
+- unsigned long start=0, off;
+ struct au1200fb_device *fbdev = info->par;
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
+
+- vma->vm_flags |= VM_IO;
+-
+- return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot);
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
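Both framebuffer mmap handlers above (au1100fb and au1200fb) delete the same open-coded offset/length validation and io_remap_pfn_range() call in favor of vm_iomap_memory(), which centralizes those checks. A rough userspace model of the bounds test it performs, simplified from mm/memory.c; a PAGE_SHIFT of 12 is assumed for the demo:

/* Userspace model of vm_iomap_memory()'s bounds check (simplified). */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* does a mapping of vm_pages pages at page offset pgoff fit inside a
 * physical window of len bytes starting at start? */
static int mapping_fits(unsigned long start, unsigned long len,
			unsigned long pgoff, unsigned long vm_pages)
{
	unsigned long pages = PAGE_ALIGN(len + (start & ~PAGE_MASK)) >> PAGE_SHIFT;

	if (pgoff > pages)
		return 0;
	return vm_pages <= pages - pgoff;
}

int main(void)
{
	/* a 3-page framebuffer: 2 pages at offset 1 fit, 3 do not */
	printf("%d %d\n",
	       mapping_fits(0x1000, 3 * PAGE_SIZE, 1, 2),
	       mapping_fits(0x1000, 3 * PAGE_SIZE, 1, 3));
	return 0;
}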
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index ac1ad48..5ce56e7 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1151,7 +1151,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ struct ecryptfs_msg_ctx *msg_ctx;
+ struct ecryptfs_message *msg = NULL;
+ char *auth_tok_sig;
+- char *payload;
++ char *payload = NULL;
+ size_t payload_len;
+ int rc;
+
+@@ -1206,6 +1206,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ out:
+ if (msg)
+ kfree(msg);
++ kfree(payload);
+ return rc;
+ }
+
+diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
+index 34f0a07..3268697 100644
+--- a/fs/ext3/dir.c
++++ b/fs/ext3/dir.c
+@@ -25,6 +25,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/buffer_head.h>
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/rbtree.h>
+
+@@ -32,24 +33,8 @@ static unsigned char ext3_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext3_readdir(struct file *, void *, filldir_t);
+ static int ext3_dx_readdir(struct file * filp,
+ void * dirent, filldir_t filldir);
+-static int ext3_release_dir (struct inode * inode,
+- struct file * filp);
+-
+-const struct file_operations ext3_dir_operations = {
+- .llseek = generic_file_llseek,
+- .read = generic_read_dir,
+- .readdir = ext3_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext3_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext3_compat_ioctl,
+-#endif
+- .fsync = ext3_sync_file, /* BKL held */
+- .release = ext3_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +45,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext3_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which could potentially get converted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT3_FEATURE_COMPAT_DIR_INDEX) &&
++ ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
+
+ int ext3_check_dir_entry (const char * function, struct inode * dir,
+ struct ext3_dir_entry_2 * de,
+@@ -99,18 +103,13 @@ static int ext3_readdir(struct file * filp,
+ unsigned long offset;
+ int i, stored;
+ struct ext3_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+- ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext3_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -232,22 +231,87 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
++ * directly on both 32-bit and 64-bit nodes, in which case neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext3_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT3_HTREE_EOF_32BIT;
++ else
++ return EXT3_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
++ *
++ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
++ * we need to pass the max hash as the maximum allowable offset in
++ * the htree directory case.
++ *
++ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
++ * will be invalid once the directory was converted into a dx directory
++ */
++loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ int dx_dir = is_dx_dir(inode);
++
++ if (likely(dx_dir))
++ return generic_file_llseek_size(file, offset, origin,
++ ext3_get_htree_eof(file));
++ else
++ return generic_file_llseek(file, offset, origin);
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -308,15 +372,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -406,7 +471,7 @@ static int call_filldir(struct file * filp, void * dirent,
+ printk("call_filldir: called with null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -431,13 +496,13 @@ static int ext3_dx_readdir(struct file * filp,
+ int ret;
+
+ if (!info) {
+- info = ext3_htree_create_dir_info(filp->f_pos);
++ info = ext3_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT3_HTREE_EOF)
++ if (filp->f_pos == ext3_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -445,8 +510,8 @@ static int ext3_dx_readdir(struct file * filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -478,7 +543,7 @@ static int ext3_dx_readdir(struct file * filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -498,7 +563,7 @@ static int ext3_dx_readdir(struct file * filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -517,3 +582,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
+
+ return 0;
+ }
++
++const struct file_operations ext3_dir_operations = {
++ .llseek = ext3_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext3_readdir,
++ .unlocked_ioctl = ext3_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext3_compat_ioctl,
++#endif
++ .fsync = ext3_sync_file,
++ .release = ext3_release_dir,
++};
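The core of the ext3 rework above is the hash-to-offset encoding: in 64-bit mode the major hash (low bit already cleared by ext3fs_dirhash(), see the hash.c hunk below) lands in the upper 32 bits of f_pos, shifted right once so the signed offset stays non-negative, and the minor hash fills the lower 32 bits; in 32-bit mode only the shifted major hash is used, which is why NFSv2's 32-bit cookies force FMODE_32BITHASH. A standalone round-trip model; the names mirror the helpers above, but this is plain userspace C:

/* Round-trip model of the dx hash <-> f_pos helpers above; use64
 * models the FMODE_64BITHASH decision. */
#include <stdio.h>
#include <stdint.h>

static int64_t hash2pos(uint32_t major, uint32_t minor, int use64)
{
	if (!use64)
		return major >> 1;
	return ((int64_t)(major >> 1) << 32) | minor;
}

static uint32_t pos2maj_hash(int64_t pos, int use64)
{
	if (!use64)
		return (pos << 1) & 0xffffffff;
	return ((pos >> 32) << 1) & 0xffffffff;
}

static uint32_t pos2min_hash(int64_t pos, int use64)
{
	return use64 ? (uint32_t)(pos & 0xffffffff) : 0;
}

int main(void)
{
	/* dirhash clears the low bit, so the >>1 ... <<1 trip is lossless */
	uint32_t major = 0xdeadbeee, minor = 0x12345678;
	int64_t pos = hash2pos(major, minor, 1);

	printf("major 0x%08x minor 0x%08x\n",
	       pos2maj_hash(pos, 1), pos2min_hash(pos, 1));
	return 0;
}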
+diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
+index 7d215b4..d4d3ade 100644
+--- a/fs/ext3/hash.c
++++ b/fs/ext3/hash.c
+@@ -200,8 +200,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT3_HTREE_EOF << 1))
+- hash = (EXT3_HTREE_EOF-1) << 1;
++ if (hash == (EXT3_HTREE_EOF_32BIT << 1))
++ hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 164c560..689d1b1 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext4_readdir(struct file *, void *, filldir_t);
+ static int ext4_dx_readdir(struct file *filp,
+ void *dirent, filldir_t filldir);
+-static int ext4_release_dir(struct inode *inode,
+- struct file *filp);
+-
+-const struct file_operations ext4_dir_operations = {
+- .llseek = ext4_llseek,
+- .read = generic_read_dir,
+- .readdir = ext4_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext4_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext4_compat_ioctl,
+-#endif
+- .fsync = ext4_sync_file,
+- .release = ext4_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext4_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which could potentially get converted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_COMPAT_DIR_INDEX) &&
++ ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
++
+ /*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
+ unsigned int offset;
+ int i, stored;
+ struct ext4_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_COMPAT_DIR_INDEX) &&
+- ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext4_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -254,22 +253,134 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
++ *
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
++ * directly on both 32-bit and 64-bit nodes, in which case neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
++ */
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext4_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT4_HTREE_EOF_32BIT;
++ else
++ return EXT4_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext4_dir_llseek() based on generic_file_llseek() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
++ * will be invalid once the directory has been converted into a dx directory
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ loff_t ret = -EINVAL;
++ int dx_dir = is_dx_dir(inode);
++
++ mutex_lock(&inode->i_mutex);
++
++ /* NOTE: relative offsets with dx directories might not work
++ * as expected, as it is difficult to figure out the
++ * correct offset between dx hashes */
++
++ switch (origin) {
++ case SEEK_END:
++ if (unlikely(offset > 0))
++ goto out_err; /* not supported for directories */
++
++ /* so only negative offsets are left; do those have any
++ * meaning for directories at all? */
++ if (dx_dir)
++ offset += ext4_get_htree_eof(file);
++ else
++ offset += inode->i_size;
++ break;
++ case SEEK_CUR:
++ /*
++ * Here we special-case the lseek(fd, 0, SEEK_CUR)
++ * position-querying operation. Avoid rewriting the "same"
++ * f_pos value back to the file because a concurrent read(),
++ * write() or lseek() might have altered it
++ */
++ if (offset == 0) {
++ offset = file->f_pos;
++ goto out_ok;
++ }
++
++ offset += file->f_pos;
++ break;
++ }
++
++ if (unlikely(offset < 0))
++ goto out_err;
++
++ if (!dx_dir) {
++ if (offset > inode->i_sb->s_maxbytes)
++ goto out_err;
++ } else if (offset > ext4_get_htree_eof(file))
++ goto out_err;
++
++ /* Special lock needed here? */
++ if (offset != file->f_pos) {
++ file->f_pos = offset;
++ file->f_version = 0;
++ }
++
++out_ok:
++ ret = offset;
++out_err:
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -429,7 +541,7 @@ static int call_filldir(struct file *filp, void *dirent,
+ "null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -454,13 +566,13 @@ static int ext4_dx_readdir(struct file *filp,
+ int ret;
+
+ if (!info) {
+- info = ext4_htree_create_dir_info(filp->f_pos);
++ info = ext4_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT4_HTREE_EOF)
++ if (filp->f_pos == ext4_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -468,8 +580,8 @@ static int ext4_dx_readdir(struct file *filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -501,7 +613,7 @@ static int ext4_dx_readdir(struct file *filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -521,7 +633,7 @@ static int ext4_dx_readdir(struct file *filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -540,3 +652,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
+
+ return 0;
+ }
++
++const struct file_operations ext4_dir_operations = {
++ .llseek = ext4_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext4_readdir,
++ .unlocked_ioctl = ext4_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext4_compat_ioctl,
++#endif
++ .fsync = ext4_sync_file,
++ .release = ext4_release_dir,
++};
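Unlike ext3's version, ext4_dir_llseek() above open-codes the seek under i_mutex. Worth noting is the SEEK_CUR fast path: lseek(fd, 0, SEEK_CUR) is a pure position query, and returning f_pos without writing it back avoids racing a concurrent read(), write() or lseek() with a redundant store. A simplified standalone model; error handling collapses to -1 and eof stands for either s_maxbytes or the htree EOF:

/* Simplified model of the directory llseek above. */
#include <stdio.h>

enum { DIR_SEEK_SET, DIR_SEEK_CUR, DIR_SEEK_END };

static long long dir_llseek(long long *f_pos, long long offset, int origin,
			    long long eof)
{
	switch (origin) {
	case DIR_SEEK_END:
		if (offset > 0)
			return -1;	/* not supported for directories */
		offset += eof;
		break;
	case DIR_SEEK_CUR:
		if (offset == 0)
			return *f_pos;	/* pure query: no write-back */
		offset += *f_pos;
		break;
	}
	if (offset < 0 || offset > eof)
		return -1;
	if (offset != *f_pos)
		*f_pos = offset;
	return offset;
}

int main(void)
{
	long long pos = 42;

	printf("%lld\n", dir_llseek(&pos, 0, DIR_SEEK_CUR, 1LL << 40));
	return 0;
}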
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 60b6ca5..22c71b9 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1597,7 +1597,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT4_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext4_htree_next_block
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index ac8f168..fa8e491 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT4_HTREE_EOF << 1))
+- hash = (EXT4_HTREE_EOF-1) << 1;
++ if (hash == (EXT4_HTREE_EOF_32BIT << 1))
++ hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index d5498b2..b4e9f3f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1269,6 +1269,8 @@ retry:
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
++ kfree(is); is = NULL;
++ kfree(bs); bs = NULL;
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
+index c1a3e60..7f464c5 100644
+--- a/fs/jfs/jfs_inode.c
++++ b/fs/jfs/jfs_inode.c
+@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+
+ if (insert_inode_locked(inode) < 0) {
+ rc = -EINVAL;
+- goto fail_unlock;
++ goto fail_put;
+ }
+
+ inode_init_owner(inode, parent, mode);
+@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+ fail_drop:
+ dquot_drop(inode);
+ inode->i_flags |= S_NOQUOTA;
+-fail_unlock:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ fail_put:
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 561a3dc..61b697e 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -726,12 +726,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
+
+ /*
+ * Open an existing file or directory.
+- * The access argument indicates the type of open (read/write/lock)
++ * The may_flags argument indicates the type of open (read/write/lock)
++ * and additional flags.
+ * N.B. After this call fhp needs an fh_put
+ */
+ __be32
+ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+- int access, struct file **filp)
++ int may_flags, struct file **filp)
+ {
+ struct dentry *dentry;
+ struct inode *inode;
+@@ -746,7 +747,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+ * in case a chmod has now revoked permission.
+ */
+- err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE);
++ err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
+ if (err)
+ goto out;
+
+@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * or any access when mandatory locking enabled
+ */
+ err = nfserr_perm;
+- if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE))
++ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ goto out;
+ /*
+ * We must ignore files (but only files) which might have mandatory
+@@ -770,12 +771,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (!inode->i_fop)
+ goto out;
+
+- host_err = nfsd_open_break_lease(inode, access);
++ host_err = nfsd_open_break_lease(inode, may_flags);
+ if (host_err) /* NOMEM or WOULDBLOCK */
+ goto out_nfserr;
+
+- if (access & NFSD_MAY_WRITE) {
+- if (access & NFSD_MAY_READ)
++ if (may_flags & NFSD_MAY_WRITE) {
++ if (may_flags & NFSD_MAY_READ)
+ flags = O_RDWR|O_LARGEFILE;
+ else
+ flags = O_WRONLY|O_LARGEFILE;
+@@ -785,8 +786,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+ *filp = NULL;
+- } else
+- host_err = ima_file_check(*filp, access);
++ } else {
++ host_err = ima_file_check(*filp, may_flags);
++
++ if (may_flags & NFSD_MAY_64BIT_COOKIE)
++ (*filp)->f_mode |= FMODE_64BITHASH;
++ else
++ (*filp)->f_mode |= FMODE_32BITHASH;
++ }
++
+ out_nfserr:
+ err = nfserrno(host_err);
+ out:
+@@ -2016,8 +2024,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
+ __be32 err;
+ struct file *file;
+ loff_t offset = *offsetp;
++ int may_flags = NFSD_MAY_READ;
++
++ /* NFSv2 only supports 32 bit cookies */
++ if (rqstp->rq_vers > 2)
++ may_flags |= NFSD_MAY_64BIT_COOKIE;
+
+- err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file);
++ err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
+ if (err)
+ goto out;
+
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 3f54ad0..85d4d42 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -27,6 +27,8 @@
+ #define NFSD_MAY_BYPASS_GSS 0x400
+ #define NFSD_MAY_READ_IF_EXEC 0x800
+
++#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
++
+ #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
+ #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
+
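The nfsd plumbing above ties the RPC version to the cookie width: nfsd_readdir() sets NFSD_MAY_64BIT_COOKIE for anything newer than NFSv2, and nfsd_open() translates that into FMODE_64BITHASH or FMODE_32BITHASH on the opened directory file, which the ext3/ext4 hash2pos() helpers then consult. A compilable sketch of just the flag translation; the constants are copied from the hunks above, and cookie_fmode() itself is invented for the demo:

/* Flag translation: RPC version -> FMODE_* cookie width. */
#include <stdio.h>

#define NFSD_MAY_64BIT_COOKIE 0x1000
#define FMODE_32BITHASH 0x200
#define FMODE_64BITHASH 0x400

static unsigned int cookie_fmode(unsigned int rq_vers)
{
	/* NFSv2 only supports 32 bit cookies */
	int may_flags = (rq_vers > 2) ? NFSD_MAY_64BIT_COOKIE : 0;

	return (may_flags & NFSD_MAY_64BIT_COOKIE) ? FMODE_64BITHASH
						   : FMODE_32BITHASH;
}

int main(void)
{
	printf("v2 -> %#x, v3 -> %#x\n", cookie_fmode(2), cookie_fmode(3));
	return 0;
}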
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 9cf04a1..a133c3e 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -86,7 +86,7 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
+
+ int fd_statfs(int fd, struct kstatfs *st)
+ {
+- struct file *file = fget(fd);
++ struct file *file = fget_raw(fd);
+ int error = -EBADF;
+ if (file) {
+ error = vfs_statfs(&file->f_path, st);
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index 7639f18..8f4ae68 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -184,6 +184,8 @@ struct drm_mode_get_connector {
+ __u32 connection;
+ __u32 mm_width, mm_height; /**< HxW in millimeters */
+ __u32 subpixel;
++
++ __u32 pad;
+ };
+
+ #define DRM_MODE_PROP_PENDING (1<<0)
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 3fd17c2..5633053 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -5,6 +5,9 @@
+ /*
+ * Common definitions for all gcc versions go here.
+ */
++#define GCC_VERSION (__GNUC__ * 10000 \
++ + __GNUC_MINOR__ * 100 \
++ + __GNUC_PATCHLEVEL__)
+
+
+ /* Optimization barrier */
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index dfadc96..643d6c4 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -29,6 +29,21 @@
+ the kernel context */
+ #define __cold __attribute__((__cold__))
+
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work around it via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#if GCC_VERSION <= 40801
++# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++#else
++# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
++#endif
+
+ #if __GNUC_MINOR__ >= 5
+ /*
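The asm_volatile_goto() workaround above is subtle: affected GCC releases (bug 58670, fixed in 4.8.2) can miscompile code around an 'asm goto', and appending an empty asm("") acts as a compiler barrier that suppresses the bad transformation. A toy usage sketch, assuming an x86 GCC new enough for 'asm goto' (4.5+); the feature test and jump target are invented for illustration:

/* Toy usage of the macro above; the empty asm("") is the whole fix. */
#include <stdio.h>

#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

static int has_feature(void)
{
	asm_volatile_goto("jmp %l[yes]" : : : : yes);
	return 0;
yes:
	return 1;
}

int main(void)
{
	printf("%d\n", has_feature());
	return 0;
}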
+diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+index dec9911..d59ab12 100644
+--- a/include/linux/ext3_fs.h
++++ b/include/linux/ext3_fs.h
+@@ -781,7 +781,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT3_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext3_htree_next_block
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index a276817..dd74385 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -92,6 +92,10 @@ struct inodes_stat_t {
+ /* File is opened using open(.., 3, ..) and is writeable only for ioctls
+ (specialy hack for floppy.c) */
+ #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
++/* 32bit hashes as llseek() offset (for directories) */
++#define FMODE_32BITHASH ((__force fmode_t)0x200)
++/* 64bit hashes as llseek() offset (for directories) */
++#define FMODE_64BITHASH ((__force fmode_t)0x400)
+
+ /*
+ * Don't update ctime and mtime.
+@@ -907,9 +911,11 @@ static inline loff_t i_size_read(const struct inode *inode)
+ static inline void i_size_write(struct inode *inode, loff_t i_size)
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++ preempt_disable();
+ write_seqcount_begin(&inode->i_size_seqcount);
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
++ preempt_enable();
+ #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ inode->i_size = i_size;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index eeb6a29..8d5b91e 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -300,13 +300,15 @@ struct perf_event_mmap_page {
+ /*
+ * Control data for the mmap() data buffer.
+ *
+- * User-space reading the @data_head value should issue an rmb(), on
+- * SMP capable platforms, after reading this value -- see
+- * perf_event_wakeup().
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+- * written by userspace to reflect the last read data. In this case
+- * the kernel will not over-write unread data.
++ * written by userspace to reflect the last read data, after issuing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 29e217a..7e77cee 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -58,6 +58,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
++extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index efe50af..85180bf 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -737,6 +737,16 @@ static inline int skb_cloned(const struct sk_buff *skb)
+ (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
+ }
+
++static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
++{
++ might_sleep_if(pri & __GFP_WAIT);
++
++ if (skb_cloned(skb))
++ return pskb_expand_head(skb, 0, 0, pri);
++
++ return 0;
++}
++
+ /**
+ * skb_header_cloned - is the header a clone
+ * @skb: buffer to check
+@@ -1157,6 +1167,11 @@ static inline int skb_pagelen(const struct sk_buff *skb)
+ return len + skb_headlen(skb);
+ }
+
++static inline bool skb_has_frags(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->nr_frags;
++}
++
+ /**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
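skb_unclone() above packages a common copy-on-write step: before modifying an skb whose data may be shared with a clone, take a private copy via pskb_expand_head(), otherwise write in place. A userspace model of the same discipline; struct buf and strdup() are stand-ins for the skb machinery:

/* Copy-before-write model of skb_unclone(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { int shared; char *data; };

static int unclone(struct buf *b)
{
	char *copy;

	if (!b->shared)
		return 0;		/* already private: nothing to do */
	copy = strdup(b->data);		/* stands in for pskb_expand_head() */
	if (!copy)
		return -1;
	b->data = copy;
	b->shared = 0;
	return 0;
}

int main(void)
{
	struct buf b = { .shared = 1, .data = "payload" };

	if (unclone(&b) == 0)
		b.data[0] = 'P';	/* now safe to modify */
	printf("%s\n", b.data);
	return 0;
}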
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index 08e90fb..5fee575 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -173,6 +173,20 @@ struct timex {
+
+ #include <asm/timex.h>
+
++#ifndef random_get_entropy
++/*
++ * The random_get_entropy() function is used by the /dev/random driver
++ * in order to extract entropy via the relative unpredictability of
++ * when an interrupt takes place versus a high speed, fine-grained
++ * timing source or cycle counter. Since it will be called on every
++ * single interrupt, it must have a very low cost/overhead.
++ *
++ * By default we use get_cycles() for this purpose, but individual
++ * architectures may override this in their asm/timex.h header file.
++ */
++#define random_get_entropy() get_cycles()
++#endif
++
+ /*
+ * SHIFT_PLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in PLL mode.
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a7a683e..a8c2ef6 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char err_offset = 0;
+ u8 opt_len = opt[1];
+ u8 opt_iter;
++ u8 tag_len;
+
+ if (opt_len < 8) {
+ err_offset = 1;
+@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
+- if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ tag_len = opt[opt_iter + 1];
++ if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+ goto out;
+ }
+- opt_iter += opt[opt_iter + 1];
++ opt_iter += tag_len;
+ }
+
+ out:
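The cipso_v4_validate() fix above guards the type-length tag walk: a zero tag length previously left opt_iter unchanged, so a crafted IP option could spin the loop forever. A standalone model of the corrected walk; the offsets follow the CIPSO layout above, where tags start at byte 6 and each tag's length sits at offset 1:

/* TLV walk with the zero-length guard that prevents the infinite loop. */
#include <stdio.h>

static int validate_tags(const unsigned char *opt, unsigned char opt_len)
{
	unsigned char opt_iter, tag_len;

	for (opt_iter = 6; opt_iter < opt_len;) {
		tag_len = opt[opt_iter + 1];
		if (tag_len == 0 || tag_len > opt_len - opt_iter)
			return opt_iter + 1;	/* error offset */
		opt_iter += tag_len;
	}
	return 0;	/* option parses cleanly */
}

int main(void)
{
	unsigned char bad[10] = { 0 };	/* zero-length tag at offset 6 */

	printf("err offset: %d\n", validate_tags(bad, sizeof(bad)));
	return 0;
}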
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 16010d1..86ef78d 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -459,10 +459,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
+ {
+ return dst_orig;
+ }
++
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return NULL;
++}
++
+ #else
+ extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
++
++/* skb attached with this dst needs transformation if dst->xfrm is valid */
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return dst->xfrm;
++}
+ #endif
+
+ #endif /* _NET_DST_H */
+diff --git a/init/main.c b/init/main.c
+index 5d0eb1d..7474450 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/random.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -732,6 +733,7 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
++ random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 7f3011c..58c3b51 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -75,10 +75,31 @@ again:
+ goto out;
+
+ /*
+- * Publish the known good head. Rely on the full barrier implied
+- * by atomic_dec_and_test() order the rb->head read and this
+- * write.
++ * Since the mmap() consumer (userspace) can run on a different CPU:
++ *
++ * kernel user
++ *
++ * READ ->data_tail READ ->data_head
++ * smp_mb() (A) smp_rmb() (C)
++ * WRITE $data READ $data
++ * smp_wmb() (B) smp_mb() (D)
++ * STORE ->data_head WRITE ->data_tail
++ *
++ * Where A pairs with D, and B pairs with C.
++ *
++ * I don't think A needs to be a full barrier because we won't in fact
++ * write data until we see the store from userspace. So we simply don't
++ * issue the data WRITE until we observe it. Be conservative for now.
++ *
++ * OTOH, D needs to be a full barrier since it separates the data READ
++ * from the tail WRITE.
++ *
++ * For B a WMB is sufficient since it separates two WRITEs, and for C
++ * an RMB is sufficient since it separates two READs.
++ *
++ * See perf_output_begin().
+ */
++ smp_wmb();
+ rb->user_page->data_head = head;
+
+ /*
+@@ -142,9 +163,11 @@ int perf_output_begin(struct perf_output_handle *handle,
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
++ *
++ * See perf_output_put_handle().
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+- smp_rmb();
++ smp_mb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))
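
For the pairing described in the comment above, the userspace half looks roughly like the sketch below. Everything here is an assumption for illustration: data_head/data_tail stand for the fields of the mmap'ed control page, and __sync_synchronize() stands in for the rmb()/mb() the comment labels C and D:

    #include <stdint.h>

    static uint64_t drain_ring(volatile uint64_t *data_head,
                               volatile uint64_t *data_tail, uint64_t tail)
    {
            uint64_t head = *data_head;     /* READ ->data_head */
            __sync_synchronize();           /* (C) pairs with kernel (B) */
            while (tail < head) {
                    /* READ $data: consume the record at 'tail' here */
                    tail += 8;              /* illustrative fixed record size */
            }
            __sync_synchronize();           /* (D) full barrier, pairs with (A) */
            *data_tail = tail;              /* WRITE ->data_tail */
            return tail;
    }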
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ce1067f..c5a12a7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -534,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ if (isspace(ch)) {
+ parser->buffer[parser->idx] = 0;
+ parser->cont = false;
+- } else {
++ } else if (parser->idx < parser->size - 1) {
+ parser->cont = true;
+ parser->buffer[parser->idx++] = ch;
++ } else {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ *ppos += read;
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 4ceb05d..2ffcb3c 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -419,7 +419,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+- if (miter->__flags & SG_MITER_TO_SG)
++ if ((miter->__flags & SG_MITER_TO_SG) &&
++ !PageSlab(miter->page))
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 235c219..c705612 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -152,7 +152,7 @@ static size_t vlan_get_size(const struct net_device *dev)
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+
+ return nla_total_size(2) + /* IFLA_VLAN_ID */
+- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
++ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
+ vlan_qos_map_size(vlan->nr_ingress_mappings) +
+ vlan_qos_map_size(vlan->nr_egress_mappings);
+ }
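
The fix above is about accounting: nla_total_size() includes the attribute header and alignment padding, while a bare sizeof() counts only the payload, so the skb allocated for the dump could come up short. A toy model of the arithmetic, with illustrative names:

    /* struct nlattr16 stands in for struct nlattr (u16 len + u16 type). */
    struct nlattr16 { unsigned short nla_len, nla_type; };

    #define ALIGNTO_SK   4
    #define ALIGN_SK(n)  (((n) + ALIGNTO_SK - 1) & ~(ALIGNTO_SK - 1))

    static int nla_total_size_sketch(int payload)
    {
            /* header + payload, rounded up to attribute alignment */
            return ALIGN_SK((int)sizeof(struct nlattr16) + payload);
    }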
+diff --git a/net/compat.c b/net/compat.c
+index 8c979cc..3139ef2 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 984ec65..4afcf31 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -268,7 +268,7 @@ begintw:
+ }
+ if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie,
+ saddr, daddr, ports, dif))) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index daf408e..16191f0 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -833,7 +833,7 @@ static int __ip_append_data(struct sock *sk,
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++ if (((length > mtu) || (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c45a155a3..6768ce2 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2727,7 +2727,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
+ RT_SCOPE_LINK);
+ goto make_route;
+ }
+- if (fl4->saddr) {
++ if (!fl4->saddr) {
+ if (ipv4_is_multicast(fl4->daddr))
+ fl4->saddr = inet_select_addr(dev_out, 0,
+ fl4->flowi4_scope);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 872b41d..c1ed01e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1469,7 +1469,10 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ tp->lost_cnt_hint -= tcp_skb_pcount(prev);
+ }
+
+- TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
++ TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
++ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++ TCP_SKB_CB(prev)->end_seq++;
++
+ if (skb == tcp_highest_sack(sk))
+ tcp_advance_highest_sack(sk, skb);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 3add486..0d5a118 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -933,6 +933,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
+ unsigned int mss_now)
+ {
++ /* Make sure we own this skb before messing with gso_size/gso_segs */
++ WARN_ON_ONCE(skb_cloned(skb));
++
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
+ /* Avoid the costly divide in the normal
+@@ -1014,9 +1017,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (skb_cloned(skb) &&
+- skb_is_nonlinear(skb) &&
+- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
+
+ /* Get a new skb... force flag on. */
+@@ -2129,6 +2130,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ int oldpcount = tcp_skb_pcount(skb);
+
+ if (unlikely(oldpcount > 1)) {
++ if (skb_unclone(skb, GFP_ATOMIC))
++ return -ENOMEM;
+ tcp_init_tso_segs(sk, skb, cur_mss);
+ tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
+ }
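
Both tcp_fragment() and tcp_retransmit_skb() now use the same unclone-before-write pattern: if the skb's data is shared with a clone, take a private copy of the head before touching gso_size/gso_segs. A sketch of the helper's shape (the skb_unclone() of later kernels is essentially this):

    /* skb_cloned()/pskb_expand_head() are the real primitives; this
     * sketch only shows the pattern the hunks above rely on. */
    static inline int skb_unclone_sketch(struct sk_buff *skb, gfp_t pri)
    {
            if (skb_cloned(skb))
                    return pskb_expand_head(skb, 0, 0, pri);
            return 0;
    }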
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 73f1a00..e38290b 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -110,7 +110,7 @@ begintw:
+ goto out;
+ }
+ if (!INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 91d0711..97675bf 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1342,7 +1342,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ cork->length += length;
+ if (((length > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 18ea73c..bc9103d 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -791,7 +791,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
+ }
+
+ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
+- struct flowi6 *fl6, int flags)
++ struct flowi6 *fl6, int flags, bool input)
+ {
+ struct fib6_node *fn;
+ struct rt6_info *rt, *nrt;
+@@ -799,8 +799,11 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+ int attempts = 3;
+ int err;
+ int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
++ int local = RTF_NONEXTHOP;
+
+ strict |= flags & RT6_LOOKUP_F_IFACE;
++ if (input)
++ local |= RTF_LOCAL;
+
+ relookup:
+ read_lock_bh(&table->tb6_lock);
+@@ -820,7 +823,7 @@ restart:
+ read_unlock_bh(&table->tb6_lock);
+
+ if (!dst_get_neighbour_raw(&rt->dst)
+- && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
++ && !(rt->rt6i_flags & local))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+@@ -864,7 +867,7 @@ out2:
+ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags, true);
+ }
+
+ void ip6_route_input(struct sk_buff *skb)
+@@ -890,7 +893,7 @@ void ip6_route_input(struct sk_buff *skb)
+ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags, false);
+ }
+
+ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index e579006..8570079 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -357,7 +357,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ goto error_put_sess_tun;
+ }
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+@@ -432,7 +434,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(sk_tun);
+ sock_put(sk);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 73495f1..a9cf593 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -708,6 +708,8 @@ struct tpt_led_trigger {
+ * that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+ * a scan complete for an aborted scan.
++ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
++ * cancelled.
+ */
+ enum {
+ SCAN_SW_SCANNING,
+@@ -715,6 +717,7 @@ enum {
+ SCAN_OFF_CHANNEL,
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
++ SCAN_HW_CANCELLED,
+ };
+
+ /**
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 7d882fc..db01d02 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2780,6 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
+ case NL80211_IFTYPE_ADHOC:
+ if (!bssid)
+ return 0;
++ if (compare_ether_addr(sdata->vif.addr, hdr->addr2) == 0 ||
++ compare_ether_addr(sdata->u.ibss.bssid, hdr->addr2) == 0)
++ return 0;
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ return 1;
+ }
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 5279300..0aeea49 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -224,6 +224,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
+ enum ieee80211_band band;
+ int i, ielen, n_chans;
+
++ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
++ return false;
++
+ do {
+ if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+ return false;
+@@ -815,7 +818,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ if (!local->scan_req)
+ goto out;
+
++ /*
++ * We have a scan running and the driver already reported completion,
++ * but the worker hasn't run yet or is stuck on the mutex - mark it as
++ * cancelled.
++ */
++ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
++ test_bit(SCAN_COMPLETED, &local->scanning)) {
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
++ goto out;
++ }
++
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
++ /*
++ * Make sure that __ieee80211_scan_completed doesn't trigger a
++ * scan on another band.
++ */
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ if (local->ops->cancel_hw_scan)
+ drv_cancel_hw_scan(local, local->scan_sdata);
+ goto out;
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 67df50e..1a49354 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -181,6 +181,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
++ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
++ sta->last_rx = jiffies;
++
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 93faf6a..4a8c55b 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1468,7 +1468,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+
+ msglen = origlen = end - dptr;
+ if (msglen > datalen)
+- return NF_DROP;
++ return NF_ACCEPT;
+
+ ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+ if (ret != NF_ACCEPT)
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 32ba8d0..cf3e22c 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -518,7 +518,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+ */
+ if (!sctp_checksum_disable) {
+- if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
++ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
++ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
+ __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+
+ /* 3) Put the resultant value into the checksum field in the
+diff --git a/net/socket.c b/net/socket.c
+index cf546a3..bf7adaa 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1876,6 +1876,16 @@ struct used_address {
+ unsigned int name_len;
+ };
+
++static int copy_msghdr_from_user(struct msghdr *kmsg,
++ struct msghdr __user *umsg)
++{
++ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
++ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
++ return 0;
++}
++
+ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
+@@ -1894,8 +1904,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+@@ -2110,8 +2123,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ err = -EMSGSIZE;
+ if (msg_sys->msg_iovlen > UIO_MAXIOV)
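
Together with the net/compat.c hunk earlier, this closes the same hole on both ABIs: msg_namelen is attacker-controlled and is later used as a copy length into a sockaddr_storage-sized buffer, so it must be capped before any copy happens. The generic shape of the check, with illustrative names:

    #include <string.h>

    struct storage { char data[128]; };   /* stands in for sockaddr_storage */

    static int copy_name_checked(struct storage *dst, const void *src,
                                 unsigned long namelen)
    {
            if (namelen > sizeof(*dst))
                    return -1;            /* -EINVAL in the kernel paths */
            memcpy(dst, src, namelen);
            return 0;
    }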
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5611563..5122b22 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1236,6 +1236,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
+ return 0;
+ }
+
++static void unix_sock_inherit_flags(const struct socket *old,
++ struct socket *new)
++{
++ if (test_bit(SOCK_PASSCRED, &old->flags))
++ set_bit(SOCK_PASSCRED, &new->flags);
++ if (test_bit(SOCK_PASSSEC, &old->flags))
++ set_bit(SOCK_PASSSEC, &new->flags);
++}
++
+ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ {
+ struct sock *sk = sock->sk;
+@@ -1270,6 +1279,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ /* attach accepted sock to socket */
+ unix_state_lock(tsk);
+ newsock->state = SS_CONNECTED;
++ unix_sock_inherit_flags(sock, newsock);
+ sock_graft(tsk, newsock);
+ unix_state_unlock(tsk);
+ return 0;
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index c4ad795..617a310 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -95,6 +95,10 @@ int ieee80211_radiotap_iterator_init(
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
+ {
++ /* check that the radiotap header can actually fit in the buffer */
++ if (max_length < sizeof(struct ieee80211_radiotap_header))
++ return -EINVAL;
++
+ /* Linux only supports version 0 radiotap format */
+ if (radiotap_header->it_version)
+ return -EINVAL;
+@@ -129,7 +133,8 @@ int ieee80211_radiotap_iterator_init(
+ */
+
+ if ((unsigned long)iterator->_arg -
+- (unsigned long)iterator->_rtheader >
++ (unsigned long)iterator->_rtheader +
++ sizeof(uint32_t) >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
+ }
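
Both radiotap checks follow one rule: validate a length against the buffer before trusting it, whether for the fixed header or for the cursor about to read a u32 bitmap word. A compact sketch with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    struct rt_hdr { uint8_t it_version, it_pad; uint16_t it_len; };

    static int iter_bounds_ok(size_t buf_len, size_t cursor)
    {
            if (buf_len < sizeof(struct rt_hdr))
                    return 0;             /* header cannot even be present */
            if (cursor + sizeof(uint32_t) > buf_len)
                    return 0;             /* next bitmap word would overrun */
            return 1;
    }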
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1b43fde..92c913d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5798,6 +5798,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
++ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index 3642e06..e44c0e3 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -414,6 +414,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
+ hubs->hp_startup_mode);
+ break;
+ }
++ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b516488..1d83a40 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1523,7 +1523,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ w->active ? "active" : "inactive");
+
+ list_for_each_entry(p, &w->sources, list_sink) {
+- if (p->connected && !p->connected(w, p->sink))
++ if (p->connected && !p->connected(w, p->source))
+ continue;
+
+ if (p->connect)
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index d5724d8..1226631 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
+ usX2Y_clients_stop(usX2Y);
+ }
+
+-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
+- struct snd_usX2Y_substream *subs, struct urb *urb)
+-{
+- snd_printk(KERN_ERR
+-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
+-"Most propably some urb of usb-frame %i is still missing.\n"
+-"Cause could be too long delays in usb-hcd interrupt handling.\n",
+- usb_get_current_frame_number(usX2Y->dev),
+- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
+- usX2Y_clients_stop(usX2Y);
+-}
+-
+ static void i_usX2Y_urb_complete(struct urb *urb)
+ {
+ struct snd_usX2Y_substream *subs = urb->context;
+@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
++
++ subs->completed_urb = urb;
++
+ {
+ struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
+ *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index a51340f..83a8b8d 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
+
++ subs->completed_urb = urb;
+ capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index 5177964..714fc35 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -14,6 +14,7 @@
+ #include "util/debug.h"
+
+ #include <sys/prctl.h>
++#include <sys/resource.h>
+
+ #include <semaphore.h>
+ #include <pthread.h>
diff --git a/3.2.54/1053_linux-3.2.54.patch b/3.2.54/1053_linux-3.2.54.patch
new file mode 100644
index 0000000..a907496
--- /dev/null
+++ b/3.2.54/1053_linux-3.2.54.patch
@@ -0,0 +1,6825 @@
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 1f24636..2a68089 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -283,13 +283,24 @@ Default value is "/sbin/hotplug".
+ kptr_restrict:
+
+ This toggle indicates whether restrictions are placed on
+-exposing kernel addresses via /proc and other interfaces. When
+-kptr_restrict is set to (0), there are no restrictions. When
+-kptr_restrict is set to (1), the default, kernel pointers
+-printed using the %pK format specifier will be replaced with 0's
+-unless the user has CAP_SYSLOG. When kptr_restrict is set to
+-(2), kernel pointers printed using %pK will be replaced with 0's
+-regardless of privileges.
++exposing kernel addresses via /proc and other interfaces.
++
++When kptr_restrict is set to (0), the default, there are no restrictions.
++
++When kptr_restrict is set to (1), kernel pointers printed using the %pK
++format specifier will be replaced with 0's unless the user has CAP_SYSLOG
++and effective user and group ids are equal to the real ids. This is
++because %pK checks are done at read() time rather than open() time, so
++if permissions are elevated between the open() and the read() (e.g. via
++a setuid binary) then %pK will not leak kernel pointers to unprivileged
++users. Note, this is a temporary solution only. The correct long-term
++solution is to do the permission checks at open() time. Consider removing
++world read permissions from files that use %pK, and using dmesg_restrict
++to protect against uses of %pK in dmesg(8) if leaking kernel pointer
++values to unprivileged users is a concern.
++
++When kptr_restrict is set to (2), kernel pointers printed using
++%pK will be replaced with 0's regardless of privileges.
+
+ ==============================================================
+
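
A minimal sketch of the producer side this text describes: a seq_file show routine printing a pointer with %pK (a hypothetical handler; what the reader sees depends on kptr_restrict and, per the paragraph above, on the reader's credentials at read() time):

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    /* Illustrative show() for a proc/debugfs file; with kptr_restrict=1
     * an unprivileged reader gets zeros instead of the address. */
    static int show_object(struct seq_file *m, void *v)
    {
            seq_printf(m, "object at %pK\n", v);
            return 0;
    }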
+diff --git a/Makefile b/Makefile
+index 90f57dc..848be26 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 7bb8bf9..b7c5d5d 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -307,4 +307,12 @@
+ .size \name , . - \name
+ .endm
+
++ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
++#ifndef CONFIG_CPU_USE_DOMAINS
++ adds \tmp, \addr, #\size - 1
++ sbcccs \tmp, \tmp, \limit
++ bcs \bad
++#endif
++ .endm
++
+ #endif /* __ASM_ASSEMBLER_H__ */
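
check_uaccess packs an overflow-safe range check into two instructions: adds computes addr + size - 1 and sets carry on wrap, and sbcccs compares the result against the limit only when no wrap occurred, so the final bcs catches both a wrapped add and an address above the limit. The same logic in C, as a sketch:

    /* Non-zero iff [addr, addr + size) fits at or below 'limit'
     * (addr_limit - 1) without wrapping the address space. */
    static int access_in_range(unsigned long addr, unsigned long size,
                               unsigned long limit)
    {
            unsigned long last = addr + size - 1;

            return last >= addr && last <= limit;
    }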
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index b293616..292c3f8 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
+ extern int __get_user_2(void *);
+ extern int __get_user_4(void *);
+
+-#define __get_user_x(__r2,__p,__e,__s,__i...) \
++#define __GUP_CLOBBER_1 "lr", "cc"
++#ifdef CONFIG_CPU_USE_DOMAINS
++#define __GUP_CLOBBER_2 "ip", "lr", "cc"
++#else
++#define __GUP_CLOBBER_2 "lr", "cc"
++#endif
++#define __GUP_CLOBBER_4 "lr", "cc"
++
++#define __get_user_x(__r2,__p,__e,__l,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
++ __asmeq("%3", "r1") \
+ "bl __get_user_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+- : "0" (__p) \
+- : __i, "cc")
++ : "0" (__p), "r" (__l) \
++ : __GUP_CLOBBER_##__s)
+
+ #define get_user(x,p) \
+ ({ \
++ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __r2 asm("r2"); \
++ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+- __get_user_x(__r2, __p, __e, 1, "lr"); \
+- break; \
++ __get_user_x(__r2, __p, __e, __l, 1); \
++ break; \
+ case 2: \
+- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
++ __get_user_x(__r2, __p, __e, __l, 2); \
+ break; \
+ case 4: \
+- __get_user_x(__r2, __p, __e, 4, "lr"); \
++ __get_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ default: __e = __get_user_bad(); break; \
+ } \
+@@ -135,31 +146,34 @@ extern int __put_user_2(void *, unsigned int);
+ extern int __put_user_4(void *, unsigned int);
+ extern int __put_user_8(void *, unsigned long long);
+
+-#define __put_user_x(__r2,__p,__e,__s) \
++#define __put_user_x(__r2,__p,__e,__l,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
++ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+- : "0" (__p), "r" (__r2) \
++ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc")
+
+ #define put_user(x,p) \
+ ({ \
++ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __r2 asm("r2") = (x); \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
++ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+- __put_user_x(__r2, __p, __e, 1); \
++ __put_user_x(__r2, __p, __e, __l, 1); \
+ break; \
+ case 2: \
+- __put_user_x(__r2, __p, __e, 2); \
++ __put_user_x(__r2, __p, __e, __l, 2); \
+ break; \
+ case 4: \
+- __put_user_x(__r2, __p, __e, 4); \
++ __put_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ case 8: \
+- __put_user_x(__r2, __p, __e, 8); \
++ __put_user_x(__r2, __p, __e, __l, 8); \
+ break; \
+ default: __e = __put_user_bad(); break; \
+ } \
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index e68d251..d9e3c61 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -468,6 +468,7 @@ EXPORT_SYMBOL(kernel_thread);
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ struct stackframe frame;
++ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+@@ -476,9 +477,11 @@ unsigned long get_wchan(struct task_struct *p)
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
++ stack_page = (unsigned long)task_stack_page(p);
+ do {
+- int ret = unwind_frame(&frame);
+- if (ret < 0)
++ if (frame.sp < stack_page ||
++ frame.sp >= stack_page + THREAD_SIZE ||
++ unwind_frame(&frame) < 0)
+ return 0;
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
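
The extra condition in get_wchan() refuses to follow a saved stack pointer that has wandered outside the task's own stack, and the stacktrace.c hunk below tightens the matching frame-pointer bound. The invariant, sketched with an illustrative stand-in for the arch's THREAD_SIZE:

    #define THREAD_SIZE_SK 8192UL   /* stand-in for the real THREAD_SIZE */

    static int sp_on_task_stack(unsigned long sp, unsigned long stack_page)
    {
            return sp >= stack_page && sp < stack_page + THREAD_SIZE_SK;
    }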
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index 00f79e5..af4e8c8 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
+ high = ALIGN(low, THREAD_SIZE);
+
+ /* check current frame pointer is within bounds */
+- if (fp < (low + 12) || fp + 4 >= high)
++ if (fp < low + 12 || fp > high - 4)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
+diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
+index 1b049cd..4306fbf 100644
+--- a/arch/arm/lib/getuser.S
++++ b/arch/arm/lib/getuser.S
+@@ -16,8 +16,9 @@
+ * __get_user_X
+ *
+ * Inputs: r0 contains the address
++ * r1 contains the address limit, which must be preserved
+ * Outputs: r0 is the error code
+- * r2, r3 contains the zero-extended value
++ * r2 contains the zero-extended value
+ * lr corrupted
+ *
+ * No other registers must be altered. (see <asm/uaccess.h>
+@@ -27,33 +28,39 @@
+ * Note also that it is intended that __get_user_bad is not global.
+ */
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ #include <asm/errno.h>
+ #include <asm/domain.h>
+
+ ENTRY(__get_user_1)
++ check_uaccess r0, 1, r1, r2, __get_user_bad
+ 1: T(ldrb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_1)
+
+ ENTRY(__get_user_2)
+-#ifdef CONFIG_THUMB2_KERNEL
+-2: T(ldrb) r2, [r0]
+-3: T(ldrb) r3, [r0, #1]
++ check_uaccess r0, 2, r1, r2, __get_user_bad
++#ifdef CONFIG_CPU_USE_DOMAINS
++rb .req ip
++2: ldrbt r2, [r0], #1
++3: ldrbt rb, [r0], #0
+ #else
+-2: T(ldrb) r2, [r0], #1
+-3: T(ldrb) r3, [r0]
++rb .req r0
++2: ldrb r2, [r0]
++3: ldrb rb, [r0, #1]
+ #endif
+ #ifndef __ARMEB__
+- orr r2, r2, r3, lsl #8
++ orr r2, r2, rb, lsl #8
+ #else
+- orr r2, r3, r2, lsl #8
++ orr r2, rb, r2, lsl #8
+ #endif
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_2)
+
+ ENTRY(__get_user_4)
++ check_uaccess r0, 4, r1, r2, __get_user_bad
+ 4: T(ldr) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
+index c023fc1..9a897fa 100644
+--- a/arch/arm/lib/putuser.S
++++ b/arch/arm/lib/putuser.S
+@@ -16,6 +16,7 @@
+ * __put_user_X
+ *
+ * Inputs: r0 contains the address
++ * r1 contains the address limit, which must be preserved
+ * r2, r3 contains the value
+ * Outputs: r0 is the error code
+ * lr corrupted
+@@ -27,16 +28,19 @@
+ * Note also that it is intended that __put_user_bad is not global.
+ */
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ #include <asm/errno.h>
+ #include <asm/domain.h>
+
+ ENTRY(__put_user_1)
++ check_uaccess r0, 1, r1, ip, __put_user_bad
+ 1: T(strb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_1)
+
+ ENTRY(__put_user_2)
++ check_uaccess r0, 2, r1, ip, __put_user_bad
+ mov ip, r2, lsr #8
+ #ifdef CONFIG_THUMB2_KERNEL
+ #ifndef __ARMEB__
+@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
+ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
++ check_uaccess r0, 4, r1, ip, __put_user_bad
+ 4: T(str) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_4)
+
+ ENTRY(__put_user_8)
++ check_uaccess r0, 8, r1, ip, __put_user_bad
+ #ifdef CONFIG_THUMB2_KERNEL
+ 5: T(str) r2, [r0]
+ 6: T(str) r3, [r0, #4]
+diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
+index 38a44f9..5b91e45 100644
+--- a/arch/arm/mach-footbridge/common.c
++++ b/arch/arm/mach-footbridge/common.c
+@@ -15,6 +15,7 @@
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
++#include <video/vga.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+@@ -197,6 +198,8 @@ void __init footbridge_map_io(void)
+ */
+ if (footbridge_cfn_mode())
+ iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
++
++ vga_base = PCIMEM_BASE;
+ }
+
+ #ifdef CONFIG_FOOTBRIDGE_ADDIN
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 18c32a5..a8dfa00 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -18,7 +18,6 @@
+ #include <linux/irq.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
+-#include <video/vga.h>
+
+ #include <asm/irq.h>
+ #include <asm/system.h>
+@@ -297,7 +296,6 @@ void __init dc21285_preinit(void)
+ int cfn_mode;
+
+ pcibios_min_mem = 0x81000000;
+- vga_base = PCIMEM_BASE;
+
+ mem_size = (unsigned int)high_memory - PAGE_OFFSET;
+ for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
+diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
+index 5de49c3..892d0d6 100644
+--- a/arch/arm/mach-integrator/integrator_cp.c
++++ b/arch/arm/mach-integrator/integrator_cp.c
+@@ -384,7 +384,8 @@ static struct amba_device aaci_device = {
+ static void cp_clcd_enable(struct clcd_fb *fb)
+ {
+ struct fb_var_screeninfo *var = &fb->fb.var;
+- u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
++ u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
++ | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
+
+ if (var->bits_per_pixel <= 8 ||
+ (var->bits_per_pixel == 16 && var->green.length == 5))
+diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
+index 01e9d64..0e25348 100644
+--- a/arch/arm/mach-pxa/reset.c
++++ b/arch/arm/mach-pxa/reset.c
+@@ -12,6 +12,7 @@
+
+ #include <mach/regs-ost.h>
+ #include <mach/reset.h>
++#include <mach/smemc.h>
+
+ unsigned int reset_status;
+ EXPORT_SYMBOL(reset_status);
+@@ -79,6 +80,12 @@ static void do_hw_reset(void)
+ OWER = OWER_WME;
+ OSSR = OSSR_M3;
+ OSMR3 = OSCR + 368640; /* ... in 100 ms */
++ /*
++ * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71),
++ * so we put SDRAM into self-refresh to prevent that.
++ */
++ while (1)
++ writel_relaxed(MDREFR_SLFRSH, MDREFR);
+ }
+
+ void arch_reset(char mode, const char *cmd)
+@@ -99,4 +106,3 @@ void arch_reset(char mode, const char *cmd)
+ break;
+ }
+ }
+-
+diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
+index 402b0c96..33dd57c 100644
+--- a/arch/arm/mach-pxa/tosa.c
++++ b/arch/arm/mach-pxa/tosa.c
+@@ -424,57 +424,57 @@ static struct platform_device tosa_power_device = {
+ * Tosa Keyboard
+ */
+ static const uint32_t tosakbd_keymap[] = {
+- KEY(0, 2, KEY_W),
+- KEY(0, 6, KEY_K),
+- KEY(0, 7, KEY_BACKSPACE),
+- KEY(0, 8, KEY_P),
+- KEY(1, 1, KEY_Q),
+- KEY(1, 2, KEY_E),
+- KEY(1, 3, KEY_T),
+- KEY(1, 4, KEY_Y),
+- KEY(1, 6, KEY_O),
+- KEY(1, 7, KEY_I),
+- KEY(1, 8, KEY_COMMA),
+- KEY(2, 1, KEY_A),
+- KEY(2, 2, KEY_D),
+- KEY(2, 3, KEY_G),
+- KEY(2, 4, KEY_U),
+- KEY(2, 6, KEY_L),
+- KEY(2, 7, KEY_ENTER),
+- KEY(2, 8, KEY_DOT),
+- KEY(3, 1, KEY_Z),
+- KEY(3, 2, KEY_C),
+- KEY(3, 3, KEY_V),
+- KEY(3, 4, KEY_J),
+- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
+- KEY(3, 6, TOSA_KEY_CANCEL),
+- KEY(3, 7, TOSA_KEY_CENTER),
+- KEY(3, 8, TOSA_KEY_OK),
+- KEY(3, 9, KEY_LEFTSHIFT),
+- KEY(4, 1, KEY_S),
+- KEY(4, 2, KEY_R),
+- KEY(4, 3, KEY_B),
+- KEY(4, 4, KEY_N),
+- KEY(4, 5, TOSA_KEY_CALENDAR),
+- KEY(4, 6, TOSA_KEY_HOMEPAGE),
+- KEY(4, 7, KEY_LEFTCTRL),
+- KEY(4, 8, TOSA_KEY_LIGHT),
+- KEY(4, 10, KEY_RIGHTSHIFT),
+- KEY(5, 1, KEY_TAB),
+- KEY(5, 2, KEY_SLASH),
+- KEY(5, 3, KEY_H),
+- KEY(5, 4, KEY_M),
+- KEY(5, 5, TOSA_KEY_MENU),
+- KEY(5, 7, KEY_UP),
+- KEY(5, 11, TOSA_KEY_FN),
+- KEY(6, 1, KEY_X),
+- KEY(6, 2, KEY_F),
+- KEY(6, 3, KEY_SPACE),
+- KEY(6, 4, KEY_APOSTROPHE),
+- KEY(6, 5, TOSA_KEY_MAIL),
+- KEY(6, 6, KEY_LEFT),
+- KEY(6, 7, KEY_DOWN),
+- KEY(6, 8, KEY_RIGHT),
++ KEY(0, 1, KEY_W),
++ KEY(0, 5, KEY_K),
++ KEY(0, 6, KEY_BACKSPACE),
++ KEY(0, 7, KEY_P),
++ KEY(1, 0, KEY_Q),
++ KEY(1, 1, KEY_E),
++ KEY(1, 2, KEY_T),
++ KEY(1, 3, KEY_Y),
++ KEY(1, 5, KEY_O),
++ KEY(1, 6, KEY_I),
++ KEY(1, 7, KEY_COMMA),
++ KEY(2, 0, KEY_A),
++ KEY(2, 1, KEY_D),
++ KEY(2, 2, KEY_G),
++ KEY(2, 3, KEY_U),
++ KEY(2, 5, KEY_L),
++ KEY(2, 6, KEY_ENTER),
++ KEY(2, 7, KEY_DOT),
++ KEY(3, 0, KEY_Z),
++ KEY(3, 1, KEY_C),
++ KEY(3, 2, KEY_V),
++ KEY(3, 3, KEY_J),
++ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
++ KEY(3, 5, TOSA_KEY_CANCEL),
++ KEY(3, 6, TOSA_KEY_CENTER),
++ KEY(3, 7, TOSA_KEY_OK),
++ KEY(3, 8, KEY_LEFTSHIFT),
++ KEY(4, 0, KEY_S),
++ KEY(4, 1, KEY_R),
++ KEY(4, 2, KEY_B),
++ KEY(4, 3, KEY_N),
++ KEY(4, 4, TOSA_KEY_CALENDAR),
++ KEY(4, 5, TOSA_KEY_HOMEPAGE),
++ KEY(4, 6, KEY_LEFTCTRL),
++ KEY(4, 7, TOSA_KEY_LIGHT),
++ KEY(4, 9, KEY_RIGHTSHIFT),
++ KEY(5, 0, KEY_TAB),
++ KEY(5, 1, KEY_SLASH),
++ KEY(5, 2, KEY_H),
++ KEY(5, 3, KEY_M),
++ KEY(5, 4, TOSA_KEY_MENU),
++ KEY(5, 6, KEY_UP),
++ KEY(5, 10, TOSA_KEY_FN),
++ KEY(6, 0, KEY_X),
++ KEY(6, 1, KEY_F),
++ KEY(6, 2, KEY_SPACE),
++ KEY(6, 3, KEY_APOSTROPHE),
++ KEY(6, 4, TOSA_KEY_MAIL),
++ KEY(6, 5, KEY_LEFT),
++ KEY(6, 6, KEY_DOWN),
++ KEY(6, 7, KEY_RIGHT),
+ };
+
+ static struct matrix_keymap_data tosakbd_keymap_data = {
+diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
+index 3dd133f..ef8d9d8 100644
+--- a/arch/arm/mach-sa1100/assabet.c
++++ b/arch/arm/mach-sa1100/assabet.c
+@@ -411,6 +411,9 @@ static void __init assabet_map_io(void)
+ * Its called GPCLKR0 in my SA1110 manual.
+ */
+ Ser1SDCR0 |= SDCR0_SUS;
++ MSC1 = (MSC1 & ~0xffff) |
++ MSC_NonBrst | MSC_32BitStMem |
++ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
+
+ if (machine_has_neponset()) {
+ #ifdef CONFIG_ASSABET_NEPONSET
+diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
+index 4488fa2..2ffc298 100644
+--- a/arch/avr32/boot/u-boot/head.S
++++ b/arch/avr32/boot/u-boot/head.S
+@@ -8,6 +8,8 @@
+ * published by the Free Software Foundation.
+ */
+ #include <asm/setup.h>
++#include <asm/thread_info.h>
++#include <asm/sysreg.h>
+
+ /*
+ * The kernel is loaded where we want it to be and all caches
+@@ -20,11 +22,6 @@
+ .section .init.text,"ax"
+ .global _start
+ _start:
+- /* Check if the boot loader actually provided a tag table */
+- lddpc r0, magic_number
+- cp.w r12, r0
+- brne no_tag_table
+-
+ /* Initialize .bss */
+ lddpc r2, bss_start_addr
+ lddpc r3, end_addr
+@@ -34,6 +31,25 @@ _start:
+ cp r2, r3
+ brlo 1b
+
++ /* Initialize status register */
++ lddpc r0, init_sr
++ mtsr SYSREG_SR, r0
++
++ /* Set initial stack pointer */
++ lddpc sp, stack_addr
++ sub sp, -THREAD_SIZE
++
++#ifdef CONFIG_FRAME_POINTER
++ /* Mark last stack frame */
++ mov lr, 0
++ mov r7, 0
++#endif
++
++ /* Check if the boot loader actually provided a tag table */
++ lddpc r0, magic_number
++ cp.w r12, r0
++ brne no_tag_table
++
+ /*
+ * Save the tag table address for later use. This must be done
+ * _after_ .bss has been initialized...
+@@ -53,8 +69,15 @@ bss_start_addr:
+ .long __bss_start
+ end_addr:
+ .long _end
++init_sr:
++ .long 0x007f0000 /* Supervisor mode, everything masked */
++stack_addr:
++ .long init_thread_union
++panic_addr:
++ .long panic
+
+ no_tag_table:
+ sub r12, pc, (. - 2f)
+- bral panic
++ /* branch to panic(), which may be too far away for a relative branch */
++ lddpc pc, panic_addr
+ 2: .asciz "Boot loader didn't provide correct magic number\n"
+diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
+index 169268c..a91e898 100644
+--- a/arch/avr32/kernel/entry-avr32b.S
++++ b/arch/avr32/kernel/entry-avr32b.S
+@@ -399,9 +399,10 @@ handle_critical:
+ /* We should never get here... */
+ bad_return:
+ sub r12, pc, (. - 1f)
+- bral panic
++ lddpc pc, 2f
+ .align 2
+ 1: .asciz "Return from critical exception!"
++2: .long panic
+
+ .align 1
+ do_bus_error_write:
+diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
+index 6163bd0..59eae6d 100644
+--- a/arch/avr32/kernel/head.S
++++ b/arch/avr32/kernel/head.S
+@@ -10,33 +10,13 @@
+ #include <linux/linkage.h>
+
+ #include <asm/page.h>
+-#include <asm/thread_info.h>
+-#include <asm/sysreg.h>
+
+ .section .init.text,"ax"
+ .global kernel_entry
+ kernel_entry:
+- /* Initialize status register */
+- lddpc r0, init_sr
+- mtsr SYSREG_SR, r0
+-
+- /* Set initial stack pointer */
+- lddpc sp, stack_addr
+- sub sp, -THREAD_SIZE
+-
+-#ifdef CONFIG_FRAME_POINTER
+- /* Mark last stack frame */
+- mov lr, 0
+- mov r7, 0
+-#endif
+-
+ /* Start the show */
+ lddpc pc, kernel_start_addr
+
+ .align 2
+-init_sr:
+- .long 0x007f0000 /* Supervisor mode, everything masked */
+-stack_addr:
+- .long init_thread_union
+ kernel_start_addr:
+ .long start_kernel
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index ac12ae2..db9a16c 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -3,6 +3,7 @@
+
+ #include <asm/page.h> /* for __va, __pa */
+ #include <arch/io.h>
++#include <asm-generic/iomap.h>
+ #include <linux/kernel.h>
+
+ struct cris_io_operations
+diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
+index d9f397f..fba7696 100644
+--- a/arch/ia64/include/asm/processor.h
++++ b/arch/ia64/include/asm/processor.h
+@@ -320,7 +320,7 @@ struct thread_struct {
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+- if (unlikely(!get_dumpable(current->mm))) { \
++ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 836a5a1..fa1e56b 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ #endif /* CONFIG_ALTIVEC */
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
+ return 1;
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSR 0-31 upper half from thread_struct to local
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index a50b5ec..60d1f75 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -116,6 +116,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ flush_fp_to_thread(current);
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSX low doubleword to local buffer for formatting,
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index e74f86e..304680a 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -235,8 +235,6 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ if (i == vpa->dtl_idx)
+ return 0;
+ while (i < vpa->dtl_idx) {
+- if (dtl_consumer)
+- dtl_consumer(dtl, i);
+ dtb = dtl->timebase;
+ tb_delta = dtl->enqueue_to_dispatch_time +
+ dtl->ready_to_enqueue_time;
+@@ -249,6 +247,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ }
+ if (dtb > stop_tb)
+ break;
++ if (dtl_consumer)
++ dtl_consumer(dtl, i);
+ stolen += tb_delta;
+ ++i;
+ ++dtl;
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index f65af61..dfb1c19 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -1351,11 +1351,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ const char *cp;
+
+ dn = dev->of_node;
+- if (!dn)
+- return -ENODEV;
++ if (!dn) {
++ strcpy(buf, "\n");
++ return strlen(buf);
++ }
+ cp = of_get_property(dn, "compatible", NULL);
+- if (!cp)
+- return -ENODEV;
++ if (!cp) {
++ strcpy(buf, "\n");
++ return strlen(buf);
++ }
+
+ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+ }
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index a9ce135..3ec8b39 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -35,7 +35,6 @@ static u8 *ctrblk;
+ static char keylen_flag;
+
+ struct s390_aes_ctx {
+- u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ long enc;
+ long dec;
+@@ -56,8 +55,7 @@ struct pcc_param {
+
+ struct s390_xts_ctx {
+ u8 key[32];
+- u8 xts_param[16];
+- struct pcc_param pcc;
++ u8 pcc_key[32];
+ long enc;
+ long dec;
+ int key_len;
+@@ -442,29 +440,35 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ return aes_set_key(tfm, in_key, key_len);
+ }
+
+-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
++static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct blkcipher_walk *walk)
+ {
++ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
++ struct {
++ u8 iv[AES_BLOCK_SIZE];
++ u8 key[AES_MAX_KEY_SIZE];
++ } param;
+
+ if (!nbytes)
+ goto out;
+
+- memcpy(param, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.key, sctx->key, sctx->key_len);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+- ret = crypt_s390_kmc(func, param, out, in, n);
++ ret = crypt_s390_kmc(func, &param, out, in, n);
+ BUG_ON((ret < 0) || (ret != n));
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+- memcpy(walk->iv, param, AES_BLOCK_SIZE);
++ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
+
+ out:
+ return ret;
+@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->enc, &walk);
+ }
+
+ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->dec, &walk);
+ }
+
+ static struct crypto_alg cbc_aes_alg = {
+@@ -587,7 +591,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_128_ENCRYPT;
+ xts_ctx->dec = KM_XTS_128_DECRYPT;
+ memcpy(xts_ctx->key + 16, in_key, 16);
+- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
++ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
+ break;
+ case 48:
+ xts_ctx->enc = 0;
+@@ -598,7 +602,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_256_ENCRYPT;
+ xts_ctx->dec = KM_XTS_256_DECRYPT;
+ memcpy(xts_ctx->key, in_key, 32);
+- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
++ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
+ break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+@@ -617,28 +621,32 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ unsigned int nbytes = walk->nbytes;
+ unsigned int n;
+ u8 *in, *out;
+- void *param;
++ struct pcc_param pcc_param;
++ struct {
++ u8 key[32];
++ u8 init[16];
++ } xts_param;
+
+ if (!nbytes)
+ goto out;
+
+- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+- param = xts_ctx->pcc.key + offset;
+- ret = crypt_s390_pcc(func, param);
++ memset(pcc_param.block, 0, sizeof(pcc_param.block));
++ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
++ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
++ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
++ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
++ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+ BUG_ON(ret < 0);
+
+- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+- param = xts_ctx->key + offset;
++ memcpy(xts_param.key, xts_ctx->key, 32);
++ memcpy(xts_param.init, pcc_param.xts, 16);
+ do {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+
+- ret = crypt_s390_km(func, param, out, in, n);
++ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+ BUG_ON(ret < 0 || ret != n);
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 425162e..2f53b89 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -15,6 +15,8 @@
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/wait.h>
++#include <sys/time.h>
++#include <sys/resource.h>
+ #include <asm/unistd.h>
+ #include "init.h"
+ #include "os.h"
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 95365a8..e80542b 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -51,18 +51,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
+
+ # How to compile the 16-bit code. Note we always compile for -march=i386,
+ # that way we can complain to the user if the CPU is insufficient.
+-KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
++KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer \
++ -mno-mmx -mno-sse \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+- $(call cc-option, -fno-unit-at-a-time)) \
++ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+-KBUILD_CFLAGS += $(call cc-option, -m32)
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 77453c6..cda5cef 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -12,6 +12,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ cflags-$(CONFIG_X86_32) := -march=i386
+ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
++KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 13ad899..69e231b 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -95,10 +95,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ cpu_emergency_vmxoff();
+ cpu_emergency_svm_disable();
+
+- lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++ lapic_shutdown();
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index 1ef962b..f9b9eaa 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -331,7 +331,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+
+ if (request_firmware(&fw, (const char *)fw_name, device)) {
+- pr_err("failed to load file %s\n", fw_name);
++ pr_debug("failed to load file %s\n", fw_name);
+ goto out;
+ }
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index a4e1b4b..f411aca 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -652,6 +652,13 @@ void native_machine_shutdown(void)
+
+ /* The boot cpu is always logical cpu 0 */
+ int reboot_cpu_id = 0;
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ disable_IO_APIC();
++#endif
++
++#ifdef CONFIG_SMP
+
+ #ifdef CONFIG_X86_32
+ /* See if there has been given a command line override */
+@@ -675,10 +682,6 @@ void native_machine_shutdown(void)
+
+ lapic_shutdown();
+
+-#ifdef CONFIG_X86_IO_APIC
+- disable_IO_APIC();
+-#endif
+-
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 54abb40..43e7753 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -537,7 +537,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
+ ASSERT(apic != NULL);
+
+ /* if initial count is 0, current count should also be 0 */
+- if (apic_get_reg(apic, APIC_TMICT) == 0)
++ if (apic_get_reg(apic, APIC_TMICT) == 0 ||
++ apic->lapic_timer.period == 0)
+ return 0;
+
+ remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index a18d20d..bee75a6 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -614,11 +614,6 @@ void __init efi_init(void)
+
+ set_bit(EFI_MEMMAP, &x86_efi_facility);
+
+-#ifdef CONFIG_X86_32
+- x86_platform.get_wallclock = efi_get_time;
+- x86_platform.set_wallclock = efi_set_rtc_mmss;
+-#endif
+-
+ #if EFI_DEBUG
+ print_efi_memmap();
+ #endif
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 49d9e91..a219c89 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -483,6 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ }
+
+ if (blk_throtl_init(q)) {
++ bdi_destroy(&q->backing_dev_info);
+ kmem_cache_free(blk_requestq_cachep, q);
+ return NULL;
+ }
+@@ -2015,6 +2016,7 @@ void blk_start_request(struct request *req)
+ if (unlikely(blk_bidi_rq(req)))
+ req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
++ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ blk_add_timer(req);
+ }
+ EXPORT_SYMBOL(blk_start_request);
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index 7803548..b1182ea 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -90,8 +90,8 @@ static void blk_rq_timed_out(struct request *req)
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+- blk_clear_rq_complete(req);
+ blk_add_timer(req);
++ blk_clear_rq_complete(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+@@ -173,7 +173,6 @@ void blk_add_timer(struct request *req)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 0262210..8502462 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ struct hash_ctx *ctx = ask->private;
+ int err;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+@@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a1c4f0a..a19c027 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ struct skcipher_sg_list *sgl;
+ int err = -EINVAL;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+@@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
+- msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
+index ffa0245..6056178 100644
+--- a/crypto/ansi_cprng.c
++++ b/crypto/ansi_cprng.c
+@@ -230,11 +230,11 @@ remainder:
+ */
+ if (byte_count < DEFAULT_BLK_SZ) {
+ empty_rbuf:
+- for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
+- ctx->rand_data_valid++) {
++ while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
+ *ptr = ctx->rand_data[ctx->rand_data_valid];
+ ptr++;
+ byte_count--;
++ ctx->rand_data_valid++;
+ if (byte_count == 0)
+ goto done;
+ }
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 5ef7ba6..d21da2f 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -368,9 +368,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ if (!err) {
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+- struct ablkcipher_request *abreq = aead_request_ctx(areq);
+- u8 *iv = (u8 *)(abreq + 1) +
+- crypto_ablkcipher_reqsize(ctx->enc);
++ struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
++ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
++ + ctx->reqoff);
++ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+
+ err = crypto_authenc_genicv(areq, iv, 0);
+ }
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index c36d654..2002ca7 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
+ }
+
+ /* compute plaintext into mac */
+- get_data_to_compute(cipher, pctx, plain, cryptlen);
++ if (cryptlen)
++ get_data_to_compute(cipher, pctx, plain, cryptlen);
+
+ out:
+ return err;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 0445f52..d29f6d5 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -303,6 +303,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -437,6 +441,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
++ .driver_data = board_ahci_yes_fbs },
+
+ /* Promise */
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 43b8758..6692108 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -204,6 +204,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
+
+ static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "calxeda,hb-ahci", },
++ { .compatible = "ibm,476gtr-ahci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_of_match);
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 60def03..de2802c 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1247,9 +1247,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ {
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
++ struct ahci_port_priv *pp = ap->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
++ bool fbs_disabled = false;
+ int rc;
+
+ DPRINTK("ENTER\n");
+@@ -1259,6 +1261,16 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
++ /*
++	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
++	 * clear PxFBS.EN to '0' prior to issuing a software reset to
++	 * devices attached to a port multiplier.
++ */
++ if (!ata_is_host_link(link) && pp->fbs_enabled) {
++ ahci_disable_fbs(ap);
++ fbs_disabled = true;
++ }
++
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+@@ -1299,6 +1311,10 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ } else
+ *class = ahci_dev_classify(ap);
+
++ /* re-enable FBS if disabled before */
++ if (fbs_disabled)
++ ahci_enable_fbs(ap);
++
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d54b7d6..a0a3987 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4067,6 +4067,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
++ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index ce9dc62..c01f040 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -312,25 +312,25 @@ int ata_tport_add(struct device *parent,
+ /*
+ * ATA link attributes
+ */
++static int noop(int x) { return x; }
+
+-
+-#define ata_link_show_linkspeed(field) \
++#define ata_link_show_linkspeed(field, format) \
+ static ssize_t \
+ show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
++ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
+ }
+
+-#define ata_link_linkspeed_attr(field) \
+- ata_link_show_linkspeed(field) \
++#define ata_link_linkspeed_attr(field, format) \
++ ata_link_show_linkspeed(field, format) \
+ static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
+
+-ata_link_linkspeed_attr(hw_sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd);
++ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd, noop);
+
+
+ static DECLARE_TRANSPORT_CLASS(ata_link_class,
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 1c05212..b0e75ce 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -3513,7 +3513,7 @@ init_card(struct atm_dev *dev)
+ tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
+ if (tmp) {
+ memcpy(card->atmdev->esi, tmp->dev_addr, 6);
+-
++ dev_put(tmp);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
+ }
+ /*
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 968a0d4..f35975f 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -547,7 +547,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+
+ mutex_lock(&brd_devices_mutex);
+ brd = brd_init_one(MINOR(dev) >> part_shift);
+- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
++ kobj = brd ? get_disk(brd->brd_disk) : NULL;
+ mutex_unlock(&brd_devices_mutex);
+
+ *part = 0;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index a365562..d659135 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1635,7 +1635,7 @@ static int loop_add(struct loop_device **l, int i)
+
+ lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!lo->lo_queue)
+- goto out_free_dev;
++ goto out_free_idr;
+
+ disk = lo->lo_disk = alloc_disk(1 << part_shift);
+ if (!disk)
+@@ -1679,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i)
+
+ out_free_queue:
+ blk_cleanup_queue(lo->lo_queue);
++out_free_idr:
++ idr_remove(&loop_index_idr, i);
+ out_free_dev:
+ kfree(lo);
+ out:
+@@ -1742,7 +1744,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+- kobj = ERR_PTR(err);
++ kobj = NULL;
+ else
+ kobj = get_disk(lo->lo_disk);
+ mutex_unlock(&loop_index_mutex);
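Both the brd and loop hunks above change their probe callbacks to return NULL instead of ERR_PTR(err). The apparent reason, hedged here since the commit message is not quoted, is that the block-probe caller only tests for NULL, so an encoded-errno pointer would be treated as a live kobject. A minimal user-space model of the ERR_PTR convention shows why the two failure styles must not be mixed:

#include <stdio.h>

/* Minimal model of the kernel's ERR_PTR convention: small negative
 * errnos are encoded at the very top of the address space. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12 /* -ENOMEM */);

	/* A caller that only checks for NULL would dereference this
	 * pointer; an API contracted to return NULL on failure must
	 * therefore never hand back ERR_PTR values. */
	printf("IS_ERR=%d err=%ld null=%d\n", IS_ERR(p), PTR_ERR(p), p == NULL);
	return 0;
}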
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index 6e40072..51efcbc 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
+ },
+ },
++ {
++ .ident = "Dell XPS421",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 66d5384..094a710 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -31,11 +31,23 @@
+ #include <linux/ptrace.h>
+ #include <linux/atomic.h>
+
+-#include <asm/unaligned.h>
+-
+ #include <linux/cn_proc.h>
+
+-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
++/*
++ * Size of a cn_msg followed by a proc_event structure. Since the
++ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
++ * add one 4-byte word to the size here, and then start the actual
++ * cn_msg structure 4 bytes into the stack buffer. The result is that
++ * the immediately following proc_event structure is aligned to 8 bytes.
++ */
++#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
++
++/* See comment above; we test our assumption about sizeof struct cn_msg here. */
++static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
++{
++ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
++ return (struct cn_msg *)(buffer + 4);
++}
+
+ static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
+ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
+@@ -55,19 +67,19 @@ void proc_fork_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ struct task_struct *parent;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_FORK;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+@@ -90,17 +102,17 @@ void proc_exec_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXEC;
+ ev->event_data.exec.process_pid = task->pid;
+ ev->event_data.exec.process_tgid = task->tgid;
+@@ -116,14 +128,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ const struct cred *cred;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+@@ -144,7 +156,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ rcu_read_unlock();
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+@@ -158,17 +170,17 @@ void proc_sid_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_SID;
+ ev->event_data.sid.process_pid = task->pid;
+ ev->event_data.sid.process_tgid = task->tgid;
+@@ -185,17 +197,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+@@ -220,17 +232,17 @@ void proc_comm_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+@@ -247,18 +259,18 @@ void proc_exit_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXIT;
+ ev->event_data.exit.process_pid = task->pid;
+ ev->event_data.exit.process_tgid = task->tgid;
+@@ -284,18 +296,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->cpu = -1;
+ ev->what = PROC_EVENT_NONE;
+ ev->event_data.ack.err = err;
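The cn_proc hunks above remove put_unaligned() by over-allocating the stack buffer and starting the cn_msg header 4 bytes in, so the proc_event that follows the 20-byte header lands on an 8-byte boundary. The stand-alone sketch below demonstrates the same trick; struct hdr and struct payload are illustrative stand-ins for cn_msg and proc_event, not the kernel's definitions, and the layout assumption is checked the same way the hunk's BUILD_BUG_ON does.

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct hdr {                    /* stand-in for struct cn_msg: 20 bytes */
	uint32_t id[2];
	uint32_t seq;
	uint32_t ack;
	uint16_t len;
	uint16_t flags;
};

struct payload {                /* stand-in for struct proc_event */
	uint64_t timestamp_ns;  /* wants 8-byte alignment for plain stores */
};

/* Header + payload + one 4-byte pad word (see buffer_to_hdr). */
#define MSG_SIZE (sizeof(struct hdr) + sizeof(struct payload) + 4)

/*
 * sizeof(struct hdr) is 20: a multiple of 4 but not of 8. Starting the
 * header 4 bytes into an 8-aligned buffer makes it end at offset 24,
 * so the payload immediately after it is 8-byte aligned.
 */
static struct hdr *buffer_to_hdr(uint8_t *buffer)
{
	_Static_assert(sizeof(struct hdr) == 20, "layout assumption");
	return (struct hdr *)(buffer + 4);
}

int main(void)
{
	uint8_t buffer[MSG_SIZE] __attribute__((aligned(8)));
	struct hdr *h = buffer_to_hdr(buffer);
	struct payload *p = (struct payload *)(h + 1);

	assert(((uintptr_t)p % 8) == 0); /* no put_unaligned() required */
	memset(h, 0, sizeof(*h) + sizeof(*p));
	p->timestamp_ns = 123456789ULL;
	return 0;
}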
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index edcffd6..34be13b 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -69,10 +69,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+ u32 val;
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
++ u32 out_mask, out_shadow;
+
+- val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
++ out_mask = in_be32(mm->regs + GPIO_DIR);
+
+- return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
++ val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
++ out_shadow = mpc8xxx_gc->data & out_mask;
++
++ return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
+ }
+
+ static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index ee29c1f..6d36695 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6063,7 +6063,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(CURBASE(pipe), base);
++ POSTING_READ(CURBASE(pipe));
+ }
+
+ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+@@ -6088,7 +6090,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR_IVB(pipe));
+ I915_WRITE(CURBASE_IVB(pipe), base);
++ POSTING_READ(CURBASE_IVB(pipe));
+ }
+
+ /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 7ce3fde..bd0b1fc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -281,7 +281,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+ list_for_each_safe(entry, tmp, list) {
+ nvbo = list_entry(entry, struct nouveau_bo, entry);
+
+- nouveau_bo_fence(nvbo, fence);
++ if (likely(fence))
++ nouveau_bo_fence(nvbo, fence);
+
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index daadf21..a9238b0 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -416,12 +416,40 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Pin framebuffer & get tilling informations */
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
++retry:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
++
++	/* On old GPUs like the RN50 with little VRAM, pinning can fail
++	 * because the current fb is taking all the space needed. So
++	 * instead of unpinning the old buffer after pinning the new one,
++	 * first unpin the old one and then retry pinning the new one.
++	 *
++	 * As only the master can set the mode, only the master can pin,
++	 * and it is unlikely the master client will race with itself,
++	 * especially on those old GPUs with a single crtc.
++	 *
++	 * We don't shut down the display controller because the new
++	 * buffer will end up in the same spot.
++	 */
++ if (!atomic && fb && fb != crtc->fb) {
++ struct radeon_bo *old_rbo;
++ unsigned long nsize, osize;
++
++ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
++ osize = radeon_bo_size(old_rbo);
++ nsize = radeon_bo_size(rbo);
++ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
++ radeon_bo_unpin(old_rbo);
++ radeon_bo_unreserve(old_rbo);
++ fb = NULL;
++ goto retry;
++ }
++ }
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 0d27bff..22a89cd 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1101,24 +1101,32 @@ out_unlock:
+ return ret;
+ }
+
+-static int ttm_bo_mem_compat(struct ttm_placement *placement,
+- struct ttm_mem_reg *mem)
++static bool ttm_bo_mem_compat(struct ttm_placement *placement,
++ struct ttm_mem_reg *mem,
++ uint32_t *new_flags)
+ {
+ int i;
+
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+- return -1;
++ return false;
+
+ for (i = 0; i < placement->num_placement; i++) {
+- if ((placement->placement[i] & mem->placement &
+- TTM_PL_MASK_CACHING) &&
+- (placement->placement[i] & mem->placement &
+- TTM_PL_MASK_MEM))
+- return i;
++ *new_flags = placement->placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
++ }
++
++ for (i = 0; i < placement->num_busy_placement; i++) {
++ *new_flags = placement->busy_placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
+ }
+- return -1;
++
++ return false;
+ }
+
+ int ttm_bo_validate(struct ttm_buffer_object *bo,
+@@ -1127,6 +1135,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ bool no_wait_gpu)
+ {
+ int ret;
++ uint32_t new_flags;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ /* Check that range is valid */
+@@ -1137,8 +1146,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ /*
+ * Check whether we need to move buffer.
+ */
+- ret = ttm_bo_mem_compat(placement, &bo->mem);
+- if (ret < 0) {
++ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ if (ret)
+ return ret;
+@@ -1147,7 +1155,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
++ ttm_flag_masked(&bo->mem.placement, new_flags,
+ ~TTM_PL_MASK_MEMTYPE);
+ }
+ /*
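The ttm_bo_mem_compat() rewrite above is a common C refactor: replace an index-or-minus-one return with a bool plus an out-parameter, which also lets a second array (the busy placements) share the same reporting path. A generic sketch of the before/after shape, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct placement {
	const uint32_t *flags;
	unsigned num;
};

/* Old style: return an index into flags[], or -1 on no match. The
 * caller must remember the sentinel and index back into the array. */
static int find_compat_idx(const struct placement *p, uint32_t cur)
{
	for (unsigned i = 0; i < p->num; i++)
		if (p->flags[i] & cur)
			return (int)i;
	return -1;
}

/* New style: report success as bool and hand back the matched flags
 * directly, so the caller never touches the array again. */
static bool find_compat(const struct placement *p, uint32_t cur,
			uint32_t *new_flags)
{
	for (unsigned i = 0; i < p->num; i++) {
		*new_flags = p->flags[i];
		if (*new_flags & cur)
			return true;
	}
	return false;
}

int main(void)
{
	const uint32_t flags[] = { 0x1, 0x4 };
	struct placement p = { flags, 2 };
	uint32_t out;

	printf("%d\n", find_compat_idx(&p, 0x4));            /* 1 */
	printf("%d %#x\n", find_compat(&p, 0x4, &out), out); /* 1 0x4 */
	return 0;
}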
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 13af0f1..a605ba1 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -66,7 +66,7 @@ struct mt_device {
+ unsigned last_field_index; /* last field index of the report */
+ unsigned last_slot_field; /* the last field of a slot */
+ int last_mt_collection; /* last known mt-related collection */
+- __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
++ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __u8 num_received; /* how many contacts we received */
+ __u8 num_expected; /* expected last contact index */
+ __u8 maxcontacts;
+diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
+index 6df0b46..a42a7b0 100644
+--- a/drivers/hwmon/lm78.c
++++ b/drivers/hwmon/lm78.c
+@@ -90,6 +90,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
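The lm78 hunk above (and the identical sis5595 one below) adds an upper clamp to FAN_TO_REG(): for a large enough rpm, rpm * div overflows a 32-bit long before SENSORS_LIMIT can bound the result. A user-space rendering of the guarded conversion; SENSORS_LIMIT is reconstructed here from its usual clamp-to-range semantics:

#include <stdio.h>

/* Typical hwmon clamp helper: bound value to [low, high]. */
#define SENSORS_LIMIT(val, low, high) \
	((val) < (low) ? (low) : ((val) > (high) ? (high) : (val)))

static unsigned char fan_to_reg(long rpm, int div)
{
	if (rpm <= 0)
		return 255;            /* stopped / invalid */
	if (rpm > 1350000)
		return 1;              /* clamp before rpm * div can overflow */
	return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

int main(void)
{
	/* Without the clamp, a huge rpm could overflow rpm * div on 32-bit. */
	printf("%d\n", fan_to_reg(4500, 2));      /* 150: normal reading */
	printf("%d\n", fan_to_reg(2000000, 8));   /* 1: clamped */
	return 0;
}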
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index 615bc4f..6d5ece1 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -268,7 +268,7 @@ static const struct lm90_params lm90_params[] = {
+ [max6696] = {
+ .flags = LM90_HAVE_EMERGENCY
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+- .alert_alarms = 0x187c,
++ .alert_alarms = 0x1c7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
+@@ -1474,19 +1474,22 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
+ if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
+ dev_info(&client->dev, "Everything OK\n");
+ } else {
+- if (alarms & 0x61)
++ if ((alarms & 0x61) || (alarms2 & 0x80))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+- if (alarms & 0x1a)
++ if ((alarms & 0x1a) || (alarms2 & 0x20))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (alarms & 0x04)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+
+- if (alarms2 & 0x18)
++ if (alarms2 & 0x5a)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
++ if (alarms2 & 0x04)
++ dev_warn(&client->dev,
++ "temp%d diode open, please check!\n", 3);
+
+ /* Disable ALERT# output, because these chips don't implement
+ SMBus alert correctly; they should only hold the alert line
+diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
+index 47d7ce9..5ab6953 100644
+--- a/drivers/hwmon/sis5595.c
++++ b/drivers/hwmon/sis5595.c
+@@ -133,6 +133,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
+diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
+index db3b2e8..6df67a9 100644
+--- a/drivers/hwmon/vt8231.c
++++ b/drivers/hwmon/vt8231.c
+@@ -139,7 +139,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
+ */
+ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+- if (rpm == 0)
++ if (rpm <= 0 || rpm > 1310720)
+ return 0;
+ return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+ }
+diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
+index 0254e18..b9c0a7f 100644
+--- a/drivers/hwmon/w83l786ng.c
++++ b/drivers/hwmon/w83l786ng.c
+@@ -447,8 +447,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
+ struct w83l786ng_data *data = i2c_get_clientdata(client);
+ u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
+
++ val = DIV_ROUND_CLOSEST(val, 0x11);
++
+ mutex_lock(&data->update_lock);
+- data->pwm[nr] = val;
++ data->pwm[nr] = val * 0x11;
++ val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
+ w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
+ mutex_unlock(&data->update_lock);
+ return count;
+@@ -471,7 +474,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
+ mutex_lock(&data->update_lock);
+ reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
+ data->pwm_enable[nr] = val;
+- reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
++ reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
+ w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
+ mutex_unlock(&data->update_lock);
+@@ -740,9 +743,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
+ ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
+ ? 0 : 1;
+ data->pwm_enable[i] =
+- ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
+- data->pwm[i] = w83l786ng_read_value(client,
+- W83L786NG_REG_PWM[i]);
++ ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
++ data->pwm[i] =
++ (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
++ & 0x0f) * 0x11;
+ }
+
+
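The w83l786ng hunks above encode that the chip's PWM field is only 4 bits wide: multiplying by 0x11 (17) maps 0..15 exactly onto 0..255 since 0xf * 0x11 = 0xff, and DIV_ROUND_CLOSEST(val, 0x11) inverts the mapping. A small sketch of the round trip, with DIV_ROUND_CLOSEST reduced to its non-negative kernel definition:

#include <stdio.h>

/* Kernel helper, simplified for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static unsigned user_to_reg(unsigned val)   /* 0..255 -> 0..15 */
{
	return DIV_ROUND_CLOSEST(val, 0x11);
}

static unsigned reg_to_user(unsigned reg)   /* 0..15 -> 0..255 */
{
	return (reg & 0x0f) * 0x11;
}

int main(void)
{
	unsigned val;

	/* Round-tripping any 4-bit register value is exact... */
	for (val = 0; val <= 0x0f; val++)
		if (user_to_reg(reg_to_user(val)) != val)
			printf("mismatch at %u\n", val);

	/* ...while arbitrary 8-bit input snaps to the nearest step of 17. */
	printf("%u -> %u\n", 200u, reg_to_user(user_to_reg(200u))); /* 204 */
	return 0;
}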
+diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+index f5cb13b..cc04b7b 100644
+--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
++++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+@@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
+ int j;
+ int ret;
+
+- ret = get_user_pages(current, current->mm, addr,
+- npages, 0, 1, pages, NULL);
+-
++ ret = get_user_pages_fast(addr, npages, 0, pages);
+ if (ret != npages) {
+ int i;
+
+@@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
+ while (dim) {
+ const int mxp = 8;
+
+- down_write(&current->mm->mmap_sem);
+ ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
+index 8244208..573b460 100644
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -284,8 +284,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ int j;
+ int ret;
+
+- ret = get_user_pages(current, current->mm, addr,
+- npages, 0, 1, pages, NULL);
++ ret = get_user_pages_fast(addr, npages, 0, pages);
+
+ if (ret != npages) {
+ int i;
+@@ -830,10 +829,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ while (dim) {
+ const int mxp = 8;
+
+- down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 509135f..4df80fb 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- for (i = 0; i < 3; i++)
+- strcpy(card->s0num[i], sdef.num[i]);
++ for (i = 0; i < 3; i++) {
++ strlcpy(card->s0num[i], sdef.num[i],
++ sizeof(card->s0num[0]));
++ }
+ break;
+ case ISDN_PTYPE_1TR6:
+ if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
+@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- strcpy(card->s0num[0], sdef.num[0]);
++ strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
+ card->s0num[1][0] = '\0';
+ card->s0num[2][0] = '\0';
+ break;
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index 738ea8d..98e8274 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+- struct sockaddr_mISDN *maddr;
+
+ int copied, err;
+
+@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+- msg->msg_namelen = sizeof(struct sockaddr_mISDN);
+- maddr = (struct sockaddr_mISDN *)msg->msg_name;
++ if (msg->msg_name) {
++ struct sockaddr_mISDN *maddr = msg->msg_name;
++
+ maddr->family = AF_ISDN;
+ maddr->dev = _pms(sk)->dev->id;
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ maddr->sapi = _pms(sk)->ch.addr & 0xFF;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
+ }
+- } else {
+- if (msg->msg_namelen)
+- printk(KERN_WARNING "%s: too small namelen %d\n",
+- __func__, msg->msg_namelen);
+- msg->msg_namelen = 0;
++ msg->msg_namelen = sizeof(*maddr);
+ }
+
+ copied = skb->len + MISDN_HEADER_LEN;
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index a5dfcc0..910d2f8 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1611,6 +1611,11 @@ static int __init dm_bufio_init(void)
+ {
+ __u64 mem;
+
++ dm_bufio_allocated_kmem_cache = 0;
++ dm_bufio_allocated_get_free_pages = 0;
++ dm_bufio_allocated_vmalloc = 0;
++ dm_bufio_current_allocated = 0;
++
+ memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 11431ac..3f123f1 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -20,6 +20,7 @@
+ struct delay_c {
+ struct timer_list delay_timer;
+ struct mutex timer_lock;
++ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+ atomic_t may_delay;
+@@ -45,14 +46,13 @@ struct dm_delay_info {
+
+ static DEFINE_MUTEX(delayed_bios_lock);
+
+-static struct workqueue_struct *kdelayd_wq;
+ static struct kmem_cache *delayed_cache;
+
+ static void handle_delayed_timer(unsigned long data)
+ {
+ struct delay_c *dc = (struct delay_c *)data;
+
+- queue_work(kdelayd_wq, &dc->flush_expired_bios);
++ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
+ }
+
+ static void queue_timeout(struct delay_c *dc, unsigned long expires)
+@@ -190,6 +190,12 @@ out:
+ goto bad_dev_write;
+ }
+
++ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
++ if (!dc->kdelayd_wq) {
++ DMERR("Couldn't start kdelayd");
++ goto bad_queue;
++ }
++
+ setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+@@ -202,6 +208,8 @@ out:
+ ti->private = dc;
+ return 0;
+
++bad_queue:
++ mempool_destroy(dc->delayed_pool);
+ bad_dev_write:
+ if (dc->dev_write)
+ dm_put_device(ti, dc->dev_write);
+@@ -216,7 +224,7 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- flush_workqueue(kdelayd_wq);
++ destroy_workqueue(dc->kdelayd_wq);
+
+ dm_put_device(ti, dc->dev_read);
+
+@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
+ {
+ int r = -ENOMEM;
+
+- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+- if (!kdelayd_wq) {
+- DMERR("Couldn't start kdelayd");
+- goto bad_queue;
+- }
+-
+ delayed_cache = KMEM_CACHE(dm_delay_info, 0);
+ if (!delayed_cache) {
+ DMERR("Couldn't create delayed bio cache.");
+@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
+ bad_register:
+ kmem_cache_destroy(delayed_cache);
+ bad_memcache:
+- destroy_workqueue(kdelayd_wq);
+-bad_queue:
+ return r;
+ }
+
+@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
+ {
+ dm_unregister_target(&delay_target);
+ kmem_cache_destroy(delayed_cache);
+- destroy_workqueue(kdelayd_wq);
+ }
+
+ /* Module hooks */
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 7e766f92..84ad530 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -84,6 +84,7 @@ struct multipath {
+ unsigned queue_io; /* Must we queue all I/O? */
+ unsigned queue_if_no_path; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path;/* Saved state during suspension */
++ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+ unsigned pg_init_retries; /* Number of times to retry pg_init */
+ unsigned pg_init_count; /* Number of times pg_init called */
+ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+@@ -473,7 +474,8 @@ static void process_queued_ios(struct work_struct *work)
+ (!pgpath && !m->queue_if_no_path))
+ must_queue = 0;
+
+- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
++ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
++ !m->pg_init_disabled)
+ __pg_init_all_paths(m);
+
+ out:
+@@ -887,10 +889,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
+
+ static void flush_multipath_work(struct multipath *m)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 1;
++ spin_unlock_irqrestore(&m->lock, flags);
++
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+ flush_workqueue(kmultipathd);
+ flush_work_sync(&m->trigger_event);
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 0;
++ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+ static void multipath_dtr(struct dm_target *ti)
+@@ -1111,7 +1123,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+
+ spin_lock_irqsave(&m->lock, flags);
+
+- if (m->pg_init_count <= m->pg_init_retries)
++ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
+ m->pg_init_required = 1;
+ else
+ limit_reached = 1;
+@@ -1621,7 +1633,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 3, 1},
++ .version = {1, 3, 2},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 5c30316..fec79e7 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -66,6 +66,18 @@ struct dm_snapshot {
+
+ atomic_t pending_exceptions_count;
+
++ /* Protected by "lock" */
++ sector_t exception_start_sequence;
++
++ /* Protected by kcopyd single-threaded callback */
++ sector_t exception_complete_sequence;
++
++ /*
++ * A list of pending exceptions that completed out of order.
++ * Protected by kcopyd single-threaded callback.
++ */
++ struct list_head out_of_order_list;
++
+ mempool_t *pending_pool;
+
+ struct dm_exception_table pending;
+@@ -171,6 +183,14 @@ struct dm_snap_pending_exception {
+ */
+ int started;
+
++	/* There was a copying error. */
++ int copy_error;
++
++	/* A sequence number, used for in-order completion. */
++ sector_t exception_sequence;
++
++ struct list_head out_of_order_entry;
++
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+@@ -1090,6 +1110,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->valid = 1;
+ s->active = 0;
+ atomic_set(&s->pending_exceptions_count, 0);
++ s->exception_start_sequence = 0;
++ s->exception_complete_sequence = 0;
++ INIT_LIST_HEAD(&s->out_of_order_list);
+ init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
+ spin_lock_init(&s->pe_lock);
+@@ -1448,6 +1471,19 @@ static void commit_callback(void *context, int success)
+ pending_complete(pe, success);
+ }
+
++static void complete_exception(struct dm_snap_pending_exception *pe)
++{
++ struct dm_snapshot *s = pe->snap;
++
++ if (unlikely(pe->copy_error))
++ pending_complete(pe, 0);
++
++ else
++ /* Update the metadata if we are persistent */
++ s->store->type->commit_exception(s->store, &pe->e,
++ commit_callback, pe);
++}
++
+ /*
+ * Called when the copy I/O has finished. kcopyd actually runs
+ * this code so don't block.
+@@ -1457,13 +1493,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
+ struct dm_snap_pending_exception *pe = context;
+ struct dm_snapshot *s = pe->snap;
+
+- if (read_err || write_err)
+- pending_complete(pe, 0);
++ pe->copy_error = read_err || write_err;
+
+- else
+- /* Update the metadata if we are persistent */
+- s->store->type->commit_exception(s->store, &pe->e,
+- commit_callback, pe);
++ if (pe->exception_sequence == s->exception_complete_sequence) {
++ s->exception_complete_sequence++;
++ complete_exception(pe);
++
++ while (!list_empty(&s->out_of_order_list)) {
++ pe = list_entry(s->out_of_order_list.next,
++ struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe->exception_sequence != s->exception_complete_sequence)
++ break;
++ s->exception_complete_sequence++;
++ list_del(&pe->out_of_order_entry);
++ complete_exception(pe);
++ }
++ } else {
++ struct list_head *lh;
++ struct dm_snap_pending_exception *pe2;
++
++ list_for_each_prev(lh, &s->out_of_order_list) {
++ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe2->exception_sequence < pe->exception_sequence)
++ break;
++ }
++ list_add(&pe->out_of_order_entry, lh);
++ }
+ }
+
+ /*
+@@ -1558,6 +1613,8 @@ __find_pending_exception(struct dm_snapshot *s,
+ return NULL;
+ }
+
++ pe->exception_sequence = s->exception_start_sequence++;
++
+ dm_insert_exception(&s->pending, &pe->e);
+
+ return pe;
+@@ -2200,7 +2257,7 @@ static struct target_type origin_target = {
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 10, 1},
++ .version = {1, 10, 2},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
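The dm-snap changes above enforce in-order completion: every pending exception is stamped with a sequence number at creation, and copy_callback() completes work only in sequence, parking early arrivals on a list kept sorted by sequence number. A condensed user-space sketch of that pattern follows; the single-threaded-callback assumption mirrors the hunk, but the names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct pending {
	unsigned long seq;       /* assigned in submission order */
	struct pending *next;    /* out-of-order list, ascending by seq */
};

static unsigned long start_seq;     /* next sequence to hand out */
static unsigned long complete_seq;  /* next sequence allowed to finish */
static struct pending *out_of_order;

static struct pending *submit(void)
{
	struct pending *pe = malloc(sizeof(*pe));

	if (!pe)
		exit(1);
	pe->seq = start_seq++;
	pe->next = NULL;
	return pe;
}

static void do_complete(struct pending *pe)
{
	printf("completed %lu\n", pe->seq);
	free(pe);
}

/* Called when pe's I/O finishes, possibly out of order. */
static void on_done(struct pending *pe)
{
	if (pe->seq == complete_seq) {
		complete_seq++;
		do_complete(pe);
		/* Drain any parked work that is now in sequence. */
		while (out_of_order && out_of_order->seq == complete_seq) {
			struct pending *next = out_of_order;

			out_of_order = next->next;
			complete_seq++;
			do_complete(next);
		}
	} else {
		/* Park it, keeping the list sorted by sequence number. */
		struct pending **pp = &out_of_order;

		while (*pp && (*pp)->seq < pe->seq)
			pp = &(*pp)->next;
		pe->next = *pp;
		*pp = pe;
	}
}

int main(void)
{
	struct pending *a = submit(), *b = submit(), *c = submit();

	on_done(c);  /* parked: 2 != 0 */
	on_done(a);  /* completes 0; 2 still waits for 1 */
	on_done(b);  /* completes 1, then drains 2 */
	return 0;
}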
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 52848ab..5c52582 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -215,6 +215,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
+
+ num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+
++ if (!num_targets) {
++ kfree(t);
++ return -ENOMEM;
++ }
++
+ if (alloc_targets(t, num_targets)) {
+ kfree(t);
+ t = NULL;
+@@ -581,14 +586,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
+
+ /*
+ * Used to dynamically allocate the arg array.
++ *
++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
++ * process messages even if some device is suspended. These messages have a
++ * small fixed number of arguments.
++ *
++ * On the other hand, dm-switch needs to process bulk data using messages and
++ * excessive use of GFP_NOIO could cause trouble.
+ */
+ static char **realloc_argv(unsigned *array_size, char **old_argv)
+ {
+ char **argv;
+ unsigned new_size;
++ gfp_t gfp;
+
+- new_size = *array_size ? *array_size * 2 : 64;
+- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++ if (*array_size) {
++ new_size = *array_size * 2;
++ gfp = GFP_KERNEL;
++ } else {
++ new_size = 8;
++ gfp = GFP_NOIO;
++ }
++ argv = kmalloc(new_size * sizeof(*argv), gfp);
+ if (argv) {
+ memcpy(argv, old_argv, *array_size * sizeof(*argv));
+ *array_size = new_size;
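realloc_argv() above grows the argument array geometrically (start at 8, then double) while choosing the allocation mode by context. GFP flags have no user-space analogue, so this sketch keeps only the growth-and-copy pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow an argv array: start at 8 slots, then double. On failure the
 * caller keeps ownership of old_argv; on success it is consumed. */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	unsigned new_size = *array_size ? *array_size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (!argv)
		return NULL;
	if (old_argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		free(old_argv);
	}
	*array_size = new_size;
	return argv;
}

int main(void)
{
	unsigned size = 0, used = 0;
	char **argv = NULL;

	for (int i = 0; i < 20; i++) {        /* forces one doubling */
		if (used == size) {
			argv = realloc_argv(&size, argv);
			if (!argv)
				return 1;
		}
		argv[used++] = "arg";
	}
	printf("capacity %u, used %u\n", size, used);
	free(argv);
	return 0;
}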
+diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
+index 3b7d7b4..8f3c47e 100644
+--- a/drivers/media/video/saa7164/saa7164-core.c
++++ b/drivers/media/video/saa7164/saa7164-core.c
+@@ -1386,9 +1386,11 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
+ if (fw_debug) {
+ dev->kthread = kthread_run(saa7164_thread_function, dev,
+ "saa7164 debug");
+- if (!dev->kthread)
++ if (IS_ERR(dev->kthread)) {
++ dev->kthread = NULL;
+ printk(KERN_ERR "%s() Failed to create "
+ "debug kernel thread\n", __func__);
++ }
+ }
+
+ } /* != BOARD_UNKNOWN */
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 00e5fcac8..cbee842 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
+ {
+ char name[ENCLOSURE_NAME_SIZE];
+
++ /*
++ * In odd circumstances, like multipath devices, something else may
++ * already have removed the links, so check for this condition first.
++ */
++ if (!cdev->dev->kobj.sd)
++ return;
++
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 74793af..4802f7f 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -634,7 +634,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ * Otherwise we don't understand what happened, so abort.
+ */
+ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+- struct mmc_blk_request *brq, int *ecc_err)
++ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+ {
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+@@ -665,6 +665,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
++ /* Flag General errors */
++ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++ if ((status & R1_ERROR) ||
++ (brq->stop.resp[0] & R1_ERROR)) {
++ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
++ req->rq_disk->disk_name, __func__,
++ brq->stop.resp[0], status);
++ *gen_err = 1;
++ }
++
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+@@ -684,6 +694,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
++ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++ if (stop_status & R1_ERROR) {
++ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++ req->rq_disk->disk_name, __func__,
++ stop_status);
++ *gen_err = 1;
++ }
+ }
+
+ /* Check for set block count errors */
+@@ -933,7 +950,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
+- int ecc_err = 0;
++ int ecc_err = 0, gen_err = 0;
+
+ /*
+ * sbc.error indicates a problem with the set block count
+@@ -947,7 +964,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ */
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
++ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+@@ -975,6 +992,15 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
++
++ /* Check stop command response */
++ if (brq->stop.resp[0] & R1_ERROR) {
++ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++ req->rq_disk->disk_name, __func__,
++ brq->stop.resp[0]);
++ gen_err = 1;
++ }
++
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+@@ -982,6 +1008,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
++
++ if (status & R1_ERROR) {
++ pr_err("%s: %s: general error sending status command, card status %#x\n",
++ req->rq_disk->disk_name, __func__,
++ status);
++ gen_err = 1;
++ }
++
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+@@ -991,6 +1025,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
+
++ /* if general error occurs, retry the write operation. */
++ if (gen_err) {
++ pr_warning("%s: retrying write for general error\n",
++ req->rq_disk->disk_name);
++ return MMC_BLK_RETRY;
++ }
++
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 9f9982f..3d6beb7 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -71,7 +71,7 @@
+
+ /* Define max times to check status register before we give up. */
+ #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+-#define MAX_CMD_SIZE 5
++#define MAX_CMD_SIZE 6
+
+ #ifdef CONFIG_M25PXX_USE_FAST_READ
+ #define OPCODE_READ OPCODE_FAST_READ
+@@ -874,14 +874,13 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ }
+ }
+
+- flash = kzalloc(sizeof *flash, GFP_KERNEL);
++ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+- if (!flash->command) {
+- kfree(flash);
++
++ flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
++ if (!flash->command)
+ return -ENOMEM;
+- }
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+@@ -978,14 +977,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ static int __devexit m25p_remove(struct spi_device *spi)
+ {
+ struct m25p *flash = dev_get_drvdata(&spi->dev);
+- int status;
+
+ /* Clean up MTD stuff. */
+- status = mtd_device_unregister(&flash->mtd);
+- if (status == 0) {
+- kfree(flash->command);
+- kfree(flash);
+- }
++ mtd_device_unregister(&flash->mtd);
++
+ return 0;
+ }
+
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index f39f83e..d6a7764 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -227,8 +227,6 @@ static void dma_irq_callback(void *param)
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+- complete(dma_c);
+-
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+@@ -253,6 +251,8 @@ static void dma_irq_callback(void *param)
+ default:
+ pr_err("in wrong DMA operation.\n");
+ }
++
++ complete(dma_c);
+ }
+
+ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index daed698..46ed296 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2895,10 +2895,22 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
++
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
++
++ /*
++ * pages_per_block and blocks_per_lun may not be a power-of-2 size
++ * (don't ask me who thought of this...). MTD assumes that these
++ * dimensions will be power-of-2, so just truncate the remaining area.
++ */
++ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
++ mtd->erasesize *= mtd->writesize;
++
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+- chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
++
++ /* See erasesize comment */
++ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
++ chip->chipsize *= (uint64_t)mtd->erasesize;
+ *busw = 0;
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
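The nand_base hunk rounds non-power-of-2 ONFI geometry down with 1 << (fls(x) - 1), i.e. it keeps only the highest set bit and truncates the rest. A quick check of that identity, with fls() modeled on the kernel helper via GCC's __builtin_clz:

#include <stdio.h>

/* fls(x): index of the highest set bit, 1-based; 0 for x == 0. */
static int fls_u32(unsigned x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Round down to the nearest power of two, as the ONFI hunk does. */
static unsigned pow2_floor(unsigned x)
{
	return x ? 1u << (fls_u32(x) - 1) : 0;
}

int main(void)
{
	printf("%u\n", pow2_floor(64));   /* 64: already a power of two */
	printf("%u\n", pow2_floor(96));   /* 64: odd block count truncated */
	printf("%u\n", pow2_floor(1));    /* 1 */
	return 0;
}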
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 8ed48c2..cf95bd8 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -534,8 +534,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ goto out;
+ }
+ if (bond->params.mode == BOND_MODE_ALB ||
+- bond->params.mode == BOND_MODE_TLB) {
+- pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
++ bond->params.mode == BOND_MODE_TLB ||
++ bond->params.mode == BOND_MODE_8023AD) {
++ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+@@ -693,6 +694,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -726,6 +729,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+@@ -748,6 +752,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -781,6 +787,7 @@ static ssize_t bonding_store_updelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 64647d4..91d1b5a 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -764,9 +764,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ msg_ctrl_save = priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl);
+
+- if (msg_ctrl_save & IF_MCONT_EOB)
+- return num_rx_pkts;
+-
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+@@ -774,6 +771,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ continue;
+ }
+
++ if (msg_ctrl_save & IF_MCONT_EOB)
++ return num_rx_pkts;
++
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 6a1acfe..568b821 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -488,19 +488,19 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ uint8_t isrc, status;
+ int n = 0;
+
+- /* Shared interrupts and IRQ off? */
+- if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
+- return IRQ_NONE;
+-
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
++ /* Shared interrupts and IRQ off? */
++ if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
++ goto out;
++
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+- n++;
++
+ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+
+ if (isrc & IRQ_WUI)
+ dev_warn(dev->dev.parent, "wakeup interrupt\n");
+@@ -519,7 +519,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+ }
+ }
+ if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
+@@ -527,8 +527,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ if (sja1000_err(dev, isrc, status))
+ break;
+ }
++ n++;
+ }
+-
++out:
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index 5f53fbb..ff1af41 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -46,7 +46,8 @@
+ defined(CONFIG_MACH_LITTLETON) ||\
+ defined(CONFIG_MACH_ZYLONITE2) ||\
+ defined(CONFIG_ARCH_VIPER) ||\
+- defined(CONFIG_MACH_STARGATE2)
++ defined(CONFIG_MACH_STARGATE2) ||\
++ defined(CONFIG_ARCH_VERSATILE)
+
+ #include <asm/mach-types.h>
+
+@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define SMC_outl(v, a, r) writel(v, (a) + (r))
+ #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+ #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
++#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
++#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+ #define SMC_IRQ_FLAGS (-1) /* from resource */
+
+ /* We actually can't write halfwords properly if not word aligned */
+@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define RPC_LSA_DEFAULT RPC_LED_TX_RX
+ #define RPC_LSB_DEFAULT RPC_LED_100_10
+
+-#elif defined(CONFIG_ARCH_VERSATILE)
+-
+-#define SMC_CAN_USE_8BIT 1
+-#define SMC_CAN_USE_16BIT 1
+-#define SMC_CAN_USE_32BIT 1
+-#define SMC_NOWAIT 1
+-
+-#define SMC_inb(a, r) readb((a) + (r))
+-#define SMC_inw(a, r) readw((a) + (r))
+-#define SMC_inl(a, r) readl((a) + (r))
+-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+-#define SMC_outl(v, a, r) writel(v, (a) + (r))
+-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+-#define SMC_IRQ_FLAGS (-1) /* from resource */
+-
+ #elif defined(CONFIG_MN10300)
+
+ /*
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 1161584..2f319d1 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -985,8 +985,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (error < 0)
+ goto end;
+
+- m->msg_namelen = 0;
+-
+ if (skb) {
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
+index d8d8f0d..35d86fa 100644
+--- a/drivers/net/wireless/libertas/debugfs.c
++++ b/drivers/net/wireless/libertas/debugfs.c
+@@ -919,7 +919,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ char *p2;
+ struct debug_data *d = f->private_data;
+
+- pdata = kmalloc(cnt, GFP_KERNEL);
++ if (cnt == 0)
++ return 0;
++
++ pdata = kmalloc(cnt + 1, GFP_KERNEL);
+ if (pdata == NULL)
+ return 0;
+
+@@ -928,6 +931,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ kfree(pdata);
+ return 0;
+ }
++ pdata[cnt] = '\0';
+
+ p0 = pdata;
+ for (i = 0; i < num_of_items; i++) {
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 17f8720..72b253d2 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -936,7 +936,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, u32 upld_typ)
+ {
+ u8 *cmd_buf;
++ __le16 *curr_ptr = (__le16 *)skb->data;
++ u16 pkt_len = le16_to_cpu(*curr_ptr);
+
++ skb_trim(skb, pkt_len);
+ skb_pull(skb, INTF_HEADER_LEN);
+
+ switch (upld_typ) {
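The mwifiex hunk reads the 16-bit length word at the head of the received buffer and trims the packet to it before stripping the interface header, so later parsing never sees bus padding. A sketch of the same trim-then-pull step on a plain byte buffer; the bounds checks and the little-endian assumption are spelled out here, since the hunk itself relies on le16_to_cpu and skb invariants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INTF_HEADER_LEN 4

struct buf {
	uint8_t *data;
	size_t len;
};

/* Trim the buffer to the little-endian length stored in its first
 * two bytes, then drop the interface header, mirroring the hunk's
 * skb_trim() + skb_pull() sequence. Returns 0 on success. */
static int strip_intf_header(struct buf *b)
{
	uint16_t pkt_len;

	if (b->len < INTF_HEADER_LEN)
		return -1;
	memcpy(&pkt_len, b->data, sizeof(pkt_len)); /* assumes LE host */
	if (pkt_len < INTF_HEADER_LEN || pkt_len > b->len)
		return -1;
	b->len = pkt_len;                 /* skb_trim: drop bus padding */
	b->data += INTF_HEADER_LEN;       /* skb_pull: skip the header */
	b->len -= INTF_HEADER_LEN;
	return 0;
}

int main(void)
{
	uint8_t raw[16] = { 10, 0 };      /* declared length 10, 6 pad bytes */
	struct buf b = { raw, sizeof(raw) };

	if (strip_intf_header(&b) == 0)
		printf("payload %zu bytes\n", b.len);   /* 6 */
	return 0;
}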
+diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
+index 5d0f615..e2fa538 100644
+--- a/drivers/net/wireless/prism54/islpci_dev.c
++++ b/drivers/net/wireless/prism54/islpci_dev.c
+@@ -812,6 +812,10 @@ static const struct net_device_ops islpci_netdev_ops = {
+ .ndo_validate_addr = eth_validate_addr,
+ };
+
++static struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ struct net_device *
+ islpci_setup(struct pci_dev *pdev)
+ {
+@@ -822,9 +826,8 @@ islpci_setup(struct pci_dev *pdev)
+ return ndev;
+
+ pci_set_drvdata(pdev, ndev);
+-#if defined(SET_NETDEV_DEV)
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+-#endif
++ SET_NETDEV_DEVTYPE(ndev, &wlan_type);
+
+ /* setup the structure members */
+ ndev->base_addr = pci_resource_start(pdev, 0);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
+index 0ea85f4..131b22b 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1253,7 +1253,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
+ */
+ rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
+- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
++ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 921da9a..5c38281 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -771,6 +771,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+
++ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
++ return;
++
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index b4ce934..a917a22 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -31,6 +31,7 @@
+
+ #include <linux/ip.h>
+ #include <linux/module.h>
++#include <linux/udp.h>
+ #include "wifi.h"
+ #include "rc.h"
+ #include "base.h"
+@@ -956,60 +957,51 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ if (!ieee80211_is_data(fc))
+ return false;
+
++ ip = (const struct iphdr *)(skb->data + mac_hdr_len +
++ SNAP_SIZE + PROTOC_TYPE_SIZE);
++ ether_type = be16_to_cpup((__be16 *)
++ (skb->data + mac_hdr_len + SNAP_SIZE));
+
+- ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+- SNAP_SIZE + PROTOC_TYPE_SIZE);
+- ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+- /* ether_type = ntohs(ether_type); */
+-
+- if (ETH_P_IP == ether_type) {
+- if (IPPROTO_UDP == ip->protocol) {
+- struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+- (ip->ihl << 2));
+- if (((((u8 *) udp)[1] == 68) &&
+- (((u8 *) udp)[3] == 67)) ||
+- ((((u8 *) udp)[1] == 67) &&
+- (((u8 *) udp)[3] == 68))) {
+- /*
+- * 68 : UDP BOOTP client
+- * 67 : UDP BOOTP server
+- */
+- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
+- DBG_DMESG, ("dhcp %s !!\n",
+- (is_tx) ? "Tx" : "Rx"));
+-
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies =
+- jiffies;
+- }
+-
+- return true;
+- }
+- }
+- } else if (ETH_P_ARP == ether_type) {
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
++ switch (ether_type) {
++ case ETH_P_IP: {
++ struct udphdr *udp;
++ u16 src;
++ u16 dst;
+
+- return true;
+- } else if (ETH_P_PAE == ether_type) {
+- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+- ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
++ if (ip->protocol != IPPROTO_UDP)
++ return false;
++ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
++ src = be16_to_cpu(udp->source);
++ dst = be16_to_cpu(udp->dest);
+
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
++ /* If this case involves port 68 (UDP BOOTP client) connecting
++ * with port 67 (UDP BOOTP server), then return true so that
++ * the lowest speed is used.
++ */
++ if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
++ return false;
+
+- return true;
+- } else if (ETH_P_IPV6 == ether_type) {
+- /* IPv6 */
+- return true;
++ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
++ ("dhcp %s !!\n", is_tx ? "Tx" : "Rx"));
++ break;
+ }
+-
+- return false;
++ case ETH_P_ARP:
++ break;
++ case ETH_P_PAE:
++ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
++ ("802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"));
++ break;
++ case ETH_P_IPV6:
++ /* TODO: Is this right? */
++ return false;
++ default:
++ return false;
++ }
++ if (is_tx) {
++ rtl_lps_leave(hw);
++ ppsc->last_delaylps_stamp_jiffies = jiffies;
++ }
++ return true;
+ }
+
+ /*********************************************************
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+index 060a06f..5515215 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+@@ -782,7 +782,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+
+ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+- struct rx_desc_92c *pdesc,
++ struct rx_desc_92c *p_desc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+@@ -797,11 +797,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ u32 rssi, total_rssi = 0;
+ bool in_powersavemode = false;
+ bool is_cck_rate;
++ u8 *pdesc = (u8 *)p_desc;
+
+- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
++ is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+- pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->RX_SIGQ[0] = -1;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index a7e1a2c..a6ea2d9 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -303,10 +303,10 @@ out:
+ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+- u8 *p_desc, struct sk_buff *skb)
++ u8 *pdesc, struct sk_buff *skb)
+ {
+ struct rx_fwinfo_92c *p_drvinfo;
+- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
++ struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
+ u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+@@ -345,11 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
+- rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
++ rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+index 3637c0c..639b57b 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+@@ -529,7 +529,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+index 0ad50fe..13081d9 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+@@ -274,7 +274,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]) {
+ pwrdiff_limit[i] =
+- rtlefuse->pwrgroup_ht20
++ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1];
+ }
+ } else {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+index fbebe3e..542a871 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+@@ -582,7 +582,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ }
+
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+
+ return true;
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 82baaa2..5764ef7 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -73,11 +73,7 @@
+ #define RTL_SLOT_TIME_9 9
+ #define RTL_SLOT_TIME_20 20
+
+-/*related with tcp/ip. */
+-/*if_ehther.h*/
+-#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
+-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
++/*related to tcp/ip. */
+ #define SNAP_SIZE 6
+ #define PROTOC_TYPE_SIZE 2
+
+diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
+index e0610bd..7e41b70 100644
+--- a/drivers/pci/pcie/portdrv_pci.c
++++ b/drivers/pci/pcie/portdrv_pci.c
+@@ -151,7 +151,6 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
+ static void pcie_portdrv_remove(struct pci_dev *dev)
+ {
+ pcie_port_device_remove(dev);
+- pci_disable_device(dev);
+ }
+
+ static int error_detected_iter(struct device *device, void *data)
+diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
+index e39b77a..15406d5 100644
+--- a/drivers/rtc/rtc-at91rm9200.c
++++ b/drivers/rtc/rtc-at91rm9200.c
+@@ -156,6 +156,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+
+ at91_alarm_year = tm.tm_year;
+
++ tm.tm_mon = alrm->time.tm_mon;
++ tm.tm_mday = alrm->time.tm_mday;
+ tm.tm_hour = alrm->time.tm_hour;
+ tm.tm_min = alrm->time.tm_min;
+ tm.tm_sec = alrm->time.tm_sec;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index fff57de..55f6488 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4322,7 +4322,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_snmp_ureq *ureq;
+- int req_len;
++ unsigned int req_len;
+ struct qeth_arp_query_info qinfo = {0, };
+ int rc = 0;
+
+@@ -4338,6 +4338,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ /* skip 4 bytes (data_len struct member) to get req_len */
+ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ return -EFAULT;
++ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
++ sizeof(struct qeth_ipacmd_hdr) -
++ sizeof(struct qeth_ipacmd_setadpparms_hdr)))
++ return -EINVAL;
+ ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+ if (IS_ERR(ureq)) {
+ QETH_CARD_TEXT(card, 2, "snmpnome");
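
Widening req_len to unsigned int matters here because the value is copied straight from user space: as a signed int, a negative length sails past any "req_len > limit" test and then becomes a huge size_t inside memdup_user(). A small standalone demonstration of the signed/unsigned difference (MAX_REQ is an arbitrary stand-in for the real IPA buffer budget):

#include <stdio.h>

#define MAX_REQ 1024	/* stand-in for the real buffer budget */

int main(void)
{
	int s = -1;			/* hostile length from user space */
	unsigned int u = (unsigned int)-1;

	/* Signed: -1 > 1024 is false, so the bogus length passes. */
	printf("signed check rejects: %d\n", s > MAX_REQ);
	/* Unsigned: 4294967295 > 1024 is true, so it is refused. */
	printf("unsigned check rejects: %d\n", u > MAX_REQ);
	return 0;
}
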
+diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
+index 8a0b330..1254431 100644
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -508,7 +508,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+ goto cleanup;
+ }
+
+- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
++ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
++ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0f48550..5b7e1bf 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1186,7 +1186,7 @@ static void complete_scsi_command(struct CommandList *cp)
+ "has check condition: aborted command: "
+ "ASC: 0x%x, ASCQ: 0x%x\n",
+ cp, asc, ascq);
+- cmd->result = DID_SOFT_ERROR << 16;
++ cmd->result |= DID_SOFT_ERROR << 16;
+ break;
+ }
+ /* Must be some other type of check condition */
+@@ -4465,7 +4465,7 @@ reinit_after_soft_reset:
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+ start_controller_lockup_detector(h);
+- return 1;
++ return 0;
+
+ clean4:
+ hpsa_free_sg_chain_blocks(h);
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 4868fc9..5e170e3 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -197,7 +197,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ qc->tf.nsect = 0;
+ }
+
+- ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
++ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
+ task->uldd_task = qc;
+ if (ata_is_atapi(qc->tf.protocol)) {
+ memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
+index 93de4f2..b27d9aa 100644
+--- a/drivers/staging/tidspbridge/Kconfig
++++ b/drivers/staging/tidspbridge/Kconfig
+@@ -4,7 +4,7 @@
+
+ menuconfig TIDSPBRIDGE
+ tristate "DSP Bridge driver"
+- depends on ARCH_OMAP3
++ depends on ARCH_OMAP3 && BROKEN
+ select OMAP_MBOX_FWK
+ help
+ DSP/BIOS Bridge is designed for platforms that contain a GPP and
+diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
+index 1fae1e9..fc552d8 100644
+--- a/drivers/staging/zram/zram_sysfs.c
++++ b/drivers/staging/zram/zram_sysfs.c
+@@ -95,20 +95,27 @@ static ssize_t reset_store(struct device *dev,
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
++ if (!bdev)
++ return -ENOMEM;
++
+ /* Do not reset an active device! */
+- if (bdev->bd_holders)
+- return -EBUSY;
++ if (bdev->bd_holders) {
++ ret = -EBUSY;
++ goto out;
++ }
+
+ ret = strict_strtoul(buf, 10, &do_reset);
+ if (ret)
+- return ret;
++ goto out;
+
+- if (!do_reset)
+- return -EINVAL;
++ if (!do_reset) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ /* Make sure all pending I/O is finished */
+- if (bdev)
+- fsync_bdev(bdev);
++ fsync_bdev(bdev);
++ bdput(bdev);
+
+ down_write(&zram->init_lock);
+ if (zram->init_done)
+@@ -116,6 +123,10 @@ static ssize_t reset_store(struct device *dev,
+ up_write(&zram->init_lock);
+
+ return len;
++
++out:
++ bdput(bdev);
++ return ret;
+ }
+
+ static ssize_t num_reads_show(struct device *dev,
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index 1cd6ce3..59e7378 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -172,6 +172,7 @@ static int chap_server_compute_md5(
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
++ size_t compare_len;
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+@@ -210,7 +211,9 @@ static int chap_server_compute_md5(
+ goto out;
+ }
+
+- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
++ /* Include the terminating NULL in the compare */
++ compare_len = strlen(auth->userid) + 1;
++ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
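
Comparing only strlen(auth->userid) bytes with memcmp() let any received name that merely starts with the configured user pass (for example "administrator" matching a configured "admin"). Including the terminating NUL turns the prefix test into an exact match. A sketch of the idea (names_match is illustrative, not the target-core API):

#include <string.h>

/* Exact-match check: comparing one byte past the expected string
 * forces the received name to end where the expected one does. */
static int names_match(const char *received, const char *expected)
{
	return strncmp(received, expected, strlen(expected) + 1) == 0;
}

With the extra byte, "administrator" fails at the sixth position ('i' vs '\0') instead of matching the "admin" prefix.
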
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 7d85f88..3486d12 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -89,7 +89,7 @@ int extract_param(
+ if (len < 0)
+ return -1;
+
+- if (len > max_length) {
++ if (len >= max_length) {
+ pr_err("Length of input: %d exeeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0cdff38..636ee9e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1448,6 +1448,8 @@ static int acm_reset_resume(struct usb_interface *intf)
+
+ static const struct usb_device_id acm_ids[] = {
+ /* quirky and broken devices */
++ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
++ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
+ { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a5ea85f..7013165 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -900,6 +900,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ }
++ if (portchange & USB_PORT_STAT_C_RESET) {
++ need_debounce_delay = true;
++ clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_RESET);
++ }
+
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+@@ -3749,8 +3754,9 @@ static void hub_events(void)
+ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- if (!udev || !(portstatus &
+- USB_PORT_STAT_CONNECTION)) {
++ if (!udev ||
++ !(portstatus & USB_PORT_STAT_CONNECTION) ||
++ udev->state == USB_STATE_NOTATTACHED) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+@@ -4018,6 +4024,12 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ }
+ parent_hub = hdev_to_hub(parent_hdev);
+
++ /* Disable USB2 hardware LPM.
++ * It will be re-enabled by the enumeration process.
++ */
++ if (udev->usb2_hw_lpm_enabled == 1)
++ usb_set_usb2_hardware_lpm(udev, 0);
++
+ set_bit(port1, parent_hub->busy_bits);
+ for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index c0dcf69..c4134e8 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -394,6 +394,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
++ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
++ break;
+ ret = __dwc3_gadget_ep_set_halt(dep, set);
+ if (ret)
+ return -EINVAL;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 619ee19..5f2e3d0 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -903,9 +903,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ else
+ dep->flags |= DWC3_EP_STALL;
+ } else {
+- if (dep->flags & DWC3_EP_WEDGE)
+- return 0;
+-
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_CLEARSTALL, &params);
+ if (ret)
+@@ -913,7 +910,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ value ? "set" : "clear",
+ dep->name);
+ else
+- dep->flags &= ~DWC3_EP_STALL;
++ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ }
+
+ return ret;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index f71b078..4484ef1 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -585,6 +585,7 @@ static void reset_config(struct usb_composite_dev *cdev)
+ bitmap_zero(f->endpoints, 32);
+ }
+ cdev->config = NULL;
++ cdev->delayed_status = 0;
+ }
+
+ static int set_config(struct usb_composite_dev *cdev,
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index a3f6fe0..85504bb 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2192,6 +2192,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ termios->c_cflag |= CRTSCTS;
+ }
+
++ /*
++ * All FTDI UART chips are limited to CS7/8. We won't pretend to
++ * support CS5/6 and instead revert the CSIZE setting.
++ */
++ if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
++ dev_warn(&port->dev, "requested CSIZE setting not supported\n");
++
++ termios->c_cflag &= ~CSIZE;
++ if (old_termios)
++ termios->c_cflag |= old_termios->c_cflag & CSIZE;
++ else
++ termios->c_cflag |= CS8;
++ }
++
+ cflag = termios->c_cflag;
+
+ if (!old_termios)
+@@ -2228,13 +2242,16 @@ no_skip:
+ } else {
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS7: urb_value |= 7; dbg("Setting CS7"); break;
+- case CS8: urb_value |= 8; dbg("Setting CS8"); break;
+- default:
+- dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
+- }
++ switch (cflag & CSIZE) {
++ case CS7:
++ urb_value |= 7;
++ dev_dbg(&port->dev, "Setting CS7\n");
++ break;
++ default:
++ case CS8:
++ urb_value |= 8;
++ dev_dbg(&port->dev, "Setting CS8\n");
++ break;
+ }
+
+ /* This is needed by the break command since it uses the same command
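
The dropped "if (cflag & CSIZE)" guard in this and the following serial drivers was never a real guard: on Linux CS5 is defined as 0, so a request for 5 data bits makes "cflag & CSIZE" zero and the old code silently skipped the switch. Unconditionally switching on the masked value, with CS8 as the default arm, handles every size. A tiny standalone illustration:

#include <stdio.h>
#include <termios.h>

int main(void)
{
	tcflag_t cflag = CS5;	/* CS5 == 0, so the old guard skipped it */

	printf("cflag & CSIZE = %lu\n", (unsigned long)(cflag & CSIZE));
	switch (cflag & CSIZE) {
	case CS5: puts("5 data bits"); break;
	case CS6: puts("6 data bits"); break;
	case CS7: puts("7 data bits"); break;
	default:
	case CS8: puts("8 data bits"); break;
	}
	return 0;
}
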
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index 9f0b2bf..c0e6486 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -228,14 +228,7 @@ retry:
+ return result;
+ }
+
+- /* Try sending off another urb, unless in irq context (in which case
+- * there will be no free urb). */
+- if (!in_irq())
+- goto retry;
+-
+- clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+-
+- return 0;
++ goto retry; /* try sending off another urb */
+ }
+
+ /**
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5d2501e..80fc40a 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1689,7 +1689,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
+ return -ENODEV;
+
+ status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
++ if (status != 1)
++ return -EIO;
+ status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
++ if (status != 1)
++ return -EIO;
+ result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+ | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+ | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
+@@ -1983,25 +1987,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
+ iflag = tty->termios->c_iflag;
+
+ /* Change the number of bits */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- lData = LCR_BITS_5;
+- break;
++ switch (cflag & CSIZE) {
++ case CS5:
++ lData = LCR_BITS_5;
++ break;
+
+- case CS6:
+- lData = LCR_BITS_6;
+- break;
++ case CS6:
++ lData = LCR_BITS_6;
++ break;
+
+- case CS7:
+- lData = LCR_BITS_7;
+- break;
+- default:
+- case CS8:
+- lData = LCR_BITS_8;
+- break;
+- }
++ case CS7:
++ lData = LCR_BITS_7;
++ break;
++
++ default:
++ case CS8:
++ lData = LCR_BITS_8;
++ break;
+ }
++
+ /* Change the Parity bit */
+ if (cflag & PARENB) {
+ if (cflag & PARODD) {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d8ace82..d6d0fb4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
++#define HUAWEI_PRODUCT_E173S6 0x1C07
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -586,6 +587,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+@@ -648,6 +651,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+@@ -702,6 +709,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+@@ -756,6 +767,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+@@ -810,6 +825,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+@@ -864,6 +883,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+@@ -918,6 +941,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+@@ -1391,6 +1418,23 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 317e503..e3936c1 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -290,24 +290,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ dbg("0xa1:0x21:0:0 %d - %x %x %x %x %x %x %x", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- buf[6] = 5;
+- break;
+- case CS6:
+- buf[6] = 6;
+- break;
+- case CS7:
+- buf[6] = 7;
+- break;
+- default:
+- case CS8:
+- buf[6] = 8;
+- break;
+- }
+- dbg("%s - data bits = %d", __func__, buf[6]);
++ switch (C_CSIZE(tty)) {
++ case CS5:
++ buf[6] = 5;
++ break;
++ case CS6:
++ buf[6] = 6;
++ break;
++ case CS7:
++ buf[6] = 7;
++ break;
++ default:
++ case CS8:
++ buf[6] = 8;
+ }
++ dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
+
+ /* For reference buf[0]:buf[3] baud rate value */
+ /* NOTE: Only the values defined in baud_sup are supported !
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index f3179b0..2f67b99 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -394,22 +394,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ }
+
+ /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- buf[1] |= SET_UART_FORMAT_SIZE_5;
+- break;
+- case CS6:
+- buf[1] |= SET_UART_FORMAT_SIZE_6;
+- break;
+- case CS7:
+- buf[1] |= SET_UART_FORMAT_SIZE_7;
+- break;
+- default:
+- case CS8:
+- buf[1] |= SET_UART_FORMAT_SIZE_8;
+- break;
+- }
++ switch (cflag & CSIZE) {
++ case CS5:
++ buf[1] |= SET_UART_FORMAT_SIZE_5;
++ break;
++ case CS6:
++ buf[1] |= SET_UART_FORMAT_SIZE_6;
++ break;
++ case CS7:
++ buf[1] |= SET_UART_FORMAT_SIZE_7;
++ break;
++ default:
++ case CS8:
++ buf[1] |= SET_UART_FORMAT_SIZE_8;
++ break;
+ }
+
+ /* Set Stop bit2 : 0:1bit 1:2bit */
+diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
+index f0d546c..ca1031b 100644
+--- a/drivers/usb/wusbcore/wa-rpipe.c
++++ b/drivers/usb/wusbcore/wa-rpipe.c
+@@ -332,7 +332,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+ /* FIXME: compute so seg_size > ep->maxpktsize */
+ rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
+ /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+- rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
++ if (usb_endpoint_xfer_isoc(&ep->desc))
++ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
++ else
++ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
+ rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
+ rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+ /* FIXME: use maximum speed as supported or recommended by device */
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 57c01ab..5f6df6e 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -90,7 +90,8 @@
+ #include "wusbhc.h"
+
+ enum {
+- WA_SEGS_MAX = 255,
++ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
++ WA_SEGS_MAX = 128,
+ };
+
+ enum wa_seg_status {
+@@ -444,7 +445,7 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
+ / xfer->seg_size;
+- if (xfer->segs >= WA_SEGS_MAX) {
++ if (xfer->segs > WA_SEGS_MAX) {
+ dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
+ (int)(urb->transfer_buffer_length / xfer->seg_size),
+ WA_SEGS_MAX);
+diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
+index 0443a4f..dab3a0c 100644
+--- a/drivers/video/backlight/atmel-pwm-bl.c
++++ b/drivers/video/backlight/atmel-pwm-bl.c
+@@ -70,7 +70,7 @@ static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
+ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ {
+ struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
+- u8 intensity;
++ u32 intensity;
+
+ if (pwmbl->pdata->pwm_active_low) {
+ intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
+@@ -80,7 +80,7 @@ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
+ }
+
+- return intensity;
++ return intensity & 0xffff;
+ }
+
+ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
+@@ -211,7 +211,8 @@ static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+ struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
+
+ if (pwmbl->gpio_on != -1) {
+- gpio_set_value(pwmbl->gpio_on, 0);
++ gpio_set_value(pwmbl->gpio_on,
++ 0 ^ pwmbl->pdata->on_active_low);
+ gpio_free(pwmbl->gpio_on);
+ }
+ pwm_channel_disable(&pwmbl->pwmc);
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index c858a29..969f74f 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3437,11 +3437,13 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+ return 0;
+ }
+ cifs_acl->version = cpu_to_le16(1);
+- if (acl_type == ACL_TYPE_ACCESS)
++ if (acl_type == ACL_TYPE_ACCESS) {
+ cifs_acl->access_entry_count = cpu_to_le16(count);
+- else if (acl_type == ACL_TYPE_DEFAULT)
++ cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else if (acl_type == ACL_TYPE_DEFAULT) {
+ cifs_acl->default_entry_count = cpu_to_le16(count);
+- else {
++ cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else {
+ cFYI(1, "unknown ACL type %d", acl_type);
+ return 0;
+ }
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 9a37a9b..5ef72c8 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
+ struct configfs_dirent *sd = dentry->d_fsdata;
+
+ if (sd) {
+- BUG_ON(sd->s_dentry != dentry);
+ /* Coordinate with configfs_readdir */
+ spin_lock(&configfs_dirent_lock);
+- sd->s_dentry = NULL;
++ /* Coordinate with configfs_attach_attr, which will increase
++ * sd->s_count and point sd->s_dentry at the newly allocated one.
++ * Only set sd->s_dentry to NULL when this dentry is the only
++ * sd owner.
++ * Otherwise, configfs_d_iput may run just after
++ * configfs_attach_attr and set sd->s_dentry to NULL
++ * even though it is still in use.
++ */
++ if (atomic_read(&sd->s_count) <= 2)
++ sd->s_dentry = NULL;
++
+ spin_unlock(&configfs_dirent_lock);
+ configfs_put(sd);
+ }
+@@ -436,8 +445,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
+ struct configfs_attribute * attr = sd->s_element;
+ int error;
+
++ spin_lock(&configfs_dirent_lock);
+ dentry->d_fsdata = configfs_get(sd);
+ sd->s_dentry = dentry;
++ spin_unlock(&configfs_dirent_lock);
++
+ error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
+ configfs_init_file);
+ if (error) {
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index d5d5297..2a95047 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -413,6 +413,7 @@ static void devpts_kill_sb(struct super_block *sb)
+ {
+ struct pts_fs_info *fsi = DEVPTS_SB(sb);
+
++ ida_destroy(&fsi->allocated_ptys);
+ kfree(fsi);
+ kill_litter_super(sb);
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index a2d0e51..78199eb 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -2032,6 +2032,12 @@ static int __get_dumpable(unsigned long mm_flags)
+ return (ret >= 2) ? 2 : ret;
+ }
+
++/*
++ * This returns the actual value of the suid_dumpable flag. Callers
++ * that use it to check for privilege transitions must test against
++ * SUID_DUMP_USER rather than treating the result as a boolean
++ * value.
++ */
+ int get_dumpable(struct mm_struct *mm)
+ {
+ return __get_dumpable(mm->flags);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index b4e9f3f..05617bd 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1271,6 +1271,7 @@ retry:
+ new_extra_isize = s_min_extra_isize;
+ kfree(is); is = NULL;
+ kfree(bs); bs = NULL;
++ brelse(bh);
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5639efd..3d02931 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3764,8 +3764,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ nfs4_schedule_session_recovery(clp->cl_session);
+- task->tk_status = 0;
+- return -EAGAIN;
++ goto wait_on_recovery;
+ #endif /* CONFIG_NFS_V4_1 */
+ case -NFS4ERR_DELAY:
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
+@@ -3887,11 +3886,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ return;
+
+ switch (task->tk_status) {
+- case -NFS4ERR_STALE_STATEID:
+- case -NFS4ERR_EXPIRED:
+ case 0:
+ renew_lease(data->res.server, data->timestamp);
+ break;
++ case -NFS4ERR_ADMIN_REVOKED:
++ case -NFS4ERR_DELEG_REVOKED:
++ case -NFS4ERR_BAD_STATEID:
++ case -NFS4ERR_OLD_STATEID:
++ case -NFS4ERR_STALE_STATEID:
++ case -NFS4ERR_EXPIRED:
++ task->tk_status = 0;
++ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+@@ -4052,6 +4057,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
+ status = 0;
+ }
+ request->fl_ops->fl_release_private(request);
++ request->fl_ops = NULL;
+ out:
+ return status;
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 99625b8..ade5316 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -177,8 +177,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
+ */
+ memcpy(p, argp->p, avail);
+ /* step to next page */
+- argp->pagelist++;
+ argp->p = page_address(argp->pagelist[0]);
++ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 61b697e..6a66fc0 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -297,41 +297,12 @@ commit_metadata(struct svc_fh *fhp)
+ }
+
+ /*
+- * Set various file attributes.
+- * N.B. After this call fhp needs an fh_put
++ * Go over the attributes and take care of the small differences between
++ * NFS semantics and what Linux expects.
+ */
+-__be32
+-nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+- int check_guard, time_t guardtime)
++static void
++nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ {
+- struct dentry *dentry;
+- struct inode *inode;
+- int accmode = NFSD_MAY_SATTR;
+- int ftype = 0;
+- __be32 err;
+- int host_err;
+- int size_change = 0;
+-
+- if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+- accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
+- if (iap->ia_valid & ATTR_SIZE)
+- ftype = S_IFREG;
+-
+- /* Get inode */
+- err = fh_verify(rqstp, fhp, ftype, accmode);
+- if (err)
+- goto out;
+-
+- dentry = fhp->fh_dentry;
+- inode = dentry->d_inode;
+-
+- /* Ignore any mode updates on symlinks */
+- if (S_ISLNK(inode->i_mode))
+- iap->ia_valid &= ~ATTR_MODE;
+-
+- if (!iap->ia_valid)
+- goto out;
+-
+ /*
+ * NFSv2 does not differentiate between "set-[ac]time-to-now"
+ * which only requires access, and "set-[ac]time-to-X" which
+@@ -341,8 +312,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ * convert to "set to now" instead of "set to explicit time"
+ *
+ * We only call inode_change_ok as the last test as technically
+- * it is not an interface that we should be using. It is only
+- * valid if the filesystem does not define it's own i_op->setattr.
++ * it is not an interface that we should be using.
+ */
+ #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+ #define MAX_TOUCH_TIME_ERROR (30*60)
+@@ -368,30 +338,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid &= ~BOTH_TIME_SET;
+ }
+ }
+-
+- /*
+- * The size case is special.
+- * It changes the file as well as the attributes.
+- */
+- if (iap->ia_valid & ATTR_SIZE) {
+- if (iap->ia_size < inode->i_size) {
+- err = nfsd_permission(rqstp, fhp->fh_export, dentry,
+- NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
+- if (err)
+- goto out;
+- }
+-
+- host_err = get_write_access(inode);
+- if (host_err)
+- goto out_nfserr;
+-
+- size_change = 1;
+- host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
+- if (host_err) {
+- put_write_access(inode);
+- goto out_nfserr;
+- }
+- }
+
+ /* sanitize the mode change */
+ if (iap->ia_valid & ATTR_MODE) {
+@@ -414,32 +360,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+ }
+ }
++}
+
+- /* Change the attributes. */
++static __be32
++nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
++ struct iattr *iap)
++{
++ struct inode *inode = fhp->fh_dentry->d_inode;
++ int host_err;
+
+- iap->ia_valid |= ATTR_CTIME;
++ if (iap->ia_size < inode->i_size) {
++ __be32 err;
+
+- err = nfserr_notsync;
+- if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+- host_err = nfsd_break_lease(inode);
+- if (host_err)
+- goto out_nfserr;
+- fh_lock(fhp);
++ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
++ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
++ if (err)
++ return err;
++ }
+
+- host_err = notify_change(dentry, iap);
+- err = nfserrno(host_err);
+- fh_unlock(fhp);
++ host_err = get_write_access(inode);
++ if (host_err)
++ goto out_nfserrno;
++
++ host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
++ if (host_err)
++ goto out_put_write_access;
++ return 0;
++
++out_put_write_access:
++ put_write_access(inode);
++out_nfserrno:
++ return nfserrno(host_err);
++}
++
++/*
++ * Set various file attributes. After this call fhp needs an fh_put.
++ */
++__be32
++nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
++ int check_guard, time_t guardtime)
++{
++ struct dentry *dentry;
++ struct inode *inode;
++ int accmode = NFSD_MAY_SATTR;
++ int ftype = 0;
++ __be32 err;
++ int host_err;
++ int size_change = 0;
++
++ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
++ accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
++ if (iap->ia_valid & ATTR_SIZE)
++ ftype = S_IFREG;
++
++ /* Get inode */
++ err = fh_verify(rqstp, fhp, ftype, accmode);
++ if (err)
++ goto out;
++
++ dentry = fhp->fh_dentry;
++ inode = dentry->d_inode;
++
++ /* Ignore any mode updates on symlinks */
++ if (S_ISLNK(inode->i_mode))
++ iap->ia_valid &= ~ATTR_MODE;
++
++ if (!iap->ia_valid)
++ goto out;
++
++ nfsd_sanitize_attrs(inode, iap);
++
++ /*
++ * The size case is special, it changes the file in addition to the
++ * attributes.
++ */
++ if (iap->ia_valid & ATTR_SIZE) {
++ err = nfsd_get_write_access(rqstp, fhp, iap);
++ if (err)
++ goto out;
++ size_change = 1;
+ }
++
++ iap->ia_valid |= ATTR_CTIME;
++
++ if (check_guard && guardtime != inode->i_ctime.tv_sec) {
++ err = nfserr_notsync;
++ goto out_put_write_access;
++ }
++
++ host_err = nfsd_break_lease(inode);
++ if (host_err)
++ goto out_put_write_access_nfserror;
++
++ fh_lock(fhp);
++ host_err = notify_change(dentry, iap);
++ fh_unlock(fhp);
++
++out_put_write_access_nfserror:
++ err = nfserrno(host_err);
++out_put_write_access:
+ if (size_change)
+ put_write_access(inode);
+ if (!err)
+ commit_metadata(fhp);
+ out:
+ return err;
+-
+-out_nfserr:
+- err = nfserrno(host_err);
+- goto out;
+ }
+
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index d99a905..eb519de 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -404,7 +404,8 @@ xfs_attrlist_by_handle(
+ return -XFS_ERROR(EPERM);
+ if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
+diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
+index 54e623b..0d685b3 100644
+--- a/fs/xfs/xfs_ioctl32.c
++++ b/fs/xfs/xfs_ioctl32.c
+@@ -361,7 +361,8 @@ xfs_compat_attrlist_by_handle(
+ if (copy_from_user(&al_hreq, arg,
+ sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
+diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
+index 4fd95a3..0532279 100644
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -58,6 +58,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+ {
+ sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+ sg1[num - 1].page_link &= ~0x02;
++ sg1[num - 1].page_link |= 0x01;
+ }
+
+ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+@@ -65,7 +66,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+ if (sg_is_last(sg))
+ return NULL;
+
+- return (++sg)->length ? sg : (void *)sg_page(sg);
++ return (++sg)->length ? sg : sg_chain_ptr(sg);
+ }
+
+ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index acd8d4b..d337419 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -112,9 +112,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
+ extern void would_dump(struct linux_binprm *, struct file *);
+
+ extern int suid_dumpable;
+-#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+-#define SUID_DUMP_USER 1 /* Dump as user of process */
+-#define SUID_DUMP_ROOT 2 /* Dump as root */
+
+ /* Stack area protections */
+ #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index d8e636e..cba9593 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -27,5 +27,3 @@
+ #define __must_be_array(a) 0
+
+ #endif
+-
+-#define uninitialized_var(x) x
+diff --git a/include/linux/msg.h b/include/linux/msg.h
+index 56abf155..70fc369 100644
+--- a/include/linux/msg.h
++++ b/include/linux/msg.h
+@@ -76,9 +76,9 @@ struct msginfo {
+
+ /* one msg_msg structure for each message */
+ struct msg_msg {
+- struct list_head m_list;
+- long m_type;
+- int m_ts; /* message text size */
++ struct list_head m_list;
++ long m_type;
++ size_t m_ts; /* message text size */
+ struct msg_msgseg* next;
+ void *security;
+ /* the actual message follows immediately */
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index daad4e6..3887901 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -361,7 +361,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
+ bitpos = (map_bankwidth(map)-1-i)*8;
+ #endif
+ orig.x[0] &= ~(0xff << bitpos);
+- orig.x[0] |= buf[i-start] << bitpos;
++ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
+ }
+ }
+ return orig;
+@@ -380,7 +380,7 @@ static inline map_word map_word_ff(struct map_info *map)
+
+ if (map_bankwidth(map) < MAP_FF_LIMIT) {
+ int bw = 8 * map_bankwidth(map);
+- r.x[0] = (1 << bw) - 1;
++ r.x[0] = (1UL << bw) - 1;
+ } else {
+ for (i=0; i<map_words(map); i++)
+ r.x[i] = ~0UL;
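
Both map.h hunks above fix the same C pitfall: in "buf[i-start] << bitpos" the u8 is promoted to a 32-bit signed int, so for bank widths past 32 bits the shift is undefined and the high half of the map word is lost; "(1 << bw) - 1" is likewise undefined once bw reaches 32. Casting to unsigned long first keeps the arithmetic in the full word. A short demonstration (what the "bad" line prints is typical LP64 compiler behaviour, not guaranteed by the standard):

#include <stdio.h>

int main(void)
{
	unsigned char b = 0x80;
	int bitpos = 31;
	int bw = 32;

	/* b is promoted to signed int; shifting 0x80 into the sign bit
	 * and then widening sign-extends on typical LP64 targets. */
	unsigned long bad  = b << bitpos;
	unsigned long good = (unsigned long)b << bitpos;

	printf("bad=%lx good=%lx\n", bad, good);
	/* (1 << 32) is undefined for a 32-bit int; 1UL is 64 bits on LP64. */
	printf("mask=%lx\n", (1UL << bw) - 1);
	return 0;
}
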
+diff --git a/include/linux/net.h b/include/linux/net.h
+index b7ca08e..bd4f6c7 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -197,6 +197,14 @@ struct proto_ops {
+ #endif
+ int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len);
++ /* Notes for implementing recvmsg:
++ * ===============================
++ * msg->msg_namelen should get updated by the recvmsg handlers
++ * iff msg_name != NULL. It is by default 0 to prevent
++ * returning uninitialized memory to user space. The recvfrom
++ * handlers can assume that msg.msg_name is either NULL or has
++ * a minimum size of sizeof(struct sockaddr_storage).
++ */
+ int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 3db3da1..d93f417 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1579,6 +1579,7 @@
+ #define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
+
+ #define PCI_VENDOR_ID_MARVELL 0x11ab
++#define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b
+ #define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
+ #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
+ #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 7e77cee..f5e1311 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -89,9 +89,9 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
+ {
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+- state->s1 = __seed(i, 1);
+- state->s2 = __seed(i, 7);
+- state->s3 = __seed(i, 15);
++ state->s1 = __seed(i, 2);
++ state->s2 = __seed(i, 8);
++ state->s3 = __seed(i, 16);
+ }
+
+ #ifdef CONFIG_ARCH_RANDOM
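
The new minimums line up with the Tausworthe recurrence behind prandom32: component s1 ignores its lowest bit in the feedback, s2 its lowest three, s3 its lowest four, so valid seeds must be at least 2, 8 and 16 respectively; the old 1/7/15 thresholds let exactly the degenerate values through. The kernel's __seed() clamps by addition, roughly (a sketch mirroring the helper, not a copy of it):

#include <stdint.h>

/* Clamp a seed component to its per-generator minimum; values below
 * the minimum would make that LFSR component collapse to zero. */
static uint32_t seed_clamp(uint32_t x, uint32_t min)
{
	return (x < min) ? x + min : x;
}
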
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8204898..312d047 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -403,6 +403,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+ extern void set_dumpable(struct mm_struct *mm, int value);
+ extern int get_dumpable(struct mm_struct *mm);
+
++#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
++#define SUID_DUMP_USER 1 /* Dump as user of process */
++#define SUID_DUMP_ROOT 2 /* Dump as root */
++
+ /* mm flags */
+ /* dumpable bits */
+ #define MMF_DUMPABLE 0 /* core dump is permitted */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 06aed72..b935e6c 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -466,7 +466,7 @@ extern int compat_ip_getsockopt(struct sock *sk, int level,
+ int optname, char __user *optval, int __user *optlen);
+ extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+
+-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
++extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+ extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 4d549cf..0580673 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -602,8 +602,10 @@ extern int compat_ipv6_getsockopt(struct sock *sk,
+ extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
++extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
++extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
+ extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+ extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
+index c425062..ab240bb 100644
+--- a/include/sound/memalloc.h
++++ b/include/sound/memalloc.h
+@@ -101,7 +101,7 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
+ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_sg_buf *sgbuf, size_t offset)
+ {
+ dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+- addr &= PAGE_MASK;
++ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+ return addr + offset % PAGE_SIZE;
+ }
+
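
PAGE_MASK is an unsigned long; on a 32-bit kernel with a 64-bit dma_addr_t it zero-extends to 0x00000000fffff000, so "addr &= PAGE_MASK" silently clears the upper 32 bits of any buffer above 4 GiB. Building the mask in dma_addr_t itself keeps them. A standalone illustration with fixed-width types (4096 stands in for PAGE_SIZE):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x1234567890ULL;	/* DMA address above 4 GiB */
	uint32_t narrow_mask = ~(4096u - 1);	/* PAGE_MASK as a 32-bit long */

	printf("truncated: %llx\n",
	       (unsigned long long)(addr & narrow_mask));
	printf("preserved: %llx\n",
	       (unsigned long long)(addr & ~((uint64_t)4096 - 1)));
	return 0;
}
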
+diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
+index 7697249..763bf05 100644
+--- a/include/trace/ftrace.h
++++ b/include/trace/ftrace.h
+@@ -379,7 +379,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+ __data_size += (len) * sizeof(type);
+
+ #undef __string
+-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
++#define __string(item, src) __dynamic_array(char, item, \
++ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+ #undef DECLARE_EVENT_CLASS
+ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+@@ -504,7 +505,7 @@ static inline notrace int ftrace_get_offsets_##call( \
+
+ #undef __assign_str
+ #define __assign_str(dst, src) \
+- strcpy(__get_str(dst), src);
++ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+ #undef TP_fast_assign
+ #define TP_fast_assign(args...) args
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index 5652101..fc6fded 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -37,15 +37,15 @@ struct msg_msgseg {
+ /* the next part of the message follows immediately */
+ };
+
+-#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg))
+-#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg))
++#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
++#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
+
+-struct msg_msg *load_msg(const void __user *src, int len)
++struct msg_msg *load_msg(const void __user *src, size_t len)
+ {
+ struct msg_msg *msg;
+ struct msg_msgseg **pseg;
+ int err;
+- int alen;
++ size_t alen;
+
+ alen = len;
+ if (alen > DATALEN_MSG)
+@@ -99,9 +99,9 @@ out_err:
+ return ERR_PTR(err);
+ }
+
+-int store_msg(void __user *dest, struct msg_msg *msg, int len)
++int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
+ {
+- int alen;
++ size_t alen;
+ struct msg_msgseg *seg;
+
+ alen = len;
+diff --git a/ipc/util.h b/ipc/util.h
+index 6f5c20b..0bfc934 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -138,8 +138,8 @@ int ipc_parse_version (int *cmd);
+ #endif
+
+ extern void free_msg(struct msg_msg *msg);
+-extern struct msg_msg *load_msg(const void __user *src, int len);
+-extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
++extern struct msg_msg *load_msg(const void __user *src, size_t len);
++extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
+
+ extern void recompute_msgmni(struct ipc_namespace *);
+
+diff --git a/kernel/audit.c b/kernel/audit.c
+index d4bc594..e14bc74 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -625,7 +625,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
+ char *ctx = NULL;
+ u32 len;
+
+- if (!audit_enabled) {
++ if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
+ *ab = NULL;
+ return rc;
+ }
+@@ -684,6 +684,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (msg_type) {
+ case AUDIT_GET:
++ status_set.mask = 0;
+ status_set.enabled = audit_enabled;
+ status_set.failure = audit_failure;
+ status_set.pid = audit_pid;
+@@ -695,7 +696,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ &status_set, sizeof(status_set));
+ break;
+ case AUDIT_SET:
+- if (nlh->nlmsg_len < sizeof(struct audit_status))
++ if (nlmsg_len(nlh) < sizeof(struct audit_status))
+ return -EINVAL;
+ status_get = (struct audit_status *)data;
+ if (status_get->mask & AUDIT_STATUS_ENABLED) {
+@@ -899,7 +900,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct task_struct *tsk;
+ unsigned long flags;
+
+- if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
++ if (nlmsg_len(nlh) < sizeof(struct audit_tty_status))
+ return -EINVAL;
+ s = data;
+ if (s->enabled != 0 && s->enabled != 1)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 835eee6..57eb98d 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -983,8 +983,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+
+- if (need_loop)
++ if (need_loop) {
++ local_irq_disable();
+ write_seqcount_begin(&tsk->mems_allowed_seq);
++ }
+
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+@@ -992,8 +994,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
+
+- if (need_loop)
++ if (need_loop) {
+ write_seqcount_end(&tsk->mems_allowed_seq);
++ local_irq_enable();
++ }
+
+ task_unlock(tsk);
+ }
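Note on the kernel/cpuset.c hunk above: the write side of mems_allowed_seq
must run with interrupts off. If an interrupt arrives on the same CPU while
the sequence count is odd and the handler enters a seqcount read loop, it
spins forever waiting for a writer that can never resume. The reader pattern
that motivates this, as a sketch:

    unsigned int seq;
    nodemask_t mems;

    do {
            seq = read_seqcount_begin(&tsk->mems_allowed_seq);
            mems = tsk->mems_allowed;
    } while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));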
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 1d0538e..8888815 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -285,7 +285,7 @@ again:
+ put_page(page);
+ /* serialize against __split_huge_page_splitting() */
+ local_irq_disable();
+- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
++ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
+ page_head = compound_head(page);
+ /*
+ * page_head is valid pointer but we must pin
+diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
+index 15e53b1..dcd3f97 100644
+--- a/kernel/irq/pm.c
++++ b/kernel/irq/pm.c
+@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
+ bool is_early = desc->action &&
+ desc->action->flags & IRQF_EARLY_RESUME;
+
+- if (is_early != want_early)
++ if (!is_early && want_early)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index cbe2c14..380291e 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1390,7 +1390,11 @@ int hibernate_preallocate_memory(void)
+ * highmem and non-highmem zones separately.
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+- alloc = (count - max_size) - pages_highmem;
++ alloc = count - max_size;
++ if (alloc > pages_highmem)
++ alloc -= pages_highmem;
++ else
++ alloc = 0;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
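Note on the kernel/power/snapshot.c hunk above: all three quantities are
unsigned, so when pages_highmem exceeded count - max_size the old expression
wrapped around instead of going negative, and hibernation tried to
preallocate an absurd number of pages. Illustrative sketch of the wrap:

    unsigned long count = 100, max_size = 90, pages_highmem = 20;
    unsigned long alloc = (count - max_size) - pages_highmem;
    /* alloc is now ULONG_MAX - 9, not -10; the fix clamps it to 0 */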
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 67fedad..f79803a 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -246,7 +246,8 @@ ok:
+ smp_rmb();
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
++ if (dumpable != SUID_DUMP_USER &&
++ !task_ns_capable(task, CAP_SYS_PTRACE))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
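Note on the kernel/ptrace.c hunk above: get_dumpable() is tri-state, not
boolean. The old "!dumpable" test treated SUID_DUMP_ROOT as dumpable,
letting an unprivileged tracer attach to a process that had dropped
privileges. The states, paraphrased from the kernel headers of this era:

    #define SUID_DUMP_DISABLE 0  /* no coredump, no ptrace access   */
    #define SUID_DUMP_USER    1  /* dump/attach allowed as the user */
    #define SUID_DUMP_ROOT    2  /* dump readable by root only      */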
+diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
+index a6710a1..f4010e2 100644
+--- a/kernel/sched_debug.c
++++ b/kernel/sched_debug.c
+@@ -213,6 +213,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ SEQ_printf(m, " .%-30s: %d\n", "load_tg",
+ atomic_read(&cfs_rq->tg->load_weight));
+ #endif
++#ifdef CONFIG_CFS_BANDWIDTH
++ SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
++ cfs_rq->tg->cfs_bandwidth.timer_active);
++ SEQ_printf(m, " .%-30s: %d\n", "throttled",
++ cfs_rq->throttled);
++ SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
++ cfs_rq->throttle_count);
++#endif
+
+ print_cfs_group_stats(m, cpu, cfs_rq->tg);
+ #endif
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index c261da7..5b9e456 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1527,6 +1527,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ cfs_rq->throttled_timestamp = rq->clock;
+ raw_spin_lock(&cfs_b->lock);
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++ if (!cfs_b->timer_active)
++ __start_cfs_bandwidth(cfs_b);
+ raw_spin_unlock(&cfs_b->lock);
+ }
+
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 8a46f5d..0907e43 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -468,7 +468,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
+ clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ return hrtimer_get_res(baseid, tp);
+ }
+@@ -485,7 +485,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
+ struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ *tp = ktime_to_timespec(base->gettime());
+ return 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 226776b..d40d7f6 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -259,9 +259,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+
+ static int __register_ftrace_function(struct ftrace_ops *ops)
+ {
+- if (ftrace_disabled)
+- return -ENODEV;
+-
+ if (FTRACE_WARN_ON(ops == &global_ops))
+ return -EINVAL;
+
+@@ -290,9 +287,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+
+- if (ftrace_disabled)
+- return -ENODEV;
+-
+ if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+ return -EBUSY;
+
+@@ -1017,6 +1011,11 @@ static struct ftrace_page *ftrace_pages;
+
+ static struct dyn_ftrace *ftrace_free_records;
+
++static bool ftrace_hash_empty(struct ftrace_hash *hash)
++{
++ return !hash || !hash->count;
++}
++
+ static struct ftrace_func_entry *
+ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+ {
+@@ -1025,7 +1024,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+
+- if (!hash->count)
++ if (ftrace_hash_empty(hash))
+ return NULL;
+
+ if (hash->size_bits > 0)
+@@ -1169,7 +1168,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+ return NULL;
+
+ /* Empty hash? */
+- if (!hash || !hash->count)
++ if (ftrace_hash_empty(hash))
+ return new_hash;
+
+ size = 1 << hash->size_bits;
+@@ -1294,9 +1293,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ filter_hash = rcu_dereference_raw(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+
+- if ((!filter_hash || !filter_hash->count ||
++ if ((ftrace_hash_empty(filter_hash) ||
+ ftrace_lookup_ip(filter_hash, ip)) &&
+- (!notrace_hash || !notrace_hash->count ||
++ (ftrace_hash_empty(notrace_hash) ||
+ !ftrace_lookup_ip(notrace_hash, ip)))
+ ret = 1;
+ else
+@@ -1348,7 +1347,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (filter_hash) {
+ hash = ops->filter_hash;
+ other_hash = ops->notrace_hash;
+- if (!hash || !hash->count)
++ if (ftrace_hash_empty(hash))
+ all = 1;
+ } else {
+ inc = !inc;
+@@ -1358,7 +1357,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ * If the notrace hash has no items,
+ * then there's nothing to do.
+ */
+- if (hash && !hash->count)
++ if (ftrace_hash_empty(hash))
+ return;
+ }
+
+@@ -1375,8 +1374,8 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
+ match = 1;
+ } else {
+- in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
+- in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
++ in_hash = !!ftrace_lookup_ip(hash, rec->ip);
++ in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
+
+ /*
+ *
+@@ -1384,7 +1383,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (filter_hash && in_hash && !in_other_hash)
+ match = 1;
+ else if (!filter_hash && in_hash &&
+- (in_other_hash || !other_hash->count))
++ (in_other_hash || ftrace_hash_empty(other_hash)))
+ match = 1;
+ }
+ if (!match)
+@@ -1698,10 +1697,15 @@ static void ftrace_startup_enable(int command)
+ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ {
+ bool hash_enable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
++ ret = __register_ftrace_function(ops);
++ if (ret)
++ return ret;
++
+ ftrace_start_up++;
+ command |= FTRACE_UPDATE_CALLS;
+
+@@ -1723,12 +1727,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ return 0;
+ }
+
+-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
++static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ {
+ bool hash_disable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+- return;
++ return -ENODEV;
++
++ ret = __unregister_ftrace_function(ops);
++ if (ret)
++ return ret;
+
+ ftrace_start_up--;
+ /*
+@@ -1763,9 +1772,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ }
+
+ if (!command || !ftrace_enabled)
+- return;
++ return 0;
+
+ ftrace_run_update_code(command);
++ return 0;
+ }
+
+ static void ftrace_startup_sysctl(void)
+@@ -1794,12 +1804,57 @@ static cycle_t ftrace_update_time;
+ static unsigned long ftrace_update_cnt;
+ unsigned long ftrace_update_tot_cnt;
+
+-static int ops_traces_mod(struct ftrace_ops *ops)
++static inline int ops_traces_mod(struct ftrace_ops *ops)
+ {
+- struct ftrace_hash *hash;
++ /*
++ * Filter_hash being empty will default to trace module.
++ * But notrace hash requires a test of individual module functions.
++ */
++ return ftrace_hash_empty(ops->filter_hash) &&
++ ftrace_hash_empty(ops->notrace_hash);
++}
+
+- hash = ops->filter_hash;
+- return !!(!hash || !hash->count);
++/*
++ * Check if the current ops references the record.
++ *
++ * If the ops traces all functions, then it was already accounted for.
++ * If the ops does not trace the current record function, skip it.
++ * If the ops ignores the function via notrace filter, skip it.
++ */
++static inline bool
++ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
++{
++ /* If ops isn't enabled, ignore it */
++ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
++ return 0;
++
++ /* If ops traces all mods, we already accounted for it */
++ if (ops_traces_mod(ops))
++ return 0;
++
++ /* The function must be in the filter */
++ if (!ftrace_hash_empty(ops->filter_hash) &&
++ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
++ return 0;
++
++ /* If in notrace hash, we ignore it too */
++ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
++ return 0;
++
++ return 1;
++}
++
++static int referenced_filters(struct dyn_ftrace *rec)
++{
++ struct ftrace_ops *ops;
++ int cnt = 0;
++
++ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
++ if (ops_references_rec(ops, rec))
++ cnt++;
++ }
++
++ return cnt;
+ }
+
+ static int ftrace_update_code(struct module *mod)
+@@ -1807,6 +1862,7 @@ static int ftrace_update_code(struct module *mod)
+ struct dyn_ftrace *p;
+ cycle_t start, stop;
+ unsigned long ref = 0;
++ bool test = false;
+
+ /*
+ * When adding a module, we need to check if tracers are
+@@ -1819,9 +1875,12 @@ static int ftrace_update_code(struct module *mod)
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next) {
+- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
+- ops_traces_mod(ops))
+- ref++;
++ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
++ if (ops_traces_mod(ops))
++ ref++;
++ else
++ test = true;
++ }
+ }
+ }
+
+@@ -1829,6 +1888,7 @@ static int ftrace_update_code(struct module *mod)
+ ftrace_update_cnt = 0;
+
+ while (ftrace_new_addrs) {
++ int cnt = ref;
+
+ /* If something went wrong, bail without enabling anything */
+ if (unlikely(ftrace_disabled))
+@@ -1836,7 +1896,9 @@ static int ftrace_update_code(struct module *mod)
+
+ p = ftrace_new_addrs;
+ ftrace_new_addrs = p->newlist;
+- p->flags = ref;
++ if (test)
++ cnt += referenced_filters(p);
++ p->flags = cnt;
+
+ /*
+ * Do the initial record conversion from mcount jump
+@@ -1859,7 +1921,7 @@ static int ftrace_update_code(struct module *mod)
+ * conversion puts the module to the correct state, thus
+ * passing the ftrace_make_call check.
+ */
+- if (ftrace_start_up && ref) {
++ if (ftrace_start_up && cnt) {
+ int failed = __ftrace_replace_code(p, 1);
+ if (failed) {
+ ftrace_bug(failed, p->ip);
+@@ -2112,7 +2174,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
+ * off, we can short cut and just print out that all
+ * functions are enabled.
+ */
+- if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
++ if (iter->flags & FTRACE_ITER_FILTER &&
++ ftrace_hash_empty(ops->filter_hash)) {
+ if (*pos > 0)
+ return t_hash_start(m, pos);
+ iter->flags |= FTRACE_ITER_PRINTALL;
+@@ -2564,16 +2627,13 @@ static void __enable_ftrace_function_probe(void)
+ if (i == FTRACE_FUNC_HASHSIZE)
+ return;
+
+- ret = __register_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ret = ftrace_startup(&trace_probe_ops, 0);
++ ret = ftrace_startup(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 1;
+ }
+
+ static void __disable_ftrace_function_probe(void)
+ {
+- int ret;
+ int i;
+
+ if (!ftrace_probe_registered)
+@@ -2586,9 +2646,7 @@ static void __disable_ftrace_function_probe(void)
+ }
+
+ /* no more funcs left */
+- ret = __unregister_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ftrace_shutdown(&trace_probe_ops, 0);
++ ftrace_shutdown(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 0;
+ }
+@@ -3561,12 +3619,15 @@ device_initcall(ftrace_nodyn_init);
+ static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+ static inline void ftrace_startup_enable(int command) { }
+ /* Keep as macros so we do not need to define the commands */
+-# define ftrace_startup(ops, command) \
+- ({ \
+- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+- 0; \
++# define ftrace_startup(ops, command) \
++ ({ \
++ int ___ret = __register_ftrace_function(ops); \
++ if (!___ret) \
++ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
++ ___ret; \
+ })
+-# define ftrace_shutdown(ops, command) do { } while (0)
++# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
++
+ # define ftrace_startup_sysctl() do { } while (0)
+ # define ftrace_shutdown_sysctl() do { } while (0)
+
+@@ -3906,15 +3967,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
+
+ mutex_lock(&ftrace_lock);
+
+- if (unlikely(ftrace_disabled))
+- goto out_unlock;
+-
+- ret = __register_ftrace_function(ops);
+- if (!ret)
+- ret = ftrace_startup(ops, 0);
+-
++ ret = ftrace_startup(ops, 0);
+
+- out_unlock:
+ mutex_unlock(&ftrace_lock);
+ return ret;
+ }
+@@ -3931,9 +3985,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
+ int ret;
+
+ mutex_lock(&ftrace_lock);
+- ret = __unregister_ftrace_function(ops);
+- if (!ret)
+- ftrace_shutdown(ops, 0);
++ ret = ftrace_shutdown(ops, 0);
+ mutex_unlock(&ftrace_lock);
+
+ return ret;
+@@ -4127,6 +4179,12 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+ return NOTIFY_DONE;
+ }
+
++/* Just a place holder for function graph */
++static struct ftrace_ops fgraph_ops __read_mostly = {
++ .func = ftrace_stub,
++ .flags = FTRACE_OPS_FL_GLOBAL,
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+ {
+@@ -4153,7 +4211,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ ftrace_graph_return = retfunc;
+ ftrace_graph_entry = entryfunc;
+
+- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
++ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+
+ out:
+ mutex_unlock(&ftrace_lock);
+@@ -4170,7 +4228,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_active--;
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
+- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
++ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+
+diff --git a/lib/random32.c b/lib/random32.c
+index fc3545a..1f44bdc 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -92,7 +92,7 @@ void srandom32(u32 entropy)
+ */
+ for_each_possible_cpu (i) {
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
+- state->s1 = __seed(state->s1 ^ entropy, 1);
++ state->s1 = __seed(state->s1 ^ entropy, 2);
+ }
+ }
+ EXPORT_SYMBOL(srandom32);
+@@ -109,9 +109,9 @@ static int __init random32_init(void)
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+ #define LCG(x) ((x) * 69069) /* super-duper LCG */
+- state->s1 = __seed(LCG(i + jiffies), 1);
+- state->s2 = __seed(LCG(state->s1), 7);
+- state->s3 = __seed(LCG(state->s2), 15);
++ state->s1 = __seed(LCG(i + jiffies), 2);
++ state->s2 = __seed(LCG(state->s1), 8);
++ state->s3 = __seed(LCG(state->s2), 16);
+
+ /* "warm it up" */
+ prandom32(state);
+@@ -138,9 +138,9 @@ static int __init random32_reseed(void)
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+- state->s1 = __seed(seeds[0], 1);
+- state->s2 = __seed(seeds[1], 7);
+- state->s3 = __seed(seeds[2], 15);
++ state->s1 = __seed(seeds[0], 2);
++ state->s2 = __seed(seeds[1], 8);
++ state->s3 = __seed(seeds[2], 16);
+
+ /* mix it in */
+ prandom32(state);
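Note on the lib/random32.c hunks above: the three-component Tausworthe
generator is only well-defined when s1 >= 2, s2 >= 8 and s3 >= 16; the old
floors of 1, 7 and 15 were off by one and admitted degenerate states. The
__seed() helper, paraphrased, shows why its second argument is the floor:

    static u32 __seed(u32 x, u32 m)
    {
            return (x < m) ? x + m : x;  /* result is always >= m */
    }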
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index d74c317..ae02e42 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -25,6 +25,7 @@
+ #include <linux/kallsyms.h>
+ #include <linux/uaccess.h>
+ #include <linux/ioport.h>
++#include <linux/cred.h>
+ #include <net/addrconf.h>
+
+ #include <asm/page.h> /* for PAGE_SIZE */
+@@ -892,10 +893,35 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ spec.field_width = 2 * sizeof(void *);
+ return string(buf, end, "pK-error", spec);
+ }
+- if (!((kptr_restrict == 0) ||
+- (kptr_restrict == 1 &&
+- has_capability_noaudit(current, CAP_SYSLOG))))
++
++ switch (kptr_restrict) {
++ case 0:
++ /* Always print %pK values */
++ break;
++ case 1: {
++ /*
++ * Only print the real pointer value if the current
++ * process has CAP_SYSLOG and is running with the
++ * same credentials it started with. This is because
++ * access to files is checked at open() time, but %pK
++ * checks permission at read() time. We don't want to
++ * leak pointer values if a binary opens a file using
++ * %pK and then elevates privileges before reading it.
++ */
++ const struct cred *cred = current_cred();
++
++ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
++ cred->euid != cred->uid ||
++ cred->egid != cred->gid)
++ ptr = NULL;
++ break;
++ }
++ case 2:
++ default:
++ /* Always print 0's for %pK */
+ ptr = NULL;
++ break;
++ }
+ break;
+ }
+ spec.flags |= SMALL;
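Note on the lib/vsprintf.c hunk above: kptr_restrict gains well-defined
per-value semantics. 0 always prints %pK pointers, 2 (and anything higher)
always prints zeros, and 1 prints the real value only for CAP_SYSLOG holders
whose euid/egid still match their uid/gid - since %pK output is checked at
read() time rather than open() time, a binary that opens a file and then
elevates privileges must not become a pointer leak. Usage is unchanged for
callers; for example:

    /* kernel code opts in per format specifier: */
    printk(KERN_INFO "handle at %pK\n", ptr);
    /* policy is then set at runtime, e.g. sysctl kernel.kptr_restrict=2 */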
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index bfa9ab93..334d4cd 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1740,7 +1740,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ size_t size, int flags)
+ {
+ struct sock *sk = sock->sk;
+- struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
+ struct ddpehdr *ddp;
+ int copied = 0;
+ int offset = 0;
+@@ -1769,14 +1768,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ }
+ err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+
+- if (!err) {
+- if (sat) {
+- sat->sat_family = AF_APPLETALK;
+- sat->sat_port = ddp->deh_sport;
+- sat->sat_addr.s_node = ddp->deh_snode;
+- sat->sat_addr.s_net = ddp->deh_snet;
+- }
+- msg->msg_namelen = sizeof(*sat);
++ if (!err && msg->msg_name) {
++ struct sockaddr_at *sat = msg->msg_name;
++ sat->sat_family = AF_APPLETALK;
++ sat->sat_port = ddp->deh_sport;
++ sat->sat_addr.s_node = ddp->deh_snode;
++ sat->sat_addr.s_net = ddp->deh_snet;
++ msg->msg_namelen = sizeof(*sat);
+ }
+
+ skb_free_datagram(sk, skb); /* Free the datagram. */
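Note: the appletalk hunk above is the first of a long series (atm, ax25,
bluetooth, caif, irda, iucv, key, l2tp, llc, netlink, netrom, nfc, packet,
phonet, rds, rose, rxrpc, tipc, unix and x25 below, plus the ipv4/ipv6
paths) that all enforce one rule: a protocol's recvmsg() must write
msg_namelen only when it actually fills msg_name, while the net/socket.c
hunks further down zero msg_namelen up front instead of trusting each
protocol. Stray "msg->msg_namelen = 0;" initializations become redundant and
are removed, and oversized caller-supplied lengths are clamped to
sizeof(struct sockaddr_storage) rather than rejected. The common pattern,
with placeholder names:

    /* sketch of the pattern each protocol now follows ("foo" is
     * hypothetical, standing in for the per-protocol address type) */
    if (msg->msg_name) {
            struct sockaddr_foo *addr = msg->msg_name;

            addr->foo_family = AF_FOO;
            /* ... fill in the peer address from the skb ... */
            msg->msg_namelen = sizeof(*addr);
    }
    /* else: leave msg_namelen alone; the caller preset it to 0, so no
     * uninitialized kernel stack bytes can be copied to userspace */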
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 43b6bfe..0ca06e8 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -500,8 +500,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
+- msg->msg_namelen = 0;
+-
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 86ac37f..7b8db0e 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1635,11 +1635,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (msg->msg_namelen != 0) {
+- struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
++ if (msg->msg_name) {
+ ax25_digi digi;
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
++ struct sockaddr_ax25 *sax = msg->msg_name;
+
+ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 838f113..0938f6b 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -245,8 +245,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -311,8 +309,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ BT_DBG("sk %p size %zu", sk, size);
+
+ lock_sock(sk);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 8361ee4..bb78c75 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -448,8 +448,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 82ce164..14c4864 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -627,7 +627,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
+- msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f3f75ad..56693c3 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -170,6 +170,8 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
+ del_nbp(p);
+ }
+
++ br_fdb_delete_by_port(br, NULL, 1);
++
+ del_timer_sync(&br->gc_timer);
+
+ br_sysfs_delbr(br->dev);
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 53a8e37..7fac75f 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -320,8 +320,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
+- m->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+@@ -395,8 +393,6 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+diff --git a/net/compat.c b/net/compat.c
+index 3139ef2..41724c9 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+@@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ if (err < 0)
+ return err;
+ }
+- kern_msg->msg_name = kern_address;
++ if (kern_msg->msg_name)
++ kern_msg->msg_name = kern_address;
+ } else
+ kern_msg->msg_name = NULL;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8e455b8..7bcf37d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2609,6 +2609,8 @@ ip:
+ goto done;
+
+ ip = (const struct iphdr *) (skb->data + nhoff);
++ if (ip->ihl < 5)
++ goto done;
+ if (ip_is_fragment(ip))
+ ip_proto = 0;
+ else
+@@ -4515,7 +4517,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+- if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
++ if (ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+ }
+
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index c02e63c..c0c21b1 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -443,7 +443,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (frh->action && (frh->action != rule->action))
+ continue;
+
+- if (frh->table && (frh_get_table(frh, tb) != rule->table))
++ if (frh_get_table(frh, tb) &&
++ (frh_get_table(frh, tb) != rule->table))
+ continue;
+
+ if (tb[FRA_PRIORITY] &&
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index c40f27e..139ef93 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
+ if (err < 0)
+ return err;
+ }
+- m->msg_name = address;
++ if (m->msg_name)
++ m->msg_name = address;
+ } else {
+ m->msg_name = NULL;
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 2ef7da0..80aeac9 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2524,6 +2524,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ if (x) {
+ int ret;
+ __u8 *eth;
++ struct iphdr *iph;
++
+ nhead = x->props.header_len - skb_headroom(skb);
+ if (nhead > 0) {
+ ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
+@@ -2545,6 +2547,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ eth = (__u8 *) skb_push(skb, ETH_HLEN);
+ memcpy(eth, pkt_dev->hh, 12);
+ *(u16 *) &eth[12] = protocol;
++
++ /* Update IPv4 header len as well as checksum value */
++ iph = ip_hdr(skb);
++ iph->tot_len = htons(skb->len - ETH_HLEN);
++ ip_send_check(iph);
+ }
+ }
+ return 1;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 19d6aef..5d42df2 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -563,7 +563,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+- case 1: /* 10b */
++ case 2: /* 10b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+@@ -576,7 +576,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+- case 2: /* 01b */
++ case 1: /* 01b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
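Note on the net/ieee802154/6lowpan.c hunk above: the switch values were
swapped relative to the two-bit TF field they decode. The full encoding,
paraphrased from RFC 6282 (IPHC traffic class / flow label compression):

    /* TF = 00: ECN + DSCP + 4-bit pad + Flow Label (4 bytes in-line) */
    /* TF = 01: ECN + 2-bit pad + Flow Label (3 bytes), DSCP elided   */
    /* TF = 10: ECN + DSCP (1 byte), Flow Label elided                */
    /* TF = 11: traffic class and flow label fully elided             */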
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 424fafb..ec07510 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 3b36002..542a9c1 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -374,7 +374,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+@@ -411,6 +411,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ serr->addr_offset);
+ sin->sin_port = serr->port;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
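Note on the net/ipv4/ip_sockglue.c hunk above (mirrored for IPv6 further
down): ip_recv_error() and ipv6_recv_error()/ipv6_recv_rxpmtu() gain an
addr_len out-parameter and report a length only when they fill the address,
extending the msg_name rule to the MSG_ERRQUEUE and IPV6_RECVPATHMTU paths.
A userspace sketch of the consumer side (assumes an IP_RECVERR-enabled
socket named fd):

    #include <sys/socket.h>
    #include <netinet/in.h>

    struct sockaddr_in sin;
    char cbuf[512];
    struct msghdr msg = {
            .msg_name       = &sin,
            .msg_namelen    = sizeof(sin),
            .msg_control    = cbuf,
            .msg_controllen = sizeof(cbuf),
    };
    ssize_t n = recvmsg(fd, &msg, MSG_ERRQUEUE);
    /* after the fix, msg.msg_namelen is meaningful only if an
     * address was actually attached to the queued error */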
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 294a380..00975b6 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -567,7 +567,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -623,7 +623,6 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
+ {
+ struct inet_sock *isk = inet_sk(sk);
+- struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+ struct sk_buff *skb;
+ int copied, err;
+
+@@ -632,11 +631,8 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+@@ -656,11 +652,14 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+- if (sin) {
++ if (msg->msg_name) {
++ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
++
+ sin->sin_family = AF_INET;
+ sin->sin_port = 0 /* skb->h.uh->source */;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (isk->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 2815014..cfded93 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -686,11 +686,8 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE) {
+- err = ip_recv_error(sk, msg, len);
++ err = ip_recv_error(sk, msg, len, addr_len);
+ goto out;
+ }
+
+@@ -716,6 +713,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index a97c9ad..92d7138 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -182,7 +182,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ return err;
+ }
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5decc93..8c2e259 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -937,7 +937,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -1036,6 +1036,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ struct udp_sock *up = udp_sk(sk);
+ int ret;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
+@@ -1171,14 +1174,8 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ int is_udplite = IS_UDPLITE(sk);
+ bool slow;
+
+- /*
+- * Check any passed addresses
+- */
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -1231,6 +1228,7 @@ try_again:
+ sin->sin_port = udp_hdr(skb)->source;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index e248069..3c7c948 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -315,7 +315,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sock_exterr_skb *serr;
+@@ -366,6 +366,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
+ &sin->sin6_addr);
+ }
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+@@ -374,6 +375,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
++ sin->sin6_port = 0;
+ sin->sin6_scope_id = 0;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+@@ -418,7 +420,8 @@ out:
+ /*
+ * Handle IPV6_RECVPATHMTU
+ */
+-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+@@ -452,6 +455,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
++ *addr_len = sizeof(*sin);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 97675bf..d3fde7e 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -144,8 +144,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ return res;
+ }
+ rcu_read_unlock();
+- IP6_INC_STATS_BH(dev_net(dst->dev),
+- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
++ IP6_INC_STATS(dev_net(dst->dev),
++ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 6e6c2c4..9ecbc84 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -456,14 +456,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- if (addr_len)
+- *addr_len=sizeof(*sin6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+@@ -498,6 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ sin6->sin6_scope_id = 0;
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = IP6CB(skb)->iif;
++ *addr_len = sizeof(*sin6);
+ }
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bc9103d..1768238 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -592,8 +592,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ prefix = &prefix_buf;
+ }
+
+- rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+- dev->ifindex);
++ if (rinfo->prefix_len == 0)
++ rt = rt6_get_dflt_router(gwaddr, dev);
++ else
++ rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
++ gwaddr, dev->ifindex);
+
+ if (rt && !lifetime) {
+ ip6_del_rt(rt);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index f9e496b..f8bec1e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -347,14 +347,11 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ int is_udp4;
+ bool slow;
+
+- if (addr_len)
+- *addr_len=sizeof(struct sockaddr_in6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -423,7 +420,7 @@ try_again:
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = IP6CB(skb)->iif;
+ }
+-
++ *addr_len = sizeof(*sin6);
+ }
+ if (is_udp4) {
+ if (inet->cmsg_flags)
+diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
+index 9680226..8c06a50 100644
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1835,8 +1835,6 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (skb->tstamp.tv64)
+ sk->sk_stamp = skb->tstamp;
+
+- msg->msg_namelen = sizeof(*sipx);
+-
+ if (sipx) {
+ sipx->sipx_family = AF_IPX;
+ sipx->sipx_port = ipx->ipx_source.sock;
+@@ -1844,6 +1842,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
+ sipx->sipx_type = ipx->ipx_type;
+ sipx->sipx_zero = 0;
++ msg->msg_namelen = sizeof(*sipx);
+ }
+ rc = copied;
+
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 91821e9..f5d011a 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1386,8 +1386,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+@@ -1452,8 +1450,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+
+- msg->msg_namelen = 0;
+-
+ do {
+ int chunk;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index e836140..cf98d62 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1356,8 +1356,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int blen;
+ int err = 0;
+
+- msg->msg_namelen = 0;
+-
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 8dbdb8e..dc8d7ef 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3595,7 +3595,6 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ goto out;
+
+- msg->msg_namelen = 0;
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto out;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 6c7e609..334a93d 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -568,9 +568,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+@@ -593,6 +590,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 8570079..969cd3e 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -200,8 +200,6 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+- msg->msg_namelen = 0;
+-
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index e5565c7..99a60d5 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index db01d02..71d8564 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -764,7 +764,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+ u16 sc;
+ int tid;
+
+- if (!ieee80211_is_data_qos(hdr->frame_control))
++ if (!ieee80211_is_data_qos(hdr->frame_control) ||
++ is_multicast_ether_addr(hdr->addr1))
+ goto dont_reorder;
+
+ /*
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 3d1d55d..2369e96 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1445,8 +1445,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ }
+ #endif
+
+- msg->msg_namelen = 0;
+-
+ copied = data_skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 3df7c5a..b4d889b 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1182,10 +1182,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
++ msg->msg_namelen = sizeof(*sax);
+ }
+
+- msg->msg_namelen = sizeof(*sax);
+-
+ skb_free_datagram(sk, skb);
+
+ release_sock(sk);
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 7a167fc..0d570d3 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -248,8 +248,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return rc;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a2ac2c3..4f19bf2 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -295,6 +295,7 @@ struct packet_sock {
+ unsigned int tp_reserve;
+ unsigned int tp_loss:1;
+ unsigned int tp_tstamp;
++ struct net_device __rcu *cached_dev;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ };
+
+@@ -350,11 +351,15 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+ static void register_prot_hook(struct sock *sk)
+ {
+ struct packet_sock *po = pkt_sk(sk);
++
+ if (!po->running) {
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_link(sk, po);
+- else
++ } else {
+ dev_add_pack(&po->prot_hook);
++ rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
++ }
++
+ sock_hold(sk);
+ po->running = 1;
+ }
+@@ -372,10 +377,13 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ struct packet_sock *po = pkt_sk(sk);
+
+ po->running = 0;
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_unlink(sk, po);
+- else
++ } else {
+ __dev_remove_pack(&po->prot_hook);
++ RCU_INIT_POINTER(po->cached_dev, NULL);
++ }
++
+ __sock_put(sk);
+
+ if (sync) {
+@@ -497,9 +505,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+- spin_lock(&rb_queue->lock);
++ spin_lock_bh(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+- spin_unlock(&rb_queue->lock);
++ spin_unlock_bh(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+ }
+@@ -2032,12 +2040,24 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ return tp_len;
+ }
+
++static struct net_device *packet_cached_dev_get(struct packet_sock *po)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = rcu_dereference(po->cached_dev);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++
++ return dev;
++}
++
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ int err, reserve = 0;
+ void *ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
+@@ -2050,7 +2070,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+
+ err = -EBUSY;
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2064,19 +2084,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+ if (unlikely(dev == NULL))
+ goto out;
+-
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
++ reserve = dev->hard_header_len;
++
+ size_max = po->tx_ring.frame_size
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+
+@@ -2152,8 +2170,7 @@ out_status:
+ __packet_set_status(po, ph, status);
+ kfree_skb(skb);
+ out_put:
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+ out:
+ mutex_unlock(&po->pg_vec_lock);
+ return err;
+@@ -2191,7 +2208,6 @@ static int packet_snd(struct socket *sock,
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ unsigned char *addr;
+ int err, reserve = 0;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+@@ -2205,7 +2221,7 @@ static int packet_snd(struct socket *sock,
+ */
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2217,19 +2233,17 @@ static int packet_snd(struct socket *sock,
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+- if (dev == NULL)
++ if (unlikely(dev == NULL))
+ goto out_unlock;
+- if (sock->type == SOCK_RAW)
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+- if (!(dev->flags & IFF_UP))
++ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_unlock;
+
++ if (sock->type == SOCK_RAW)
++ reserve = dev->hard_header_len;
+ if (po->has_vnet_hdr) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+@@ -2350,15 +2364,14 @@ static int packet_snd(struct socket *sock,
+ if (err > 0 && (err = net_xmit_errno(err)) != 0)
+ goto out_unlock;
+
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+
+ return len;
+
+ out_free:
+ kfree_skb(skb);
+ out_unlock:
+- if (dev && need_rls_dev)
++ if (dev)
+ dev_put(dev);
+ out:
+ return err;
+@@ -2575,6 +2588,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ po = pkt_sk(sk);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
++ RCU_INIT_POINTER(po->cached_dev, NULL);
+
+ sk->sk_destruct = packet_sock_destruct;
+ sk_refcnt_debug_inc(sk);
+@@ -2663,7 +2677,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int copied, err;
+- struct sockaddr_ll *sll;
+ int vnet_hdr_len = 0;
+
+ err = -EINVAL;
+@@ -2746,22 +2759,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out_free;
+ }
+
+- /*
+- * If the address length field is there to be filled in, we fill
+- * it in now.
++ /* You lose any data beyond the buffer you gave. If it worries
++ * a user program they can ask the device for its MTU
++ * anyway.
+ */
+-
+- sll = &PACKET_SKB_CB(skb)->sa.ll;
+- if (sock->type == SOCK_PACKET)
+- msg->msg_namelen = sizeof(struct sockaddr_pkt);
+- else
+- msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
+-
+- /*
+- * You lose any data beyond the buffer you gave. If it worries a
+- * user program they can ask the device for its MTU anyway.
+- */
+-
+ copied = skb->len;
+ if (copied > len) {
+ copied = len;
+@@ -2774,9 +2775,20 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+- if (msg->msg_name)
++ if (msg->msg_name) {
++ /* If the address length field is there to be filled
++ * in, we fill it in now.
++ */
++ if (sock->type == SOCK_PACKET) {
++ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ } else {
++ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++ msg->msg_namelen = sll->sll_halen +
++ offsetof(struct sockaddr_ll, sll_addr);
++ }
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+ msg->msg_namelen);
++ }
+
+ if (pkt_sk(sk)->auxdata) {
+ struct tpacket_auxdata aux;
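Note on the net/packet/af_packet.c hunks above: two independent fixes plus
the msg_name rule. First, the send paths previously used po->prot_hook.dev
without holding a reference; the socket now caches the bound device in an
RCU-protected cached_dev, and packet_cached_dev_get() takes a refcount
inside rcu_read_lock() before tpacket_snd()/packet_snd() touch it (both
paths then unconditionally dev_put()). Second,
prb_shutdown_retire_blk_timer() switches to spin_lock_bh() because the
retire-block timer runs in softirq context. The lookup follows the usual
RCU publish/get idiom:

    rcu_read_lock();
    dev = rcu_dereference(po->cached_dev);
    if (dev)
            dev_hold(dev);  /* refcount taken before leaving the
                             * RCU read-side critical section */
    rcu_read_unlock();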
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index bf35b4e..b25f2d3 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+ MSG_CMSG_COMPAT))
+ goto out_nofree;
+
+- if (addr_len)
+- *addr_len = sizeof(sa);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &rval);
+ if (skb == NULL)
+ goto out_nofree;
+@@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+
+ rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+- if (msg->msg_name != NULL)
+- memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
++ if (msg->msg_name != NULL) {
++ memcpy(msg->msg_name, &sa, sizeof(sa));
++ *addr_len = sizeof(sa);
++ }
+
+ out:
+ skb_free_datagram(sk, skb);
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index fc57d31..96a1239 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
+- msg->msg_namelen = 0;
+-
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 1f96fb9..bf76dec7 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1221,7 +1221,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sock *sk = sock->sk;
+ struct rose_sock *rose = rose_sk(sk);
+- struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
+ size_t copied;
+ unsigned char *asmptr;
+ struct sk_buff *skb;
+@@ -1257,8 +1256,11 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (srose != NULL) {
+- memset(srose, 0, msg->msg_namelen);
++ if (msg->msg_name) {
++ struct sockaddr_rose *srose;
++
++ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
++ srose = msg->msg_name;
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 4b48687..898492a 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ /* copy the peer address and timestamp */
+ if (!continue_call) {
+- if (msg->msg_name && msg->msg_namelen > 0)
++ if (msg->msg_name) {
++ size_t len =
++ sizeof(call->conn->trans->peer->srx);
+ memcpy(msg->msg_name,
+- &call->conn->trans->peer->srx,
+- sizeof(call->conn->trans->peer->srx));
++ &call->conn->trans->peer->srx, len);
++ msg->msg_namelen = len;
++ }
+ sock_recv_ts_and_drops(msg, &rx->sk, skb);
+ }
+
+diff --git a/net/socket.c b/net/socket.c
+index bf7adaa..d4faade 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -215,12 +215,13 @@ static int move_addr_to_user(struct sockaddr *kaddr, int klen,
+ int err;
+ int len;
+
++ BUG_ON(klen > sizeof(struct sockaddr_storage));
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+- if (len < 0 || len > sizeof(struct sockaddr_storage))
++ if (len < 0)
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
+@@ -1752,8 +1753,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+- msg.msg_name = (struct sockaddr *)&address;
+- msg.msg_namelen = sizeof(address);
++ /* Save some cycles and don't copy the address if not needed */
++ msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg.msg_namelen = 0;
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+@@ -1882,7 +1885,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+ }
+
+@@ -2142,18 +2145,16 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ goto out;
+ }
+
+- /*
+- * Save the user-mode address (verify_iovec will change the
+- * kernel msghdr to use the kernel address space)
++ /* Save the user-mode address (verify_iovec will change the
++ * kernel msghdr to use the kernel address space)
+ */
+-
+ uaddr = (__force void __user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+- if (MSG_CMSG_COMPAT & flags) {
++ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+- } else
++ else
+ err = verify_iovec(msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+@@ -2164,6 +2165,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ cmsg_ptr = (unsigned long)msg_sys->msg_control;
+ msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg_sys->msg_namelen = 0;
++
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 65fe23b..bfb78fa 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -390,8 +390,10 @@ static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen,
+ return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+ }
+
+-static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
++static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy)
+ {
++ ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
++ int offset, size_t size, int flags);
+ struct page **ppage;
+ unsigned int remainder;
+ int err, sent = 0;
+@@ -400,6 +402,9 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ base += xdr->page_base;
+ ppage = xdr->pages + (base >> PAGE_SHIFT);
+ base &= ~PAGE_MASK;
++ do_sendpage = sock->ops->sendpage;
++ if (!zerocopy)
++ do_sendpage = sock_no_sendpage;
+ for(;;) {
+ unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
+ int flags = XS_SENDMSG_FLAGS;
+@@ -407,7 +412,7 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ remainder -= len;
+ if (remainder != 0 || more)
+ flags |= MSG_MORE;
+- err = sock->ops->sendpage(sock, *ppage, base, len, flags);
++ err = do_sendpage(sock, *ppage, base, len, flags);
+ if (remainder == 0 || err != len)
+ break;
+ sent += err;
+@@ -428,9 +433,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
++ * @zerocopy: true if it is safe to use sendpage()
+ *
+ */
+-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
++static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy)
+ {
+ unsigned int remainder = xdr->len - base;
+ int err, sent = 0;
+@@ -458,7 +464,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
+ if (base < xdr->page_len) {
+ unsigned int len = xdr->page_len - base;
+ remainder -= len;
+- err = xs_send_pagedata(sock, xdr, base, remainder != 0);
++ err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy);
+ if (remainder == 0 || err != len)
+ goto out;
+ sent += err;
+@@ -561,7 +567,7 @@ static int xs_local_send_request(struct rpc_task *task)
+ req->rq_svec->iov_base, req->rq_svec->iov_len);
+
+ status = xs_sendpages(transport->sock, NULL, 0,
+- xdr, req->rq_bytes_sent);
++ xdr, req->rq_bytes_sent, true);
+ dprintk("RPC: %s(%u) = %d\n",
+ __func__, xdr->len - req->rq_bytes_sent, status);
+ if (likely(status >= 0)) {
+@@ -617,7 +623,7 @@ static int xs_udp_send_request(struct rpc_task *task)
+ status = xs_sendpages(transport->sock,
+ xs_addr(xprt),
+ xprt->addrlen, xdr,
+- req->rq_bytes_sent);
++ req->rq_bytes_sent, true);
+
+ dprintk("RPC: xs_udp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+@@ -688,6 +694,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct xdr_buf *xdr = &req->rq_snd_buf;
++ bool zerocopy = true;
+ int status;
+
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
+@@ -695,13 +702,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base,
+ req->rq_svec->iov_len);
++ /* Don't use zero copy if this is a resend. If the RPC call
++ * completes while the socket holds a reference to the pages,
++ * then we may end up resending corrupted data.
++ */
++ if (task->tk_flags & RPC_TASK_SENT)
++ zerocopy = false;
+
+ /* Continue transmitting the packet/record. We must be careful
+ * to cope with writespace callbacks arriving _after_ we have
+ * called sendmsg(). */
+ while (1) {
+ status = xs_sendpages(transport->sock,
+- NULL, 0, xdr, req->rq_bytes_sent);
++ NULL, 0, xdr, req->rq_bytes_sent,
++ zerocopy);
+
+ dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
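
Editorial note: the xprtsock.c hunks above thread a zerocopy flag down to the page sender and swap in sock_no_sendpage() for resends, because a completed RPC may reclaim pages the socket still holds a reference to. A minimal user-space sketch of the same select-the-sender-up-front pattern (send_zerocopy, send_copied and send_pagedata are illustrative stand-ins, not kernel symbols):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins: the zero-copy path keeps referencing the
 * caller's buffer, the copied path duplicates it first, which is what
 * falling back to sock_no_sendpage() buys the kernel on a resend. */
static int send_zerocopy(const char *page, size_t len)
{
        printf("zero-copy send, %zu bytes\n", len);
        return (int)len;
}

static int send_copied(const char *page, size_t len)
{
        char stable[256];

        len = len < sizeof(stable) ? len : sizeof(stable);
        memcpy(stable, page, len);      /* data now survives page reuse */
        printf("copied send, %zu bytes\n", len);
        return (int)len;
}

/* Same shape as the patched xs_send_pagedata(): pick the sender once,
 * before the transmit loop, based on the zerocopy flag. */
static int send_pagedata(const char *page, size_t len, int zerocopy)
{
        int (*do_send)(const char *, size_t) = send_zerocopy;

        if (!zerocopy)
                do_send = send_copied;
        return do_send(page, len);
}

int main(void)
{
        send_pagedata("hello", 5, 1);   /* first transmission */
        send_pagedata("hello", 5, 0);   /* resend: RPC_TASK_SENT set */
        return 0;
}
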
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index fdf34af..058941e 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -949,9 +949,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1078,9 +1075,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5122b22..9338ccc 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1744,7 +1744,6 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- msg->msg_namelen = 0;
+ if (u->addr) {
+ msg->msg_namelen = u->addr->len;
+ memcpy(msg->msg_name, u->addr->name, u->addr->len);
+@@ -1767,8 +1766,6 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err) {
+ err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+@@ -1902,8 +1899,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+- msg->msg_namelen = 0;
+-
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index dc24ba9..07b9973 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1343,10 +1343,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sx25) {
+ sx25->sx25_family = AF_X25;
+ sx25->sx25_addr = x25->dest_addr;
++ msg->msg_namelen = sizeof(*sx25);
+ }
+
+- msg->msg_namelen = sizeof(struct sockaddr_x25);
+-
+ x25_check_rbuf(sk);
+ rc = copied;
+ out_free_dgram:
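
Editorial note: the tipc, af_unix and x25 hunks above all stop pre-zeroing msg_namelen inside each recvmsg implementation; in this series the common socket code zeroes it once, and a protocol sets it only when it actually writes an address (the x25 hunk additionally moves the assignment inside the if (sx25) branch). A toy sketch of the resulting contract, with hypothetical toy_* names in place of struct msghdr:

#include <stddef.h>
#include <string.h>

/* toy_* types are illustrative stand-ins for struct msghdr and a
 * protocol address; they are not kernel definitions. */
struct toy_addr { int family; char data[8]; };
struct toy_msghdr { void *msg_name; size_t msg_namelen; };

/* Contract after the fix: common code zeroes msg_namelen once, and a
 * protocol's recvmsg touches it only when it really wrote an address. */
static void toy_recvmsg(struct toy_msghdr *msg, const struct toy_addr *src)
{
        if (msg->msg_name && src) {
                memcpy(msg->msg_name, src, sizeof(*src));
                msg->msg_namelen = sizeof(*src);
        }
        /* no source address: the caller's zero is left in place */
}

int main(void)
{
        struct toy_addr buf;
        struct toy_msghdr msg = { .msg_name = &buf, .msg_namelen = 0 };

        toy_recvmsg(&msg, NULL);
        return (int)msg.msg_namelen;    /* stays 0, nothing leaked */
}
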
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 1126c10..5898f34 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -53,6 +53,7 @@
+ #include <net/icmp.h>
+ #include <net/ip.h> /* for local_port_range[] */
+ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
++#include <net/inet_connection_sock.h>
+ #include <net/net_namespace.h>
+ #include <net/netlabel.h>
+ #include <linux/uaccess.h>
+@@ -3704,6 +3705,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+ return 0;
+ }
+
++/**
++ * selinux_conn_sid - Determine the child socket label for a connection
++ * @sk_sid: the parent socket's SID
++ * @skb_sid: the packet's SID
++ * @conn_sid: the resulting connection SID
++ *
++ * If @skb_sid is valid then the user:role:type information from @sk_sid is
++ * combined with the MLS information from @skb_sid in order to create
++ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
++ * of @sk_sid. Returns zero on success, negative values on failure.
++ *
++ */
++static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
++{
++ int err = 0;
++
++ if (skb_sid != SECSID_NULL)
++ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
++ else
++ *conn_sid = sk_sid;
++
++ return err;
++}
++
+ /* socket security operations */
+
+ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+@@ -4295,7 +4320,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ struct sk_security_struct *sksec = sk->sk_security;
+ int err;
+ u16 family = sk->sk_family;
+- u32 newsid;
++ u32 connsid;
+ u32 peersid;
+
+ /* handle mapped IPv4 packets arriving via IPv6 sockets */
+@@ -4305,16 +4330,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ err = selinux_skb_peerlbl_sid(skb, family, &peersid);
+ if (err)
+ return err;
+- if (peersid == SECSID_NULL) {
+- req->secid = sksec->sid;
+- req->peer_secid = SECSID_NULL;
+- } else {
+- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
+- if (err)
+- return err;
+- req->secid = newsid;
+- req->peer_secid = peersid;
+- }
++ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
++ if (err)
++ return err;
++ req->secid = connsid;
++ req->peer_secid = peersid;
+
+ return selinux_netlbl_inet_conn_request(req, family);
+ }
+@@ -4542,6 +4562,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ u16 family)
+ {
++ struct sock *sk;
+ u32 sid;
+
+ if (!netlbl_enabled())
+@@ -4550,8 +4571,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
+ * because we want to make sure we apply the necessary labeling
+ * before IPsec is applied so we can leverage AH protection */
+- if (skb->sk) {
+- struct sk_security_struct *sksec = skb->sk->sk_security;
++ sk = skb->sk;
++ if (sk) {
++ struct sk_security_struct *sksec;
++
++ if (sk->sk_state == TCP_LISTEN)
++ /* if the socket is in the listening state then this
++ * packet is a SYN-ACK packet which means it needs to
++ * be labeled based on the connection/request_sock and
++ * not the parent socket. unfortunately, we can't
++ * lookup the request_sock yet as it isn't queued on
++ * the parent socket until after the SYN-ACK is sent.
++ * the "solution" is to simply pass the packet as-is
++ * as any IP option based labeling should be copied
++ * from the initial connection request (in the IP
++ * layer). it is far from ideal, but until we get a
++ * security label in the packet itself this is the
++ * best we can do. */
++ return NF_ACCEPT;
++
++ /* standard practice, label using the parent socket */
++ sksec = sk->sk_security;
+ sid = sksec->sid;
+ } else
+ sid = SECINITSID_KERNEL;
+@@ -4633,12 +4673,12 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+- /* if the packet is being forwarded then get the peer label from the
+- * packet itself; otherwise check to see if it is from a local
+- * application or the kernel, if from an application get the peer label
+- * from the sending socket, otherwise use the kernel's sid */
+ sk = skb->sk;
+ if (sk == NULL) {
++ /* Without an associated socket the packet is either coming
++ * from the kernel or it is being forwarded; check the packet
++ * to determine which, and if the packet is being forwarded,
++ * query the packet directly to determine the security label. */
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
+ if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
+@@ -4647,7 +4687,26 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ secmark_perm = PACKET__SEND;
+ peer_sid = SECINITSID_KERNEL;
+ }
++ } else if (sk->sk_state == TCP_LISTEN) {
++ /* Locally generated packet but the associated socket is in the
++ * listening state which means this is a SYN-ACK packet. In
++ * this particular case the correct security label is assigned
++ * to the connection/request_sock but unfortunately we can't
++ * query the request_sock as it isn't queued on the parent
++ * socket until after the SYN-ACK packet is sent; the only
++ * viable choice is to regenerate the label like we do in
++ * selinux_inet_conn_request(). See also selinux_ip_output()
++ * for similar problems. */
++ u32 skb_sid;
++ struct sk_security_struct *sksec = sk->sk_security;
++ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
++ return NF_DROP;
++ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
++ return NF_DROP;
++ secmark_perm = PACKET__SEND;
+ } else {
++ /* Locally generated packet, fetch the security label from the
++ * associated socket. */
+ struct sk_security_struct *sksec = sk->sk_security;
+ peer_sid = sksec->sid;
+ secmark_perm = PACKET__SEND;
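
Editorial note: selinux_conn_sid() above factors out the "combine the parent socket's user:role:type with the packet's MLS range" step so both the connection-request path and the SYN-ACK postroute path can share it. A self-contained toy of the same combination rule, using string fields instead of SIDs (toy_ctx and conn_ctx are illustrative only, not the kernel's representation):

#include <stdio.h>
#include <string.h>

/* Illustrative context split the way selinux_conn_sid() treats a SID:
 * user:role:type from the parent socket, MLS range from the packet when
 * the packet carried a valid label. */
struct toy_ctx {
        char urt[32];           /* "user:role:type" */
        char mls[16];           /* "s0:c0.c15"-style range */
        int valid;              /* stand-in for sid != SECSID_NULL */
};

static struct toy_ctx conn_ctx(const struct toy_ctx *sk,
                               const struct toy_ctx *skb)
{
        struct toy_ctx out = *sk;       /* default: plain copy of sk */

        if (skb->valid)
                strcpy(out.mls, skb->mls);      /* MLS from the packet */
        return out;
}

int main(void)
{
        struct toy_ctx sk = { "system_u:object_r:httpd_t", "s0", 1 };
        struct toy_ctx skb = { "", "s0:c7", 1 };
        struct toy_ctx conn = conn_ctx(&sk, &skb);

        printf("%s:%s\n", conn.urt, conn.mls);
        return 0;
}
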
+diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
+index da4b8b2..6235d05 100644
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state != NLBL_CONNLABELED)
+ return 0;
+
+- local_bh_disable();
+- bh_lock_sock_nested(sk);
++ lock_sock(sk);
+
+ /* connected sockets are allowed to disconnect when the address family
+ * is set to AF_UNSPEC, if that is what is happening we want to reset
+@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state = NLBL_CONNLABELED;
+
+ socket_connect_return:
+- bh_unlock_sock(sk);
+- local_bh_enable();
++ release_sock(sk);
+ return rc;
+ }
+diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
+index 946a0cb..e6ad8d4 100644
+--- a/sound/drivers/pcsp/pcsp.c
++++ b/sound/drivers/pcsp/pcsp.c
+@@ -187,8 +187,8 @@ static int __devinit pcsp_probe(struct platform_device *dev)
+ static int __devexit pcsp_remove(struct platform_device *dev)
+ {
+ struct snd_pcsp *chip = platform_get_drvdata(dev);
+- alsa_card_pcsp_exit(chip);
+ pcspkr_input_remove(chip->input_dev);
++ alsa_card_pcsp_exit(chip);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+ }
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index 0961e2c..a9cc687 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -73,9 +73,11 @@
+ #ifdef MSND_CLASSIC
+ # include "msnd_classic.h"
+ # define LOGNAME "msnd_classic"
++# define DEV_NAME "msnd-classic"
+ #else
+ # include "msnd_pinnacle.h"
+ # define LOGNAME "snd_msnd_pinnacle"
++# define DEV_NAME "msnd-pinnacle"
+ #endif
+
+ static void __devinit set_default_audio_parameters(struct snd_msnd *chip)
+@@ -1068,8 +1070,6 @@ static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
+ return 0;
+ }
+
+-#define DEV_NAME "msnd-pinnacle"
+-
+ static struct isa_driver snd_msnd_driver = {
+ .match = snd_msnd_isa_match,
+ .probe = snd_msnd_isa_probe,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 984b5b1..843d9f3 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4610,6 +4610,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ .patch = patch_conexant_auto },
+ { .id = 0x14f15115, .name = "CX20757",
+ .patch = patch_conexant_auto },
++ { .id = 0x14f151d7, .name = "CX20952",
++ .patch = patch_conexant_auto },
+ {} /* terminator */
+ };
+
+@@ -4636,6 +4638,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15111");
+ MODULE_ALIAS("snd-hda-codec-id:14f15113");
+ MODULE_ALIAS("snd-hda-codec-id:14f15114");
+ MODULE_ALIAS("snd-hda-codec-id:14f15115");
++MODULE_ALIAS("snd-hda-codec-id:14f151d7");
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Conexant HD-audio codec");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 92c913d..1f78ca6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5943,6 +5943,7 @@ static int patch_alc662(struct hda_codec *codec)
+ case 0x10ec0272:
+ case 0x10ec0663:
+ case 0x10ec0665:
++ case 0x10ec0668:
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ break;
+ case 0x10ec0273:
+@@ -6019,6 +6020,7 @@ static int patch_alc680(struct hda_codec *codec)
+ */
+ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
++ { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
+ { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
+ { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
+ { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index 1c4999d..f2dac5c 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -214,7 +214,7 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p94.
+ */
+- snd_soc_write(codec, SG_SL1, PMMP | MGAIN0);
++ snd_soc_update_bits(codec, SG_SL1, PMMP | MGAIN0, PMMP | MGAIN0);
+ snd_soc_write(codec, TIMER, ZTM(0x3) | WTM(0x3));
+ snd_soc_write(codec, ALC_CTL1, ALC | LMTH0);
+ snd_soc_update_bits(codec, PW_MGMT1, PMVCM | PMADL,
+diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
+index a7c9ae1..6cfd4f7 100644
+--- a/sound/soc/codecs/wm8731.c
++++ b/sound/soc/codecs/wm8731.c
+@@ -392,10 +392,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ iface |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+- iface |= 0x0003;
++ iface |= 0x0013;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+- iface |= 0x0013;
++ iface |= 0x0003;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
+index d29a962..cfa5bea 100644
+--- a/sound/soc/codecs/wm8990.c
++++ b/sound/soc/codecs/wm8990.c
+@@ -1266,6 +1266,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
+
+ /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
++
++ codec->cache_sync = 1;
+ break;
+ }
+
+diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
+index c7dca7b..46a2816 100644
+--- a/sound/usb/6fire/chip.c
++++ b/sound/usb/6fire/chip.c
+@@ -102,7 +102,7 @@ static int __devinit usb6fire_chip_probe(struct usb_interface *intf,
+ usb_set_intfdata(intf, chips[i]);
+ mutex_unlock(&register_mutex);
+ return 0;
+- } else if (regidx < 0)
++ } else if (!devices[i] && regidx < 0)
+ regidx = i;
+ }
+ if (regidx < 0) {
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 533db33..5dbb35d 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -101,6 +101,10 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ while ((gfn << PAGE_SHIFT) & (page_size - 1))
+ page_size >>= 1;
+
++ /* Make sure hva is aligned to the page size we want to map */
++ while (gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
++ page_size >>= 1;
++
+ /*
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
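
Editorial note: the iommu.c hunk adds a second alignment constraint: a large mapping is only valid when the host virtual address, not just the guest frame number, is aligned to the chosen page size. A runnable sketch of the shrink-until-aligned loop (toy_gfn_to_hva is a stand-in for gfn_to_hva_memslot()):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical guest-frame-to-host-address lookup for the sketch. */
static uint64_t toy_gfn_to_hva(uint64_t gfn)
{
        return 0x7f0000001000ULL + (gfn << PAGE_SHIFT);
}

/* Mirrors the fix: shrink page_size until BOTH the guest frame and the
 * host virtual address are aligned to it. */
static uint64_t pick_page_size(uint64_t gfn, uint64_t max_page_size)
{
        uint64_t page_size = max_page_size;

        while ((gfn << PAGE_SHIFT) & (page_size - 1))
                page_size >>= 1;
        while (toy_gfn_to_hva(gfn) & (page_size - 1))
                page_size >>= 1;
        return page_size;
}

int main(void)
{
        /* gfn 512 is 2 MiB aligned, but the toy hva is only 4 KiB
         * aligned, so the second loop forces the mapping down to 4 KiB. */
        printf("page size: %llu\n",
               (unsigned long long)pick_page_size(512, 2 << 20));
        return 0;
}
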
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 8bf05f0..d83aa5e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -774,7 +774,7 @@ skip_lpage:
+ new.userspace_addr = mem->userspace_addr;
+ #endif /* not defined CONFIG_S390 */
+
+- if (!npages) {
++ if (!npages || base_gfn != old.base_gfn) {
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+@@ -788,8 +788,10 @@ skip_lpage:
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+- /* From this point no new shadow pages pointing to a deleted
+- * memslot will be created.
++ /* slot was deleted or moved, clear iommu mapping */
++ kvm_iommu_unmap_pages(kvm, &old);
++ /* From this point no new shadow pages pointing to a deleted,
++ * or moved, memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+@@ -803,14 +805,6 @@ skip_lpage:
+ if (r)
+ goto out_free;
+
+- /* map/unmap the pages in iommu page table */
+- if (npages) {
+- r = kvm_iommu_map_pages(kvm, &new);
+- if (r)
+- goto out_free;
+- } else
+- kvm_iommu_unmap_pages(kvm, &old);
+-
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+@@ -820,6 +814,13 @@ skip_lpage:
+ slots->nmemslots = mem->slot + 1;
+ slots->generation++;
+
++ /* map new memory slot into the iommu */
++ if (npages) {
++ r = kvm_iommu_map_pages(kvm, &new);
++ if (r)
++ goto out_slots;
++ }
++
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (!npages) {
+ new.rmap = NULL;
+@@ -847,6 +848,8 @@ skip_lpage:
+
+ return 0;
+
++out_slots:
++ kfree(slots);
+ out_free:
+ kvm_free_physmem_slot(&new, &old);
+ out:
+@@ -1683,6 +1686,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ int r;
+ struct kvm_vcpu *vcpu, *v;
+
++ if (id >= KVM_MAX_VCPUS)
++ return -EINVAL;
++
+ vcpu = kvm_arch_vcpu_create(kvm, id);
+ if (IS_ERR(vcpu))
+ return PTR_ERR(vcpu);
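
Editorial note: the kvm_main.c hunks above reorder the memslot commit: the IOMMU mapping of a deleted or moved slot is torn down right after the interim slots array is published, and the new slot is mapped only on the final commit path, with an out_slots label to free the staged copy on failure. A compressed sketch of that ordering (all names are stand-ins; the real path also waits out an SRCU grace period between the steps):

#include <stdio.h>
#include <stdlib.h>

static void iommu_unmap(const char *slot) { printf("unmap %s\n", slot); }
static int  iommu_map(const char *slot)   { printf("map %s\n", slot); return 0; }

/* Old translations go away before anyone can fault on them, and the new
 * slot is mapped only at commit time so a failure can unwind by freeing
 * the staged slots copy. */
static int commit_memslot(const char *old, const char *new_slot, int moved)
{
        if (!new_slot || moved)
                iommu_unmap(old);       /* slot deleted or moved */
        if (new_slot && iommu_map(new_slot))
                return -1;              /* caller frees the staged copy */
        return 0;
}

int main(void)
{
        return commit_memslot("slot@gfn 0x100", "slot@gfn 0x200", 1)
                ? EXIT_FAILURE : EXIT_SUCCESS;
}
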
diff --git a/3.2.54/4420_grsecurity-3.0-3.2.54-201401051649.patch b/3.2.54/4420_grsecurity-3.0-3.2.54-201401051649.patch
new file mode 100644
index 0000000..3e204e8
--- /dev/null
+++ b/3.2.54/4420_grsecurity-3.0-3.2.54-201401051649.patch
@@ -0,0 +1,117735 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index dfa6fc6..be27ac3 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -2,9 +2,11 @@
+ *.aux
+ *.bin
+ *.bz2
++*.c.[012]*.*
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -14,6 +16,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -48,14 +51,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -70,9 +76,11 @@ Kerntypes
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -81,6 +89,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -93,19 +102,24 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+@@ -116,9 +130,11 @@ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -126,12 +142,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -146,7 +165,7 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -154,7 +173,7 @@ kxgettext
+ lkc_defs.h
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -166,14 +185,15 @@ machtypes.h
+ map
+ map_hugetlb
+ maui_boot.h
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -209,6 +229,7 @@ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -218,7 +239,10 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++size_overflow_hash.h
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -229,6 +253,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -246,7 +271,9 @@ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -254,9 +281,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 2ba8272..e2a9806 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -859,6 +859,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ gpt [EFI] Forces disk with valid GPT signature but
+ invalid Protective MBR to be treated as GPT.
+
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+ are distributed across NUMA nodes. Defaults on
+ for 64-bit NUMA, off otherwise.
+@@ -1960,6 +1963,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_sanitize_slab=
++ 0/1 to disable/enable slab object sanitization (enabled by
++ default).
++
++ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
++
++ pax_extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
+index 88fd7f5..b318a78 100644
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -163,16 +163,22 @@ This value can be used to query and set the core dump mode for setuid
+ or otherwise protected/tainted binaries. The modes are
+
+ 0 - (default) - traditional behaviour. Any process which has changed
+- privilege levels or is execute only will not be dumped
++ privilege levels or is execute only will not be dumped.
+ 1 - (debug) - all processes dump core when possible. The core dump is
+ owned by the current user and no security is applied. This is
+ intended for system debugging situations only. Ptrace is unchecked.
++ This is insecure as it allows regular users to examine the memory
++ contents of privileged processes.
+ 2 - (suidsafe) - any binary which normally would not be dumped is dumped
+- readable by root only. This allows the end user to remove
+- such a dump but not access it directly. For security reasons
+- core dumps in this mode will not overwrite one another or
+- other files. This mode is appropriate when administrators are
+- attempting to debug problems in a normal environment.
++ anyway, but only if the "core_pattern" kernel sysctl is set to
++ either a pipe handler or a fully qualified path. (For more details
++ on this limitation, see CVE-2006-2451.) This mode is appropriate
++ when administrators are attempting to debug problems in a normal
++ environment, and either have a core dump pipe handler that knows
++ to treat privileged core dumps with care, or a specific directory
++ defined for catching core dumps. If a core dump happens without
++ a pipe handler or fully qualified path, a message will be emitted
++ to syslog warning about the lack of a correct setting.
+
+ ==============================================================
+
+diff --git a/Makefile b/Makefile
+index 848be26..3deab0e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -564,6 +565,65 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
+
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
++else
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
++endif
++ifneq ($(PLUGINCC),)
++ifdef CONFIG_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
++STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
++endif
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export PLUGINCC CONSTIFY_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
++else
++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
++endif
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
+ ifneq ($(CONFIG_FRAME_WARN),0)
+@@ -708,7 +768,7 @@ export mod_strip_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -932,6 +992,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -941,7 +1003,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ # Store (new) KERNELRELASE string in include/config/kernel.release
+@@ -985,6 +1047,7 @@ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # Generate some files
+@@ -1089,6 +1152,8 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1104,7 +1169,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1163,7 +1228,7 @@ CLEAN_FILES += vmlinux System.map \
+ MRPROPER_DIRS += include/config usr/include include/generated \
+ arch/*/include/generated
+ MRPROPER_FILES += .config .config.old .version .old_version \
+- include/linux/version.h \
++ include/linux/version.h tools/gcc/size_overflow_hash.h\
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
+
+ # clean - Delete most, but leave enough to build external modules
+@@ -1201,6 +1266,7 @@ distclean: mrproper
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+ -o -name '.*.rej' \
++		-o -name '*.so' \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+@@ -1361,6 +1427,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1487,17 +1555,21 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1507,11 +1579,15 @@ endif
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 4b0669c..7389b35 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -181,4 +181,28 @@ config HAVE_RCU_TABLE_FREE
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ bool
+
++config HAVE_ARCH_SECCOMP_FILTER
++ bool
++ help
++ An arch should select this symbol if it provides all of these things:
++ - syscall_get_arch()
++ - syscall_get_arguments()
++ - syscall_rollback()
++ - syscall_set_return_value()
++ - SIGSYS siginfo_t support
++ - uses __secure_computing_int() or secure_computing()
++ - secure_computing is called from a ptrace_event()-safe context
++ - secure_computing return value is checked and a return value of -1
++ results in the system call being skipped immediately.
++
++config SECCOMP_FILTER
++ def_bool y
++ depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
++ help
++ Enable tasks to build secure computing environments defined
++ in terms of Berkeley Packet Filter programs which implement
++ task-defined system call filtering policies.
++
++ See Documentation/prctl/seccomp_filter.txt for details.
++
+ source "kernel/gcov/Kconfig"
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 6f1aca7..fa956e0 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index da5449e..7418343 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index bc2a0da..8ad11ee 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index de98a73..bd4f1f8 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 2fd00b7..cfd5069 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 01e8715..6a5a03b 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1138,16 +1138,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_area_struct *vma = find_vma(current->mm, addr);
+-
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ while (1) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1183,20 +1183,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
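
Editorial note: the osf_sys.c changes make the first-fit walk demand extra headroom on top of the requested length; in the patched kernel gr_rand_threadstack_offset() supplies that offset and check_heap_stack_gap() enforces it. A toy version of the walk with the gap check inlined (toy_vma and get_unmapped are illustrative names):

#include <stdio.h>

struct toy_vma { unsigned long start, end; struct toy_vma *next; };

static unsigned long get_unmapped(struct toy_vma *vma, unsigned long addr,
                                  unsigned long len, unsigned long limit,
                                  unsigned long gap)
{
        for (;; vma = vma->next) {
                /* At this point: (!vma || addr < vma->end). */
                if (limit - len < addr)
                        return 0;       /* -ENOMEM in the real code */
                if (!vma || addr + len + gap <= vma->start)
                        return addr;    /* hole fits length plus gap */
                addr = vma->end;
        }
}

int main(void)
{
        struct toy_vma b = { 0x8000, 0xa000, NULL };
        struct toy_vma a = { 0x4000, 0x6000, &b };

        /* the 0x2000-byte hole between a and b is too small once the
         * 0x1000 gap is demanded, so the search lands after b (0xa000) */
        printf("0x%lx\n", get_unmapped(&a, 0x3000, 0x2000, 0x100000, 0x1000));
        return 0;
}
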
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index fadd5f8..904e73a 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
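
Editorial note: the PAX_EMUPLT code above fetches the instruction words at the faulting PC and pattern-matches them against known PLT trampoline shapes before emulating them. A host-side toy of the first match; the mask/value pairs are copied from the patch, and the instruction "fetch" is just an array:

#include <stdint.h>
#include <stdio.h>

static int looks_like_patched_plt1(const uint32_t insn[3])
{
        return (insn[0] & 0xFFFF0000U) == 0x277B0000U &&  /* ldah via $27 */
               (insn[1] & 0xFFFF0000U) == 0xA77B0000U &&  /* ldq  via $27 */
               insn[2] == 0x6BFB0000U;                    /* jmp  ($27)   */
}

int main(void)
{
        uint32_t plt[3] = { 0x277B0012U, 0xA77B8010U, 0x6BFB0000U };

        printf("patched PLT #1: %s\n",
               looks_like_patched_plt1(plt) ? "match" : "no match");
        return 0;
}
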
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index b7c5d5d..4b0c4ed 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -231,7 +231,7 @@
+ */
+ #ifdef CONFIG_THUMB2_KERNEL
+
+- .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
++ .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
+ 9999:
+ .if \inc == 1
+ \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+@@ -271,7 +271,7 @@
+
+ #else /* !CONFIG_THUMB2_KERNEL */
+
+- .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
++ .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
+ .rept \rept
+ 9999:
+ .if \inc == 1
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 86976d0..269b872 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -15,6 +15,10 @@
+ #include <linux/types.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+@@ -25,7 +29,15 @@
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic_set(v,i) (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_add_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return\n"
++"1: ldrex %1, [%3]\n"
++" adds %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
++"1: ldrex %1, [%3]\n"
++" subs %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_sub_unchecked\n"
+ "1: ldrex %0, [%3]\n"
+ " sub %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+-"1: ldrex %0, [%3]\n"
+-" sub %0, %0, %4\n"
++"1: ldrex %1, [%3]\n"
++" subs %0, %1, %4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strex %1, %0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long tmp, tmp2;
+@@ -165,7 +307,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+
+ return val;
+ }
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_return(i, v);
++}
++
+ #define atomic_add(i, v) (void) atomic_add_return(i, v)
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_add_return(i, v);
++}
+
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+@@ -180,6 +332,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ return val;
+ }
+ #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_sub_return(i, v);
++}
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+@@ -195,6 +351,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg(v, old, new);
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long flags;
+@@ -207,6 +368,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ #endif /* __LINUX_ARM_ARCH__ */
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -219,11 +384,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -239,6 +420,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ static inline u64 atomic64_read(atomic64_t *v)
+@@ -254,6 +443,19 @@ static inline u64 atomic64_read(atomic64_t *v)
+ return result;
+ }
+
++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ u64 result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, u64 i)
+ {
+ u64 tmp;
+@@ -268,6 +470,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ : "cc");
+ }
+
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
++{
++ u64 tmp;
++
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
+ static inline void atomic64_add(u64 i, atomic64_t *v)
+ {
+ u64 result;
+@@ -276,6 +492,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_add\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
++" adcs %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ __asm__ __volatile__("@ atomic64_add_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -287,12 +533,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+
+ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ {
+- u64 result;
+- unsigned long tmp;
++ u64 result, tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" adds %0, %1, %4\n"
++" adcs %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " adds %0, %0, %4\n"
+ " adc %H0, %H0, %H4\n"
+@@ -316,6 +599,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+ __asm__ __volatile__("@ atomic64_sub\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+ " subs %0, %0, %4\n"
++" sbcs %H0, %H0, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strexd %1, %0, %H0, [%3]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
++
++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
++{
++ u64 result;
++ unsigned long tmp;
++
++ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
++"1: ldrexd %0, %H0, [%3]\n"
++" subs %0, %0, %4\n"
+ " sbc %H0, %H0, %H4\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+@@ -327,18 +640,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+
+ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+ {
+- u64 result;
+- unsigned long tmp;
++ u64 result, tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+-"1: ldrexd %0, %H0, [%3]\n"
+-" subs %0, %0, %4\n"
+-" sbc %H0, %H0, %H4\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %0, %1, %4\n"
++" sbcs %H0, %H1, %H4\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+@@ -372,6 +699,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+ return oldval;
+ }
+
++static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
++{
++ u64 oldval;
++ unsigned long res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++ "ldrexd %1, %H1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "teqeq %H1, %H4\n"
++ "strexdeq %0, %5, %H5, [%3]"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ {
+ u64 result;
+@@ -395,21 +746,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+
+ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+ {
+- u64 result;
+- unsigned long tmp;
++ u64 result, tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+-"1: ldrexd %0, %H0, [%3]\n"
+-" subs %0, %0, #1\n"
+-" sbc %H0, %H0, #0\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %0, %1, #1\n"
++" sbcs %H0, %H1, #0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++" mov %H0, %H1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " teq %H0, #0\n"
+-" bmi 2f\n"
++" bmi 4f\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+@@ -432,13 +796,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %0, %0, %6\n"
+-" adc %H0, %H0, %H6\n"
++" adcs %H0, %H0, %H6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -451,10 +827,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
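The checked atomic64 variants above use the flag-setting forms (adds/adcs) so the V flag reflects signed 64-bit overflow; bvc skips the trap on the non-overflow path, and the planted bkpt plus the _ASM_EXTABLE entry let the fault handler report the overflow and resume at label 4. A minimal C analogue of the overflow check alone, ignoring atomicity and the trap machinery (checked_add64 is a hypothetical name, and __builtin_add_overflow is a GCC/Clang builtin, not what the patch uses):

    #include <stdint.h>
    #include <stdbool.h>

    /* Detect signed 64-bit overflow before committing the new value,
     * mirroring what ADDS/ADCS + BVC accomplish in the asm above. */
    bool checked_add64(int64_t *counter, int64_t i)
    {
            int64_t result;

            if (__builtin_add_overflow(*counter, i, &result))
                    return false;   /* overflow: leave the counter untouched */
            *counter = result;
            return true;
    }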
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..2255c86 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
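The _AC() wrapper exists because this header is shared with assembly: an asm file cannot digest a UL suffix, while C wants one so the shift is performed in unsigned long. Roughly as defined in include/linux/const.h (L1_CACHE_SHIFT shown as 6 purely for illustration):

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)        X               /* assembler sees a bare 1 */
    #else
    #define __AC(X,Y)       (X##Y)
    #define _AC(X,Y)        __AC(X,Y)       /* C sees 1UL */
    #endif

    #define L1_CACHE_SHIFT  6
    #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)   /* 64UL in C */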
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 1397408..c4f6969 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -108,7 +108,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
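__no_const is an opt-out marker for the PaX constify gcc plugin, which otherwise forces structures consisting purely of function pointers into read-only data (a favourite overwrite target for kernel exploits). cpu_cache_fns must stay writable because it is filled in at boot after probing the CPU. A conceptual sketch of the pattern being protected, with hypothetical names (the plugin attribute itself is omitted since it is not plain C):

    struct cache_ops {
            void (*flush_all)(void);        /* would be forced const by the plugin */
    };

    static struct cache_ops cpu_cache;      /* __no_const: assigned at runtime */

    static void v7_flush_all(void) { /* ... */ }

    void probe_cache(void)                  /* boot-time, one-off assignment */
    {
            cpu_cache.flush_all = v7_flush_all;
    }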
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index af18cea..b5dc173 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -83,9 +83,9 @@
+ * instructions (inline assembly)
+ */
+ #ifdef CONFIG_CPU_USE_DOMAINS
+-#define T(instr) #instr "t"
++#define TUSER(instr) #instr "t"
+ #else
+-#define T(instr) #instr
++#define TUSER(instr) #instr
+ #endif
+
+ #else /* __ASSEMBLY__ */
+@@ -95,9 +95,9 @@
+ * instructions
+ */
+ #ifdef CONFIG_CPU_USE_DOMAINS
+-#define T(instr) instr ## t
++#define TUSER(instr) instr ## t
+ #else
+-#define T(instr) instr
++#define TUSER(instr) instr
+ #endif
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index 0e9ce8d..6ef1e03 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ extern void elf_set_personality(const struct elf32_hdr *);
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ extern int vectors_user_mapping(void);
+ #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
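The PAX_DELTA_* values are bit counts of page-granular randomisation, so the reachable slide is (1 << len) << PAGE_SHIFT bytes. A back-of-the-envelope check with 4 KiB pages (hypothetical standalone program; values taken from the definitions above):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* len == 16 when personality == PER_LINUX_32BIT, else 10 */
            printf("%lu MiB\n", ((1UL << 16) << PAGE_SHIFT) >> 20);  /* 256 */
            printf("%lu MiB\n", ((1UL << 10) << PAGE_SHIFT) >> 20);  /*   4 */
            return 0;
    }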
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 253cc86..7be5469 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -75,9 +75,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+
+ #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+ __asm__ __volatile__( \
+- "1: " T(ldr) " %1, [%3]\n" \
++ "1: " TUSER(ldr) " %1, [%3]\n" \
+ " " insn "\n" \
+- "2: " T(str) " %0, [%3]\n" \
++ "2: " TUSER(str) " %0, [%3]\n" \
+ " mov %0, #0\n" \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
+@@ -95,10 +95,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+- "1: " T(ldr) " %1, [%4]\n"
++ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+ " it eq @ explicit IT needed for the 2b label\n"
+- "2: " T(streq) " %3, [%4]\n"
++ "2: " TUSER(streq) " %3, [%4]\n"
+ __futex_atomic_ex_table("%5")
+ : "+r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index e51b1e8..32a3113 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -21,6 +21,7 @@ enum km_type {
+ KM_L1_CACHE,
+ KM_L2_CACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index 53426c6..c7baff3 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -35,7 +35,7 @@ struct outer_cache_fns {
+ #endif
+ void (*set_debug)(unsigned long);
+ void (*resume)(void);
+-};
++} __no_const;
+
+ #ifdef CONFIG_OUTER_CACHE
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index ca94653..6ac0d56 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -123,7 +123,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index 3e08fd3..3f14f89 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -31,6 +31,7 @@
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pgd_populate(mm,pmd,pte) BUG()
++#define pgd_populate_kernel(mm,pmd,pte) BUG()
+
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 96187ff..7a9b049 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -72,7 +72,7 @@
+ * ARMv7 groups of PSR bits
+ */
+ #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
+ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index 984014b..a6d914f 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
+
+ #define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+ extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+@@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void);
+ extern void cpu_init(void);
+
+ void arm_machine_restart(char mode, const char *cmd);
+-extern void (*arm_pm_restart)(char str, const char *cmd);
++extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
+
+ #define UDBG_UNDEFINED (1 << 0)
+ #define UDBG_SYSCALL (1 << 1)
+@@ -526,6 +528,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+
+ #endif /* __LINUX_ARM_ARCH__ >= 6 */
+
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
++
+ #endif /* __ASSEMBLY__ */
+
+ #define arch_align_stack(x) (x)
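_ASM_EXTABLE emits an (insn, fixup) address pair into the __ex_table section; when the planted bkpt faults in kernel mode, fixup_exception() looks the faulting PC up and resumes at the recovery label. A simplified model of that lookup (the kernel's real search in lib/extable.c is a binary search over a sorted table; the linear walk below is illustrative only):

    struct exception_table_entry {
            unsigned long insn;     /* address of the faulting instruction */
            unsigned long fixup;    /* address to resume execution at */
    };

    unsigned long search_extable_sketch(
            const struct exception_table_entry *first,
            const struct exception_table_entry *last,
            unsigned long faulting_pc)
    {
            for (; first <= last; first++)
                    if (first->insn == faulting_pc)
                            return first->fixup;
            return 0;       /* no fixup entry: the fault is fatal */
    }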
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 7b5cc8d..5d70d88 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -139,6 +139,12 @@ extern void vfp_flush_hwstate(struct thread_info *);
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_SYSCALL_TRACE 8
++
++/* within 8 bits of TIF_SYSCALL_TRACE
++ * to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID 9
++
+ #define TIF_POLLING_NRFLAG 16
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+@@ -155,6 +161,10 @@ extern void vfp_flush_hwstate(struct thread_info *);
+ #define _TIF_FREEZE (1 << TIF_FREEZE)
+ #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
++
++/* Checks for any syscall work in entry-common.S */
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
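The "within 8 bits" constraint is about ARM immediate encoding: a data-processing immediate is an 8-bit value rotated right by an even amount, so the combined mask must fit one such pattern for the single-instruction "tst rN, #_TIF_SYSCALL_WORK" in entry-common.S. A quick standalone sanity check:

    #include <assert.h>

    int main(void)
    {
            unsigned long mask = (1UL << 8) | (1UL << 9);   /* _TIF_SYSCALL_WORK */

            assert(mask == 0x300);  /* 0x03 rotated right by 24: encodable */
            return 0;
    }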
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 292c3f8..47aa55e 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -241,7 +241,7 @@ do { \
+
+ #define __get_user_asm_byte(x,addr,err) \
+ __asm__ __volatile__( \
+- "1: " T(ldrb) " %1,[%2],#0\n" \
++ "1: " TUSER(ldrb) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+@@ -277,7 +277,7 @@ do { \
+
+ #define __get_user_asm_word(x,addr,err) \
+ __asm__ __volatile__( \
+- "1: " T(ldr) " %1,[%2],#0\n" \
++ "1: " TUSER(ldr) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+@@ -322,7 +322,7 @@ do { \
+
+ #define __put_user_asm_byte(x,__pu_addr,err) \
+ __asm__ __volatile__( \
+- "1: " T(strb) " %1,[%2],#0\n" \
++ "1: " TUSER(strb) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+@@ -355,7 +355,7 @@ do { \
+
+ #define __put_user_asm_word(x,__pu_addr,err) \
+ __asm__ __volatile__( \
+- "1: " T(str) " %1,[%2],#0\n" \
++ "1: " TUSER(str) " %1,[%2],#0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+@@ -380,10 +380,10 @@ do { \
+
+ #define __put_user_asm_dword(x,__pu_addr,err) \
+ __asm__ __volatile__( \
+- ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
+- ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
+- THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
+- THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
++ ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
++ ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
++ THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
++ THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+@@ -401,8 +401,21 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ check_object_size(to, n, false);
++ return ___copy_from_user(to, from, n);
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ check_object_size(from, n, true);
++ return ___copy_to_user(to, from, n);
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+@@ -417,6 +430,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -426,6 +442,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
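The added "(long)n < 0" test catches a negative length that was cast to unsigned long (for instance an error code reused as a size), which would otherwise arrive at the copy routine as a near-ULONG_MAX request; returning n unchanged reports "nothing copied". A minimal sketch of the guard in isolation (hypothetical function name; the real code also runs access_ok() and, via the __copy_* wrappers above, check_object_size() from PAX_USERCOPY):

    #include <stdio.h>

    static unsigned long copy_from_user_sketch(unsigned long n)
    {
            if ((long)n < 0)
                    return n;       /* huge/negative size: refuse the copy */
            /* ... access_ok() and the real copy would follow here ... */
            return 0;
    }

    int main(void)
    {
            int err = -14;          /* e.g. -EFAULT accidentally used as a length */
            printf("%lu\n", copy_from_user_sketch((unsigned long)err));
            return 0;
    }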
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 5b0bce6..becd81c 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
+ EXPORT_SYMBOL(__clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index b2a27b6..520889c 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
+ get_thread_info tsk
+ ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
+ mov why, #1
+- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
++ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ beq ret_slow_syscall
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+@@ -443,7 +443,7 @@ ENTRY(vector_swi)
+ 1:
+ #endif
+
+- tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
++ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ bne __sys_trace
+
+ cmp scno, #NR_syscalls @ check upper syscall limit
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 3606e85..44ba19d 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -46,7 +46,9 @@
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+
+ .macro pgtbl, rd, phys
+- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
++ mov \rd, #TEXT_OFFSET
++ sub \rd, #PG_DIR_SIZE
++ add \rd, \rd, \phys
+ .endm
+
+ #ifdef CONFIG_XIP_KERNEL
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 2bc1a8e..f433c88 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -986,7 +986,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata dbg_reset_nb = {
++static struct notifier_block dbg_reset_nb = {
+ .notifier_call = dbg_reset_notify,
+ };
+
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 1e9be5d..4e0f470 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -39,6 +39,8 @@
+ #ifdef CONFIG_MMU
+ void *module_alloc(unsigned long size)
+ {
++ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
++ return NULL;
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+ __builtin_return_address(0));
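module_alloc() gains an up-front bounds check: a zero size, or a size whose page-aligned value exceeds the MODULES_VADDR..MODULES_END window, is rejected before reaching vmalloc. One subtlety worth keeping in mind when writing such checks, shown with an illustrative standalone program: PAGE_ALIGN() on a size within a page of ULONG_MAX wraps to 0.

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long huge = ~0UL - 7;  /* e.g. an attacker-influenced size */

            /* wraps to 0 and would sail past a naive "aligned <= limit" test */
            printf("PAGE_ALIGN(%#lx) = %#lx\n", huge, PAGE_ALIGN(huge));
            return 0;
    }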
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index d9e3c61..9cf9513 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -28,7 +28,6 @@
+ #include <linux/tick.h>
+ #include <linux/utsname.h>
+ #include <linux/uaccess.h>
+-#include <linux/random.h>
+ #include <linux/hw_breakpoint.h>
+ #include <linux/cpuidle.h>
+
+@@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused)
+ __setup("nohlt", nohlt_setup);
+ __setup("hlt", hlt_setup);
+
+-void arm_machine_restart(char mode, const char *cmd)
++__noreturn void arm_machine_restart(char mode, const char *cmd)
+ {
+ /* Disable interrupts first */
+ local_irq_disable();
+@@ -135,7 +134,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
++void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
+ EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+ static void do_nothing(void *unused)
+@@ -250,6 +249,7 @@ void machine_power_off(void)
+ machine_shutdown();
+ if (pm_power_off)
+ pm_power_off();
++ BUG();
+ }
+
+ void machine_restart(char *cmd)
+@@ -268,8 +268,8 @@ void __show_regs(struct pt_regs *regs)
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+- print_symbol("PC is at %s\n", instruction_pointer(regs));
+- print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++ printk("LR is at %pA\n", (void *)regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -489,12 +489,6 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ /*
+ * The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 90fa8b3..a3a2212 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -904,10 +904,19 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+ {
+ unsigned long ip;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 7ac5dfd..0ce09c2 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
+
+ static DEFINE_RAW_SPINLOCK(die_lock);
+
++extern void gr_handle_kernel_exploit(void);
++
+ /*
+ * This function is protected against re-entrancy.
+ */
+@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+ }
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index 20b3041..da44b1f 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -103,6 +103,8 @@ SECTIONS
+ ARM_CPU_KEEP(PROC_INFO)
+ }
+
++ _etext = .; /* End of text section */
++
+ RO_DATA(PAGE_SIZE)
+
+ #ifdef CONFIG_ARM_UNWIND
+@@ -122,8 +124,6 @@ SECTIONS
+ }
+ #endif
+
+- _etext = .; /* End of text and rodata section */
+-
+ #ifndef CONFIG_XIP_KERNEL
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 66a477a..bee61d3 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -84,11 +84,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index d066df6..df28194 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -88,11 +88,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+
+ .pushsection .fixup,"ax"
+diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
+index 4306fbf..9b06bb4 100644
+--- a/arch/arm/lib/getuser.S
++++ b/arch/arm/lib/getuser.S
+@@ -34,7 +34,7 @@
+
+ ENTRY(__get_user_1)
+ check_uaccess r0, 1, r1, r2, __get_user_bad
+-1: T(ldrb) r2, [r0]
++1: TUSER(ldrb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_1)
+@@ -61,7 +61,7 @@ ENDPROC(__get_user_2)
+
+ ENTRY(__get_user_4)
+ check_uaccess r0, 4, r1, r2, __get_user_bad
+-4: T(ldr) r2, [r0]
++4: TUSER(ldr) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_4)
+diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
+index 9a897fa..3d73dcb 100644
+--- a/arch/arm/lib/putuser.S
++++ b/arch/arm/lib/putuser.S
+@@ -34,7 +34,7 @@
+
+ ENTRY(__put_user_1)
+ check_uaccess r0, 1, r1, ip, __put_user_bad
+-1: T(strb) r2, [r0]
++1: TUSER(strb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_1)
+@@ -44,19 +44,19 @@ ENTRY(__put_user_2)
+ mov ip, r2, lsr #8
+ #ifdef CONFIG_THUMB2_KERNEL
+ #ifndef __ARMEB__
+-2: T(strb) r2, [r0]
+-3: T(strb) ip, [r0, #1]
++2: TUSER(strb) r2, [r0]
++3: TUSER(strb) ip, [r0, #1]
+ #else
+-2: T(strb) ip, [r0]
+-3: T(strb) r2, [r0, #1]
++2: TUSER(strb) ip, [r0]
++3: TUSER(strb) r2, [r0, #1]
+ #endif
+ #else /* !CONFIG_THUMB2_KERNEL */
+ #ifndef __ARMEB__
+-2: T(strb) r2, [r0], #1
+-3: T(strb) ip, [r0]
++2: TUSER(strb) r2, [r0], #1
++3: TUSER(strb) ip, [r0]
+ #else
+-2: T(strb) ip, [r0], #1
+-3: T(strb) r2, [r0]
++2: TUSER(strb) ip, [r0], #1
++3: TUSER(strb) r2, [r0]
+ #endif
+ #endif /* CONFIG_THUMB2_KERNEL */
+ mov r0, #0
+@@ -65,7 +65,7 @@ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
+ check_uaccess r0, 4, r1, ip, __put_user_bad
+-4: T(str) r2, [r0]
++4: TUSER(str) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_4)
+@@ -73,11 +73,11 @@ ENDPROC(__put_user_4)
+ ENTRY(__put_user_8)
+ check_uaccess r0, 8, r1, ip, __put_user_bad
+ #ifdef CONFIG_THUMB2_KERNEL
+-5: T(str) r2, [r0]
+-6: T(str) r3, [r0, #4]
++5: TUSER(str) r2, [r0]
++6: TUSER(str) r3, [r0, #4]
+ #else
+-5: T(str) r2, [r0], #4
+-6: T(str) r3, [r0]
++5: TUSER(str) r2, [r0], #4
++6: TUSER(str) r3, [r0]
+ #endif
+ mov r0, #0
+ mov pc, lr
+diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
+index d0ece2a..e712687 100644
+--- a/arch/arm/lib/uaccess.S
++++ b/arch/arm/lib/uaccess.S
+@@ -20,7 +20,7 @@
+
+ #define PAGE_SHIFT 12
+
+-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+@@ -32,15 +32,15 @@
+ rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+-USER( T(strb) r3, [r0], #1) @ May fault
++USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+-USER( T(strgeb) r3, [r0], #1) @ May fault
++USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+-USER( T(strgtb) r3, [r0], #1) @ May fault
++USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+-ENTRY(__copy_to_user)
++ENTRY(___copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+@@ -59,7 +59,7 @@ ENTRY(__copy_to_user)
+ addmi ip, r2, #4
+ bmi .Lc2u_0nowords
+ ldr r3, [r1], #4
+-USER( T(str) r3, [r0], #4) @ May fault
++USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+@@ -88,18 +88,18 @@ USER( T(str) r3, [r0], #4) @ May fault
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+- T(strne) r3, [r0], #4 @ Shouldnt fault
++ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_0fupi
+ .Lc2u_0nowords: teq ip, #0
+ beq .Lc2u_finished
+ .Lc2u_nowords: cmp ip, #2
+ ldrb r3, [r1], #1
+-USER( T(strb) r3, [r0], #1) @ May fault
++USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+-USER( T(strgeb) r3, [r0], #1) @ May fault
++USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+-USER( T(strgtb) r3, [r0], #1) @ May fault
++USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+ .Lc2u_not_enough:
+@@ -120,7 +120,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
+ mov r3, r7, pull #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #24
+-USER( T(str) r3, [r0], #4) @ May fault
++USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+@@ -155,18 +155,18 @@ USER( T(str) r3, [r0], #4) @ May fault
+ movne r3, r7, pull #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #24
+- T(strne) r3, [r0], #4 @ Shouldnt fault
++ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_1fupi
+ .Lc2u_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+-USER( T(strb) r3, [r0], #1) @ May fault
++USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_2
+-USER( T(strgeb) r3, [r0], #1) @ May fault
++USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ movgt r3, r7, get_byte_3
+-USER( T(strgtb) r3, [r0], #1) @ May fault
++USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+ .Lc2u_2fupi: subs r2, r2, #4
+@@ -175,7 +175,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
+ mov r3, r7, pull #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #16
+-USER( T(str) r3, [r0], #4) @ May fault
++USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+@@ -210,18 +210,18 @@ USER( T(str) r3, [r0], #4) @ May fault
+ movne r3, r7, pull #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #16
+- T(strne) r3, [r0], #4 @ Shouldnt fault
++ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_2fupi
+ .Lc2u_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+-USER( T(strb) r3, [r0], #1) @ May fault
++USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_3
+-USER( T(strgeb) r3, [r0], #1) @ May fault
++USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+-USER( T(strgtb) r3, [r0], #1) @ May fault
++USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+ .Lc2u_3fupi: subs r2, r2, #4
+@@ -230,7 +230,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
+ mov r3, r7, pull #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #8
+-USER( T(str) r3, [r0], #4) @ May fault
++USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+@@ -265,27 +265,27 @@ USER( T(str) r3, [r0], #4) @ May fault
+ movne r3, r7, pull #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #8
+- T(strne) r3, [r0], #4 @ Shouldnt fault
++ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_3fupi
+ .Lc2u_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+-USER( T(strb) r3, [r0], #1) @ May fault
++USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+-USER( T(strgeb) r3, [r0], #1) @ May fault
++USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+-USER( T(strgtb) r3, [r0], #1) @ May fault
++USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ 9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+@@ -295,16 +295,16 @@ ENDPROC(__copy_to_user)
+ .Lcfu_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+-USER( T(ldrb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+-USER( T(ldrgeb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+-USER( T(ldrgtb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+@@ -322,7 +322,7 @@ ENTRY(__copy_from_user)
+ .Lcfu_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_0nowords
+-USER( T(ldr) r3, [r1], #4)
++USER( TUSER( ldr) r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+@@ -351,18 +351,18 @@ USER( T(ldr) r3, [r1], #4)
+ ldmneia r1!, {r3 - r4} @ Shouldnt fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+- T(ldrne) r3, [r1], #4 @ Shouldnt fault
++ TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_0fupi
+ .Lcfu_0nowords: teq ip, #0
+ beq .Lcfu_finished
+ .Lcfu_nowords: cmp ip, #2
+-USER( T(ldrb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+-USER( T(ldrgeb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+-USER( T(ldrgtb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+@@ -375,7 +375,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
+
+ .Lcfu_src_not_aligned:
+ bic r1, r1, #3
+-USER( T(ldr) r7, [r1], #4) @ May fault
++USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ cmp ip, #2
+ bgt .Lcfu_3fupi
+ beq .Lcfu_2fupi
+@@ -383,7 +383,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
+ addmi ip, r2, #4
+ bmi .Lcfu_1nowords
+ mov r3, r7, pull #8
+-USER( T(ldr) r7, [r1], #4) @ May fault
++USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+@@ -418,7 +418,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #8
+-USER( T(ldrne) r7, [r1], #4) @ May fault
++USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+@@ -438,7 +438,7 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
+ addmi ip, r2, #4
+ bmi .Lcfu_2nowords
+ mov r3, r7, pull #16
+-USER( T(ldr) r7, [r1], #4) @ May fault
++USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+@@ -474,7 +474,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #16
+-USER( T(ldrne) r7, [r1], #4) @ May fault
++USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+@@ -486,7 +486,7 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_3
+ strgeb r3, [r0], #1
+-USER( T(ldrgtb) r3, [r1], #0) @ May fault
++USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+@@ -494,7 +494,7 @@ USER( T(ldrgtb) r3, [r1], #0) @ May fault
+ addmi ip, r2, #4
+ bmi .Lcfu_3nowords
+ mov r3, r7, pull #24
+-USER( T(ldr) r7, [r1], #4) @ May fault
++USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+@@ -529,7 +529,7 @@ USER( T(ldr) r7, [r1], #4) @ May fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #24
+-USER( T(ldrne) r7, [r1], #4) @ May fault
++USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+@@ -539,12 +539,12 @@ USER( T(ldrne) r7, [r1], #4) @ May fault
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+-USER( T(ldrgeb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+-USER( T(ldrgtb) r3, [r1], #1) @ May fault
++USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 025f742..8432b08 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -104,7 +104,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index e9d5f4a..f099699 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -593,7 +593,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+
+-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+ .late_init = n8x0_menelaus_late_init,
+ };
+
+diff --git a/arch/arm/mach-omap2/smartreflex.h b/arch/arm/mach-omap2/smartreflex.h
+index 5f35b9e..6d09f99 100644
+--- a/arch/arm/mach-omap2/smartreflex.h
++++ b/arch/arm/mach-omap2/smartreflex.h
+@@ -183,7 +183,7 @@ struct omap_sr_class_data {
+ int (*notify)(struct voltagedomain *voltdm, u32 status);
+ u8 notify_flags;
+ u8 class_type;
+-};
++} __do_const;
+
+ /**
+ * struct omap_sr_nvalue_table - Smartreflex n-target value info
+diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
+index 2b2d51c..0127490 100644
+--- a/arch/arm/mach-ux500/mbox-db5500.c
++++ b/arch/arm/mach-ux500/mbox-db5500.c
+@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
+ return sprintf(buf, "0x%X\n", mbox_value);
+ }
+
+-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
++static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+
+ static int mbox_show(struct seq_file *s, void *data)
+ {
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 4b0bc37..d556b08 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -386,6 +386,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+@@ -630,6 +657,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == 2) {
++ unsigned int bkpt;
++
++ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_overflow(regs);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
++
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
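A debug-event prefetch abort (fsr_fs(ifsr) == 2) is only treated as a refcount overflow if the faulting word really is the trap planted by the checked atomics: 0xe12f1073 is "bkpt 0xf103" in ARM state. The encoding can be verified mechanically (standalone check; 0xe1200070 is the ARM-state BKPT template, with the 16-bit immediate split as imm12:imm4):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint32_t imm16 = 0xf103;
            uint32_t insn  = 0xe1200070 | ((imm16 & 0xfff0) << 4) | (imm16 & 0xf);

            assert(insn == 0xe12f1073);     /* matches the probe above */
            return 0;
    }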
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 44b628e..be706ee 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -33,6 +33,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long start_addr;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ /*
+ * We only need to do colour alignment if either the I or D
+@@ -54,6 +55,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -61,16 +66,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* 8 bits of randomness in 20 address space bits */
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE))
+@@ -89,14 +98,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -111,7 +120,6 @@ full_search:
+ }
+ }
+
+-
+ /*
+ * You really shouldn't be using read() or write() on /dev/mem. This
+ * might go away in the future.
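With PF_RANDOMIZE set (and ADDR_NO_RANDOMIZE clear), the comment above says the search base receives 8 bits of randomness at page granularity, i.e. a slide of up to 255 pages. A rough model of the effect (rand() merely stands in for the kernel's entropy source; the mmap_base value is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long base = 0x40000000UL;      /* hypothetical mmap_base */

            base += ((unsigned long)rand() & 0xff) << PAGE_SHIFT;   /* < 1 MiB */
            printf("randomized search base: %#lx\n", base);
            return 0;
    }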
+diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
+index 4c1a363..df311d0 100644
+--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
++++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
+@@ -41,7 +41,7 @@ struct samsung_dma_ops {
+ int (*started)(unsigned ch);
+ int (*flush)(unsigned ch);
+ int (*stop)(unsigned ch);
+-};
++} __no_const;
+
+ extern void *samsung_dmadev_get_ops(void);
+ extern void *s3c_dma_get_ops(void);
+diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
+index 5f28cae..3d23723 100644
+--- a/arch/arm/plat-samsung/include/plat/ehci.h
++++ b/arch/arm/plat-samsung/include/plat/ehci.h
+@@ -14,7 +14,7 @@
+ struct s5p_ehci_platdata {
+ int (*phy_init)(struct platform_device *pdev, int type);
+ int (*phy_exit)(struct platform_device *pdev, int type);
+-};
++} __no_const;
+
+ extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
+
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 3b3159b..425ea94d 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index b7f5c68..556135c 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index f7040a1..db9f300 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -156,6 +173,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
+ #include <linux/linkage.h> /* for asmlinkage */
+
+ /*
+@@ -14,7 +15,7 @@
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 1de779f..336fad3 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 0d8a7d6..d0c9ff5 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401d 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index f8e16b2..c73ff79 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 385fd30..27cf8ba 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ struct vm_area_struct *vma;
+ unsigned long limit;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ }
+
+@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
+index c635028..6d9445a 100644
+--- a/arch/h8300/include/asm/cache.h
++++ b/arch/h8300/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ARCH_H8300_CACHE_H
+ #define __ARCH_H8300_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+-#define L1_CACHE_BYTES 4
++#define L1_CACHE_BYTES _AC(4,UL)
+
+ /* m68k-elf-gcc 2.95.2 doesn't like these */
+
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 0f01de2..d37d309 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
+ #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 2fc214b..7597423 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index b5298eb..67c6e62 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index 96a8d92..617a1cf 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 1a97af3..7529d31 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
+index fba7696..a7650fd 100644
+--- a/arch/ia64/include/asm/processor.h
++++ b/arch/ia64/include/asm/processor.h
+@@ -320,7 +320,7 @@ struct thread_struct {
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+- if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
++ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index b77768d..e0795eb 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0..18965fb 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(from, count, true);
++
+ return __copy_user(to, (__force void __user *) from, count);
+ }
+
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(to, count, false);
++
+ return __copy_user((__force void __user *) to, from, count);
+ }
+
+@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void *__cu_to = (to); \
+ const void __user *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_to, __cu_len, false); \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
+index c539c68..c95d3db 100644
+--- a/arch/ia64/kernel/err_inject.c
++++ b/arch/ia64/kernel/err_inject.c
+@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
++static struct notifier_block err_inject_cpu_notifier =
+ {
+ .notifier_call = err_inject_cpu_callback,
+ };
+diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
+index 782c3a35..3540c5e 100644
+--- a/arch/ia64/kernel/irq_ia64.c
++++ b/arch/ia64/kernel/irq_ia64.c
+@@ -23,7 +23,6 @@
+ #include <linux/ioport.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/ptrace.h>
+-#include <linux/random.h> /* for rand_initialize_irq() */
+ #include <linux/signal.h>
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 9b97303..69464a9 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1919,7 +1919,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
++static struct notifier_block mca_cpu_notifier = {
+ .notifier_call = mca_cpu_callback
+ };
+
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 24603be..948052d 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index 77597e5..189dd62f 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -977,7 +977,7 @@ create_palinfo_proc_entries(unsigned int cpu)
+ struct proc_dir_entry **pdir;
+ struct proc_dir_entry *cpu_dir;
+ int j;
+- char cpustr[sizeof(CPUSTR)];
++ char cpustr[3+4+1];
+
+
+ /*
+@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+ .notifier_call = palinfo_cpu_callback,
+ .priority = 0,
+diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
+index 89accc6..236d389 100644
+--- a/arch/ia64/kernel/perfmon.c
++++ b/arch/ia64/kernel/perfmon.c
+@@ -632,6 +632,7 @@ static struct file_system_type pfm_fs_type = {
+ .mount = pfmfs_mount,
+ .kill_sb = kill_anon_super,
+ };
++MODULE_ALIAS_FS("pfmfs");
+
+ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
+ DEFINE_PER_CPU(struct task_struct *, pmu_owner);
+@@ -2370,7 +2371,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
+ */
+ insert_vm_struct(mm, vma);
+
+- mm->total_vm += size >> PAGE_SHIFT;
+ vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+ vma_pages(vma));
+ up_write(&task->mm->mmap_sem);
+diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
+index 79802e5..1a89ec5 100644
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
++static struct notifier_block salinfo_cpu_notifier =
+ {
+ .notifier_call = salinfo_cpu_callback,
+ .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 609d500..254a3d7 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
+index 9be1f11..f2eef30 100644
+--- a/arch/ia64/kernel/topology.c
++++ b/arch/ia64/kernel/topology.c
+@@ -444,7 +444,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata cache_cpu_notifier =
++static struct notifier_block cache_cpu_notifier =
+ {
+ .notifier_call = cache_cpu_callback
+ };
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index 53c0ba0..2accdde 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -199,7 +199,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 20b3593..1ce77f0 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 5ca674b..0d1395a 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct *vmm;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, &addr, len, offset))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 00cb0e2..2ad8024 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee9..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 1d93f81..67794d0 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -21,6 +21,10 @@
+ #include <asm/war.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ /*
+@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ */
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* CONFIG_64BIT */
+
+ /*
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 455c0ac..ad65fbe 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -372,13 +372,16 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+-struct mm_struct;
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index e59cd1a..8e329d6 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index 881d18b..cea38bc 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
+index 6018c80..7c37203 100644
+--- a/arch/mips/include/asm/system.h
++++ b/arch/mips/include/asm/system.h
+@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_SYSTEM_H */
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index adda036..e0f33bb 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -124,6 +124,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
+ #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
+ #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
+ #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
+ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
+
+ #ifdef CONFIG_MIPS32_O32
+@@ -148,15 +150,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
+ #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
+ #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
+ #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
++
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
++#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK (0x0000ffef & \
+ ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
+ /* work to do on any return to u-space */
+-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
+
+ #endif /* __KERNEL__ */
+
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 9fdd8bc..4bd7f1a 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index ff44823..97f8906 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index bf128d7..bc244d6 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -479,15 +479,3 @@ unsigned long get_wchan(struct task_struct *task)
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 4e6ea1f..0922422 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -529,6 +529,10 @@ static inline int audit_arch(void)
+ return arch;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+@@ -538,6 +542,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
+ /* do the secure computing check first */
+ secure_computing(regs->regs[2]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (!(current->ptrace & PT_PTRACED))
+ goto out;
+
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index a632bc1..0b77c7c 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+
+ stack_done:
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ and t0, t1
+ bnez t0, syscall_trace_entry # -> yes
+
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 3b5a5e9..e1ee86d 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 6be6f70..1859577 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, n32_syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 5422855..74e63a3 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+ PTR 4b, bad_stack
+ .previous
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, trace_a_syscall
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 937cf33..adb39bb 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -28,6 +28,23 @@
+ #include <asm/highmem.h> /* For VMALLOC_END */
+ #include <linux/kdebug.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 302d779..3845a09 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++		if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ addr = vma->vm_end;
+ if (do_color_align)
+@@ -144,10 +149,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr - len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++		if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* cache the address as a hint for next time */
+- return mm->free_area_cache = addr - len;
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+@@ -155,17 +161,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ goto bottomup;
+
+ addr = mm->mmap_base - len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++		if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr;
+ }
+@@ -175,10 +181,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start - len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -223,6 +227,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ random_factor = random_factor << PAGE_SHIFT;
+@@ -234,38 +242,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = get_random_int();
+-
+- rnd = rnd << PAGE_SHIFT;
+- /* 8MB for 32bit, 256MB for 64bit */
+- if (TASK_IS_32BIT_ADDR)
+- rnd = rnd & 0x7ffffful;
+- else
+- rnd = rnd & 0xffffffful;
+-
+- return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * L1 cache
+ */
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+-#define L1_CACHE_BYTES 32 /* bytes per entry */
+ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 4ce7a01..449202a 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,11 +19,13 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
+
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_OPENRISC_CACHE_H */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index c4b779b..775b66b 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+
+
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 47f11c7..3420df2 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+@@ -15,13 +16,13 @@
+ * just ruin performance.
+ */
+ #ifdef CONFIG_PA20
+-#define L1_CACHE_BYTES 64
+ #define L1_CACHE_SHIFT 6
+ #else
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 19f6cb1..6c78cf2 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index fc987a1..6e068ef 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 9d35a3e..af9b6d3 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -16,6 +16,8 @@
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+
++extern spinlock_t pa_dbit_lock;
++
+ /*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+
+ #define set_pte_at(mm, addr, ptep, pteval) \
+ do { \
++ unsigned long flags; \
++ spin_lock_irqsave(&pa_dbit_lock, flags); \
+ set_pte(ptep, pteval); \
+ purge_tlb_entries(mm, addr); \
++ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
+ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+@@ -216,6 +221,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+@@ -433,48 +449,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+
+ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+ {
+-#ifdef CONFIG_SMP
++ pte_t pte;
++ unsigned long flags;
++
+ if (!pte_young(*ptep))
+ return 0;
+- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
+-#else
+- pte_t pte = *ptep;
+- if (!pte_young(pte))
++
++ spin_lock_irqsave(&pa_dbit_lock, flags);
++ pte = *ptep;
++ if (!pte_young(pte)) {
++ spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ return 0;
+- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
++ }
++ set_pte(ptep, pte_mkold(pte));
++ purge_tlb_entries(vma->vm_mm, addr);
++ spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ return 1;
+-#endif
+ }
+
+-extern spinlock_t pa_dbit_lock;
+-
+ struct mm_struct;
+ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ pte_t old_pte;
++ unsigned long flags;
+
+- spin_lock(&pa_dbit_lock);
++ spin_lock_irqsave(&pa_dbit_lock, flags);
+ old_pte = *ptep;
+ pte_clear(mm,addr,ptep);
+- spin_unlock(&pa_dbit_lock);
++ purge_tlb_entries(mm, addr);
++ spin_unlock_irqrestore(&pa_dbit_lock, flags);
+
+ return old_pte;
+ }
+
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+-#ifdef CONFIG_SMP
+- unsigned long new, old;
+-
+- do {
+- old = pte_val(*ptep);
+- new = pte_val(pte_wrprotect(__pte (old)));
+- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
++ unsigned long flags;
++ spin_lock_irqsave(&pa_dbit_lock, flags);
++ set_pte(ptep, pte_wrprotect(*ptep));
+ purge_tlb_entries(mm, addr);
+-#else
+- pte_t old_pte = *ptep;
+- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+-#endif
++ spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ }
+
+ #define pte_same(A,B) (pte_val(A) == pte_val(B))
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index ff4cf9d..c0564bb 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -253,10 +253,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+ int ret = -EFAULT;
+
+- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
++ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
+ ret = __copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 5241698..91dcb12 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -428,14 +428,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ /* Note: purge_tlb_entries can be called at startup with
+ no context. */
+
+- /* Disable preemption while we play with %sr1. */
+- preempt_disable();
++ purge_tlb_start(flags);
+ mtsp(mm->context, 1);
+- purge_tlb_start(flags);
+ pdtlb(addr);
+ pitlb(addr);
+ purge_tlb_end(flags);
+- preempt_enable();
+ }
+ EXPORT_SYMBOL(purge_tlb_entries);
+
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 5709c5e..14285ca 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
+ static void setup_bus_id(struct parisc_device *padev)
+ {
+ struct hardware_path path;
+- char name[20];
++ char name[28];
+ char *output = name;
+ int i;
+
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index 5e34ccf..672bc9c 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -98,16 +98,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
+
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index a3328c2..3b812eb 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
+ /* called from hpux boot loader */
+ boot_command_line[0] = '\0';
+ } else {
+- strcpy(boot_command_line, (char *)__va(boot_args[1]));
++ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
++ COMMAND_LINE_SIZE);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 7ea75d1..5075226 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ addr = PAGE_ALIGN(addr);
+
+@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
+ return offset & 0x3FF000;
+ }
+
+-static unsigned long get_shared_area(struct address_space *mapping,
+- unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
++ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
+@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, rand_offset))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+ } else if(flags & MAP_SHARED) {
+- addr = get_shared_area(NULL, addr, len, pgoff);
++ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+ } else {
+- addr = get_unshared_area(addr, len);
++ addr = get_unshared_area(filp, addr, len, flags);
+ }
+ return addr;
+ }
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index cd8b02f..543008b 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 18162ce..94de376 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 02e41b5..ec6e26c 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __powerpc64__ */
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index 4b50941..5605819 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,7 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +23,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index 3bf9cca..e7457d0 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
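
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits applied to the mmap and stack bases when PAX_ASLR is enabled. A sketch of how PaX consumes them at exec time (paraphrased; pax_get_random_long() and the mm->delta_* fields are internal to this patch set):

static void pax_pick_deltas(struct mm_struct *mm)
{
	/* e.g. 28 bits on 64-bit ppc: up to 2^28 pages of shift */
	mm->delta_mmap  = (pax_get_random_long() &
			   ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
	mm->delta_stack = (pax_get_random_long() &
			   ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
}
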
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index bca8fdc..61e9580 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -27,6 +27,7 @@ enum km_type {
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index d4a7f64..451de1c 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+ }
+ #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 5b0bde2..9f83e1a 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -151,8 +151,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -180,6 +181,9 @@ extern phys_addr_t kernstart_addr;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+ * the entries we point to contain hugepages. This works because we know that
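
VM_DATA_DEFAULT_FLAGS32 now derives VM_EXEC from the per-process personality instead of granting it unconditionally. Expanded into a function, the new macro reads roughly as:

static unsigned long vm_data_default_flags32(void)
{
	unsigned long flags = VM_READ | VM_WRITE |
			      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* executable data only for binaries that ask for it; the
	 * PT_GNU_STACK handling in fs/binfmt_elf.c sets this bit */
	if (current->personality & READ_IMPLIES_EXEC)
		flags |= VM_EXEC;
	return flags;
}
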
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index fb40ede..d3ce956 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -144,15 +144,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
+index 292725c..f87ae14 100644
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #else /* CONFIG_PPC_64K_PAGES */
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 88b0bd9..e32bc67 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h> /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
+index 4aad413..85d86bf 100644
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 578e5a0..2ab6a8a 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -212,6 +212,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index adba970..ef0d917 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
+index e30a13d..2b7d994 100644
+--- a/arch/powerpc/include/asm/system.h
++++ b/arch/powerpc/include/asm/system.h
+@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ /* Used in very early kernel initialization. */
+ extern unsigned long reloc_offset(void);
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 836f231..39d0b94 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -104,7 +104,6 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SINGLESTEP 8 /* singlestepping active */
+-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
+ #define TIF_SECCOMP 10 /* secure computing */
+ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+ #define TIF_NOERROR 12 /* Force successful syscall return */
+@@ -112,6 +111,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_FREEZE 14 /* Freezing for suspend */
+ #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+ #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -130,8 +132,11 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_FREEZE (1<<TIF_FREEZE)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
++
+ #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
++ _TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME)
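
TIF_MEMDIE moves from bit 9 up to bit 17 to free a low bit for TIF_GRSEC_SETXID: every flag tested on the syscall entry path has to live in bits 0-15, because the ppc entry code masks them with an andi. instruction whose immediate field is 16 bits wide. A hypothetical compile-time guard for that constraint (not part of the patch):

static inline void tif_syscall_mask_check(void)
{
	/* fails the build if any syscall-path flag escapes the low 16 bits */
	BUILD_BUG_ON(_TIF_SYSCALL_T_OR_A & ~0xffffUL);
}
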
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index bd0fb84..1f2d065 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -327,52 +327,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +350,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +380,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +401,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++		if (!__builtin_constant_p(n - over))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
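
The reinstated copy routines all share one hardened shape: reject negative lengths up front, bounds-check non-constant sizes against the kernel object with check_object_size() (the PaX usercopy check), and only then fall through to the raw copy. The 64-bit copy_from_user above, condensed:

static inline unsigned long
hardened_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if ((long)n < 0 || n > INT_MAX)		/* absurd length: refuse */
		return n;
	if (!__builtin_constant_p(n))		/* runtime size: check object */
		check_object_size(to, n, false);
	if (likely(access_ok(VERIFY_READ, from, n)))
		return __copy_from_user(to, from, n);
	memset(to, 0, n);			/* never leak stale kernel data */
	return n;
}
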
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 429983c..7af363b 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -587,6 +587,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -596,8 +597,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 8c3baa0..4d8c6f1 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1004,10 +1004,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 745c1e7..59d97a6 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -547,9 +547,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
+ host->ops = ops;
+ host->of_node = of_node_get(of_node);
+
+- if (host->ops->match == NULL)
+- host->ops->match = default_irq_host_match;
+-
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* If it's a legacy controller, check for duplicates and
+@@ -622,7 +619,12 @@ struct irq_host *irq_find_host(struct device_node *node)
+ */
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link)
+- if (h->ops->match(h, node)) {
++ if (h->ops->match) {
++ if (h->ops->match(h, node)) {
++ found = h;
++ break;
++ }
++ } else if (default_irq_host_match(h, node)) {
+ found = h;
+ break;
+ }
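
This irq.c hunk is the flip side of marking ops structures const (and __no_const, as in the smp.h hunk above): instead of writing a default ->match callback into the table at registration time, the lookup falls back at the call site, so the table itself never has to be writable. The pattern, reduced:

/* before: host->ops->match = default_irq_host_match;  (table must be RW)
 * after:  keep the table const and branch at the point of use          */
static int host_matches(struct irq_host *h, struct device_node *np)
{
	return h->ops->match ? h->ops->match(h, np)
			     : default_irq_host_match(h, np);
}
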
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 2e3200c..72095ce 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index d687e3f..074a8cd 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best chance of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1157,10 +1157,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1180,7 +1180,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- Exception: %lx at %pS\n LR = %pS\n",
++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1255,58 +1255,3 @@ void thread_info_cache_init(void)
+ }
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
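
For reference, the deleted brk_rnd() sized its range in pages, which with PAGE_SHIFT == 12 works out to exactly what its comment promised; PaX drops it in favour of the delta_mmap/delta_stack scheme (see the mmap_64.c hunk below):

unsigned long rnd32 = 1UL << (23 - 12);	/*   2048 pages ==  8 MB */
unsigned long rnd64 = 1UL << (30 - 12);	/* 262144 pages ==  1 GB */
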
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 5de73db..a05f61c 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+
+ secure_computing(regs->gpr[0]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+@@ -1748,6 +1757,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+ int step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(current->audit_context))
+ audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
+ regs->result);
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index fa1e56b..e8ef867 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -865,7 +865,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 60d1f75..2c29348 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -435,7 +435,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
+index f2496f2..4e3cc47 100644
+--- a/arch/powerpc/kernel/syscalls.c
++++ b/arch/powerpc/kernel/syscalls.c
+@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
+ long ret;
+
+ if (personality(current->personality) == PER_LINUX32
+- && personality == PER_LINUX)
+- personality = PER_LINUX32;
++ && personality(personality) == PER_LINUX)
++ personality = (personality & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+ }
+ #endif
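
The ppc64_personality() fix matters because the upper bits of the personality word are flags, not the personality itself: linux/personality.h defines personality(x) as (x & PER_MASK). Comparing or assigning the whole word, as the old code did, silently discarded flags such as ADDR_NO_RANDOMIZE:

unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;

/* old: p = PER_LINUX32;                 ADDR_NO_RANDOMIZE was lost */
p = (p & ~PER_MASK) | PER_LINUX32;	/* new: flags preserved      */
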
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index ca683a1..ab912dd 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -531,7 +531,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
++static struct notifier_block sysfs_cpu_nb = {
+ .notifier_call = sysfs_cpu_notify,
+ };
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 9844662..04a2a1e 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
+ static inline void pmac_backlight_unblank(void) { }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ int die(const char *str, struct pt_regs *regs, long err)
+ {
+ static struct {
+@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
++ gr_handle_kernel_exploit();
++
+ oops_exit();
+ do_exit(err);
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 7d14bb69..1305601 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -35,6 +35,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 5efe8c9..db9ceef 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -32,6 +32,10 @@
+ #include <linux/perf_event.h>
+ #include <linux/magic.h>
+ #include <linux/ratelimit.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -43,6 +47,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
+ #include <mm/mmu_decl.h>
++#include <asm/ptrace.h>
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs)
+@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -259,7 +291,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -274,7 +306,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -343,6 +375,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
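
On the instruction-fetch path the SRR1 mask widens from 0x48200000 to 0x58200000, that is, it now also retains DSISR_GUARDED: 0x48200000 | 0x10000000 == 0x58200000. Keeping that bit is what lets the PAGEEXEC logic below recognise faults caused by fetching from guarded (non-executable) storage.
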
+diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
+index 5a783d8..fbe4c8b 100644
+--- a/arch/powerpc/mm/mmap_64.c
++++ b/arch/powerpc/mm/mmap_64.c
+@@ -65,6 +65,10 @@ static unsigned long mmap_rnd(void)
+ {
+ unsigned long rnd = 0;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+@@ -99,10 +103,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
+index 5b63bd3..248942d 100644
+--- a/arch/powerpc/mm/mmu_context_nohash.c
++++ b/arch/powerpc/mm/mmu_context_nohash.c
+@@ -370,7 +370,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
++static struct notifier_block mmu_context_cpu_nb = {
+ .notifier_call = mmu_context_cpu_notify,
+ };
+
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 24523dc..7205007 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -964,7 +964,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
+ return ret;
+ }
+
+-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
++static struct notifier_block ppc64_numa_nb = {
+ .notifier_call = cpu_numa_callback,
+ .priority = 1 /* Must run before sched domains notifier. */
+ };
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 73709f7..8e825a8 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, &addr, len, 0);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -256,7 +256,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, 0)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ }
+ }
+
+- addr = mm->mmap_base;
+- while (addr > len) {
++ if (mm->mmap_base < len)
++ addr = -ENOMEM;
++ else
++ addr = mm->mmap_base - len;
++
++ while (!IS_ERR_VALUE(addr)) {
+ /* Go down by chunk size */
+- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, 0)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start;
++ addr = skip_heap_stack_gap(vma, len, 0);
+ }
+
+ /*
+@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
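
slice_area_is_free() and both search loops now defer to check_heap_stack_gap()/skip_heap_stack_gap(), which are defined in the mm core portion of this patch. Roughly, the check rejects candidates that would butt up against a stack-like mapping; a simplified sketch of the idea (not the actual implementation):

static int gap_ok(const struct vm_area_struct *vma, unsigned long addr,
		  unsigned long len, unsigned long gap)
{
	if (!vma)
		return 1;			/* nothing above the candidate */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a hole below stacks */
		return addr + len + gap <= vma->vm_start;
	return addr + len <= vma->vm_start;	/* classic containment test */
}
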
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index 0cfece4..2f1a0e5 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return VM_FAULT_NOPAGE;
+ }
+
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+ unsigned long address,
+- void *buf, int len, int write)
++ void *buf, size_t len, int write)
+ {
+ struct spu_context *ctx = vma->vm_file->private_data;
+ unsigned long offset = address - vma->vm_start;
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index 70ec4e9..3e7a115 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -811,6 +811,7 @@ static struct file_system_type spufs_type = {
+ .mount = spufs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("spufs");
+
+ static int __init spufs_init(void)
+ {
+diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
+index 3394254..8c6825c 100644
+--- a/arch/powerpc/platforms/powermac/smp.c
++++ b/arch/powerpc/platforms/powermac/smp.c
+@@ -886,7 +886,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
++static struct notifier_block smp_core99_cpu_nb = {
+ .notifier_call = smp_core99_cpu_notify,
+ };
+ #endif /* CONFIG_HOTPLUG_CPU */
+diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
+index 24bff4f..0248123 100644
+--- a/arch/s390/appldata/appldata_base.c
++++ b/arch/s390/appldata/appldata_base.c
+@@ -610,7 +610,7 @@ static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata appldata_nb = {
++static struct notifier_block appldata_nb = {
+ .notifier_call = appldata_cpu_notify,
+ };
+
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 481f4f7..f16ec59 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -454,6 +454,7 @@ static struct file_system_type hypfs_type = {
+ .mount = hypfs_mount,
+ .kill_sb = hypfs_kill_super
+ };
++MODULE_ALIAS_FS("s390_hypfs");
+
+ static const struct super_operations hypfs_s_ops = {
+ .statfs = simple_statfs,
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index 8517d2a..d2738d4 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 2a30d5a..5e5586f 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -11,8 +11,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD 32
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 547f1a6..3e6d0a0 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
++#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+@@ -183,7 +189,8 @@ extern char elf_platform[];
+ #define ELF_PLATFORM (elf_platform)
+
+ #ifndef __s390x__
+-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
++#define SET_PERSONALITY(ex) \
++ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+ #else /* __s390x__ */
+ #define SET_PERSONALITY(ex) \
+ do { \
+@@ -211,7 +218,4 @@ struct linux_binprm;
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ int arch_setup_additional_pages(struct linux_binprm *, int);
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif
+diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
+index ef573c1..75a1ce6 100644
+--- a/arch/s390/include/asm/system.h
++++ b/arch/s390/include/asm/system.h
+@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
+ extern void (*_machine_halt)(void);
+ extern void (*_machine_power_off)(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ static inline int tprot(unsigned long addr)
+ {
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 2b23885..e84d6ac 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -235,6 +235,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -291,10 +298,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- unsigned int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+ might_fault();
+- if (unlikely(sz != -1 && sz < n)) {
++
++ if ((long)n < 0)
++ return n;
++
++ if (unlikely(sz != (size_t)-1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+ }
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index dfcb343..eda788a 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 53088e2..9f44a36 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p)
+ }
+ return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+- else
+- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (!(current->flags & PF_RANDOMIZE))
+- return base;
+- if (ret < base)
+- return base;
+- return ret;
+-}
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 1df64a8..aea2a39 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1035,7 +1035,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block __cpuinitdata smp_cpu_nb = {
++static struct notifier_block smp_cpu_nb = {
+ .notifier_call = smp_cpu_notify,
+ };
+
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index c70b3d8..d01c6b3 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
+index 589d5c7..669e274 100644
+--- a/arch/score/include/asm/system.h
++++ b/arch/score/include/asm/system.h
+@@ -17,7 +17,7 @@ do { \
+ #define finish_arch_switch(prev) do {} while (0)
+
+ typedef void (*vi_handler_t)(void);
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #define mb() barrier()
+ #define rmb() barrier()
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index 25d0803..d6c8e36 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+index 03f2b55..b0270327 100644
+--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
++static struct notifier_block shx3_cpu_notifier = {
+ .notifier_call = shx3_cpu_callback,
+ };
+
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index afeb710..8da5c79 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -106,7 +106,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -178,28 +178,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_colour_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -209,10 +210,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index eddcfb3..b117d90 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index 07dd35e..2c6f765 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -13,6 +13,8 @@
+
+ #include <linux/types.h>
+
++#include <asm-generic/atomic64.h>
++
+ #ifdef __KERNEL__
+
+ #include <asm-generic/atomic64.h>
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 9f421df..b81fc12 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(i, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
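/*
 * [Editor's sketch -- not part of the patch] The semantics the
 * PAX_REFCOUNT hunks above implement in SPARC assembly, restated in
 * portable C.  The patched atomics use addcc/subcc so the condition
 * codes are set, then "tvs %icc, 6" (or %xcc for 64-bit) raises
 * software trap 6 on signed overflow, while the *_unchecked variants
 * keep the plain add for counters that are allowed to wrap.  This only
 * illustrates the checked/unchecked split, not the kernel code itself.
 */
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int *counter)        /* atomic_inc() analogue */
{
        int new;
        if (__builtin_add_overflow(*counter, 1, &new))
                abort();                    /* stands in for "tvs %icc, 6" */
        return *counter = new;
}

static int unchecked_inc(int *counter)      /* atomic_inc_unchecked() analogue */
{
        *counter = (int)((unsigned int)*counter + 1u);  /* wraps silently */
        return *counter;
}

int main(void)
{
        int refs = 0;
        printf("%d %d\n", checked_inc(&refs), unchecked_inc(&refs));
        return 0;
}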
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 69358b5..9d0d492 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index 4269ca6..e3da77f 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 7df8b7f..4946269 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -180,6 +180,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
+index 156707b..aefa786 100644
+--- a/arch/sparc/include/asm/page_32.h
++++ b/arch/sparc/include/asm/page_32.h
+@@ -8,6 +8,8 @@
+ #ifndef _SPARC_PAGE_H
+ #define _SPARC_PAGE_H
+
++#include <linux/const.h>
++
+ #define PAGE_SHIFT 12
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index ca2b344..c6084f89 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
+ BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
+ #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
+ #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 40b2d7a..22a665b 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ }
+
+ #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index a790cc6..091ed94 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index f6ae2b2..b03ffc7 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 9689176..63c18ea 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index fa57532..e1a4c53 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -50,6 +50,8 @@ struct thread_info {
+ unsigned long w_saved;
+
+ struct restart_block restart_block;
++
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index 60d86be..6389ac8 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,6 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ };
+
+@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
+ /* flag bit 6 is available */
+ #define TIF_32BIT 7 /* 32-bit binary */
+-/* flag bit 8 is available */
++#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
+ #define TIF_SECCOMP 9 /* secure computing */
+ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+ * in using in assembly, else we can't use the mask as
+ * an immediate value in instructions such as andcc.
+@@ -238,12 +241,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+ #define _TIF_FREEZE (1<<TIF_FREEZE)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+ _TIF_DO_NOTIFY_RESUME_MASK | \
+ _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+
++#define _TIF_WORK_SYSCALL \
++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
++
++
+ /*
+ * Thread-synchronous status.
+ *
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index e88fbe5..bd0eda7 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 8303ac4..07f333d 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
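/*
 * [Editor's note] Why the "(long)n < 0" guards added above are a cheap
 * first line of defence: a length produced by signed underflow (a
 * too-small buffer minus a larger need) has its top bit set, so viewed
 * as long it is negative and the copy is refused before __access_ok()
 * is even consulted.  Standalone demonstration:
 */
#include <assert.h>
#include <stddef.h>

int main(void)
{
        size_t have = 8, need = 24;
        size_t n = have - need;     /* underflows to a huge unsigned value */
        assert((long)n < 0);        /* the new guard catches exactly this */
        return 0;
}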
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 3e1449f..5293a0e 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/system.h>
+ #include <asm/spitfire.h>
+@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
++
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index cb85458..e063f17 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index 27728e1..0010e923 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
+ char *base, *p;
+ int msg_len, loops;
+
++ if (strlen(var) + strlen(value) + 2 >
++ sizeof(pkt) - sizeof(pkt.header)) {
++ printk(KERN_ERR PFX
++ "contents length: %zu, which more than max: %lu,"
++ "so could not set (%s) variable to (%s).\n",
++ strlen(var) + strlen(value) + 2,
++ sizeof(pkt) - sizeof(pkt.header), var, value);
++ return;
++ }
++
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.header.data.tag.type = DS_DATA;
+ pkt.header.data.handle = cp->handle;
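/*
 * [Editor's illustration -- struct sizes are placeholders] The guard
 * added above, checked standalone: var, its NUL, value and its NUL
 * must all fit in the packet payload that follows the header,
 * otherwise the subsequent copies would overrun pkt.
 */
#include <string.h>

struct pkt_sketch { char header[32]; char payload[96]; };

static int ldom_var_fits(const char *var, const char *value)
{
        struct pkt_sketch pkt;
        return strlen(var) + strlen(value) + 2 <=
               sizeof(pkt) - sizeof(pkt.header);
}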
+diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
+index a19c8a0..d04a60b 100644
+--- a/arch/sparc/kernel/leon_kernel.c
++++ b/arch/sparc/kernel/leon_kernel.c
+@@ -53,11 +53,13 @@ static inline unsigned int leon_eirq_get(int cpu)
+ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ unsigned int eirq;
++ struct irq_bucket *p;
+ int cpu = sparc_leon3_cpuid();
+
+ eirq = leon_eirq_get(cpu);
+- if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
+- generic_handle_irq(irq_map[eirq]->irq);
++ p = irq_map[eirq];
++ if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
++ generic_handle_irq(p->irq);
+ }
+
+ /* The extended IRQ controller has been found, this function registers it */
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index f793742..4d880af 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
+ rw->ins[4], rw->ins[5],
+ rw->ins[6],
+ rw->ins[7]);
+- printk("%pS\n", (void *) rw->ins[7]);
++ printk("%pA\n", (void *) rw->ins[7]);
+ rw = (struct reg_window32 *) rw->ins[6];
+ }
+ spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
+@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 3739a06..48b2ff0 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+ {
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 741df91..97cdf05 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+
+ unsigned int prom_early_allocated __initdata;
+
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static struct of_pdt_ops prom_sparc_ops __initconst = {
+ .nextprop = prom_common_nextprop,
+ .getproplen = prom_getproplen,
+ .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 96ee50a..68ce124 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1058,6 +1058,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+ int ret = 0;
+@@ -1065,6 +1069,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ /* do the secure computing check first */
+ secure_computing(regs->u_regs[UREG_G1]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+@@ -1086,6 +1095,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+
+ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ {
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ #ifdef CONFIG_AUDITSYSCALL
+ if (unlikely(current->audit_context)) {
+ unsigned long tstate = regs->tstate;
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index e21d3c0d..8f453c1 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -871,8 +871,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
+ extern unsigned long xcall_flush_dcache_page_spitfire;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+ #endif
+
+ static inline void __local_flush_dcache_page(struct page *page)
+@@ -896,7 +896,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ return;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ this_cpu = get_cpu();
+@@ -920,7 +920,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ }
+@@ -939,7 +939,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_disable();
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ data0 = 0;
+ pg_addr = page_address(page);
+@@ -956,7 +956,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ __local_flush_dcache_page(page);
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 42b282f..408977c 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
+ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct * vmm;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -56,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -71,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, &addr, len, offset))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 5e4252b..379f84f 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -119,12 +119,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ unsigned long task_size = TASK_SIZE;
+ unsigned long start_addr;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -139,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -146,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -174,14 +178,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -207,6 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ /* This should only ever run for 32-bit processes. */
+ BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -215,7 +220,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -236,8 +241,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -257,28 +261,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -288,10 +293,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -361,10 +364,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+
+ /* Essentially the same as PowerPC. */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+ unsigned long rnd = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ unsigned long val = get_random_int();
+ if (test_thread_flag(TIF_32BIT))
+@@ -377,7 +384,7 @@ static unsigned long mmap_rnd(void)
+
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- unsigned long random_factor = mmap_rnd();
++ unsigned long random_factor = mmap_rnd(mm);
+ unsigned long gap;
+
+ /*
+@@ -390,6 +397,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -402,6 +415,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
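/*
 * [Editor's sketch -- reduced semantics] The shape of the
 * check_heap_stack_gap() test that the hunks above substitute for the
 * open-coded "addr + len <= vma->vm_start" comparison.  The real
 * helper (defined elsewhere in this patch) also handles the absent-vma
 * case and the randomized thread-stack offset passed in by the
 * callers; this shows only the core idea of demanding a guard gap
 * before the next mapping.
 */
static int fits_with_gap(unsigned long vm_start, unsigned long addr,
                         unsigned long len, unsigned long gap)
{
        /* candidate [addr, addr+len) must end at least 'gap' bytes
         * below the start of the following mapping */
        return addr + len + gap <= vm_start;
}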
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 817187d..1d4541e 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+ .align 32
+ 1: ldx [%g6 + TI_FLAGS], %l5
+- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ andcc %l5, _TIF_WORK_SYSCALL, %g0
+ be,pt %icc, rtrap
+ nop
+ call syscall_trace_leave
+@@ -179,7 +179,7 @@ linux_sparc_syscall32:
+
+ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+@@ -202,7 +202,7 @@ linux_sparc_syscall:
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+ 2: call %l7 ! CTI Group brk forced
+@@ -218,7 +218,7 @@ ret_sys_call:
+
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+ 2:
+diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
+index 7408201..b349841 100644
+--- a/arch/sparc/kernel/sysfs.c
++++ b/arch/sparc/kernel/sysfs.c
+@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
++static struct notifier_block sysfs_cpu_nb = {
+ .notifier_call = sysfs_cpu_notify,
+ };
+
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 591f20c..0f1b925 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 0cbdaa4..f37a97c 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1786,8 +1797,8 @@ struct sun4v_error_entry {
+ u16 err_pad;
+ };
+
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+ static const char *sun4v_err_type_to_str(u32 type)
+ {
+@@ -1807,7 +1818,7 @@ static const char *sun4v_err_type_to_str(u32 type)
+ }
+ }
+
+-static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
++static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+ int cnt;
+
+@@ -1842,8 +1853,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+
+ show_regs(regs);
+
+- if ((cnt = atomic_read(ocnt)) != 0) {
+- atomic_set(ocnt, 0);
++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++ atomic_set_unchecked(ocnt, 0);
+ wmb();
+ printk("%s: Queue overflowed %d times.\n",
+ pfx, cnt);
+@@ -1895,7 +1906,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+ */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+- atomic_inc(&sun4v_resum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -1948,7 +1959,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ /* XXX Actually even this can make not that much sense. Perhaps
+ * XXX we should just pull the plug and panic directly from here?
+ */
+- atomic_inc(&sun4v_nonresum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+
+ unsigned long sun4v_err_itlb_vaddr;
+@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ user_instruction_dump ((unsigned int __user *) regs->tpc);
+ }
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 76e4ac1..78f8bb1 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
+index eb1624b..55100de 100644
+--- a/arch/sparc/kernel/us3_cpufreq.c
++++ b/arch/sparc/kernel/us3_cpufreq.c
+@@ -18,14 +18,12 @@
+ #include <asm/head.h>
+ #include <asm/timer.h>
+
+-static struct cpufreq_driver *cpufreq_us3_driver;
+-
+ struct us3_freq_percpu_info {
+ struct cpufreq_frequency_table table[4];
+ };
+
+ /* Indexed by cpu number. */
+-static struct us3_freq_percpu_info *us3_freq_table;
++static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
+
+ /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
+ * in the Safari config register.
+@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+
+ static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+ {
+- if (cpufreq_us3_driver)
+- us3_set_cpu_divider_index(policy->cpu, 0);
++ us3_set_cpu_divider_index(policy->cpu, 0);
+
+ return 0;
+ }
+
++static int __init us3_freq_init(void);
++static void __exit us3_freq_exit(void);
++
++static struct cpufreq_driver cpufreq_us3_driver = {
++ .init = us3_freq_cpu_init,
++ .verify = us3_freq_verify,
++ .target = us3_freq_target,
++ .get = us3_freq_get,
++ .exit = us3_freq_cpu_exit,
++ .owner = THIS_MODULE,
++ .name = "UltraSPARC-III",
++
++};
++
+ static int __init us3_freq_init(void)
+ {
+ unsigned long manuf, impl, ver;
+@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+- impl == PANTHER_IMPL)) {
+- struct cpufreq_driver *driver;
+-
+- ret = -ENOMEM;
+- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+- if (!driver)
+- goto err_out;
+-
+- us3_freq_table = kzalloc(
+- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+- GFP_KERNEL);
+- if (!us3_freq_table)
+- goto err_out;
+-
+- driver->init = us3_freq_cpu_init;
+- driver->verify = us3_freq_verify;
+- driver->target = us3_freq_target;
+- driver->get = us3_freq_get;
+- driver->exit = us3_freq_cpu_exit;
+- driver->owner = THIS_MODULE,
+- strcpy(driver->name, "UltraSPARC-III");
+-
+- cpufreq_us3_driver = driver;
+- ret = cpufreq_register_driver(driver);
+- if (ret)
+- goto err_out;
+-
+- return 0;
+-
+-err_out:
+- if (driver) {
+- kfree(driver);
+- cpufreq_us3_driver = NULL;
+- }
+- kfree(us3_freq_table);
+- us3_freq_table = NULL;
+- return ret;
+- }
++ impl == PANTHER_IMPL))
++ return cpufreq_register_driver(&cpufreq_us3_driver);
+
+ return -ENODEV;
+ }
+
+ static void __exit us3_freq_exit(void)
+ {
+- if (cpufreq_us3_driver) {
+- cpufreq_unregister_driver(cpufreq_us3_driver);
+- kfree(cpufreq_us3_driver);
+- cpufreq_us3_driver = NULL;
+- kfree(us3_freq_table);
+- us3_freq_table = NULL;
+- }
++ cpufreq_unregister_driver(&cpufreq_us3_driver);
+ }
+
+ MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 4961516..f82ff86 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 59186e0..f747d7a 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
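/*
 * [Editor's illustration] The lduw/cas/compare/retry pattern used by
 * every routine above, restated as a portable C11 CAS loop.  The SPARC
 * versions additionally insert addcc + tvs for the PAX_REFCOUNT trap
 * and a BACKOFF_SPIN between retries; both are elided here.
 */
#include <stdatomic.h>

static int atomic_add_ret_sketch(int inc, _Atomic int *p)
{
        int old = atomic_load(p);
        /* on failure, 'old' is reloaded with the current value */
        while (!atomic_compare_exchange_weak(p, &old, old + inc))
                ;
        return old + inc;
}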
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index fbb8005..984a269 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -133,12 +133,18 @@ EXPORT_SYMBOL(__clear_user);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
++EXPORT_SYMBOL(atomic_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 301421c..e2535d1 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 8023fd7..3a6d569 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -21,6 +21,9 @@
+ #include <linux/perf_event.h>
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -208,6 +211,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned int addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -280,6 +554,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
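
The emulation paths above match raw SPARC instruction encodings: (insn & 0xFFC00000U) == 0x03000000U recognizes a sethi, and branch targets are recovered with the ((disp | ~mask) ^ sign) + sign idiom, which sign-extends the 22-bit displacement without a conditional. A standalone sketch of both idioms follows (not part of the patch; values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* sethi %hi(imm), %rd: bits 21:0 are imm22, placed at bit 10 of the
 * destination register */
static uint32_t sethi_imm(uint32_t insn)
{
	return (insn & 0x003FFFFFU) << 10;
}

/* ba disp22: OR in the upper bits, flip and re-add the sign bit to
 * sign-extend, then scale by 4 (the displacement counts words) */
static uint32_t ba_target(uint32_t pc, uint32_t insn)
{
	uint32_t disp = ((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;
	return pc + (disp << 2);
}

int main(void)
{
	uint32_t sethi = 0x03000000U | 0x00012345U;	/* sethi 0x12345, %g1 */
	uint32_t ba_m1 = 0x30800000U | 0x003FFFFFU;	/* disp22 = -1 */

	printf("sethi loads %#x\n", (unsigned)sethi_imm(sethi));	/* 0x48d1400 */
	printf("ba from 0x1000 -> %#x\n",
	       (unsigned)ba_target(0x1000, ba_m1));			/* 0xffc */
	return 0;
}
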
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 504c062..a383267 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned long addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -340,6 +803,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
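
Both fault handlers' CONFIG_PAX_DLRESOLVE paths use the same double-checked pattern around mm->call_dl_resolve: an optimistic check under the read side of mmap_sem, then a re-check under the write side so only one thread installs the trampoline page while losers free their speculatively allocated vma. A userspace sketch of that pattern, with a pthread rwlock standing in for mmap_sem (names and the malloc stand-in are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static void *call_dl_resolve;	/* the lazily created trampoline */

static void *get_trampoline(void)
{
	void *t, *fresh;

	pthread_rwlock_rdlock(&mmap_sem);	/* fast path: read lock only */
	t = call_dl_resolve;
	pthread_rwlock_unlock(&mmap_sem);
	if (t)
		return t;

	fresh = malloc(64);			/* allocate before locking */
	pthread_rwlock_wrlock(&mmap_sem);
	if (call_dl_resolve) {			/* re-check: lost the race */
		t = call_dl_resolve;
		pthread_rwlock_unlock(&mmap_sem);
		free(fresh);
		return t;
	}
	call_dl_resolve = fresh;	/* NULL here = failure, like the
					 * kernel's "return 1" kill path */
	pthread_rwlock_unlock(&mmap_sem);
	return fresh;
}

int main(void)
{
	printf("%p\n", get_trampoline());
	printf("%p\n", get_trampoline());	/* same pointer: cached */
	return 0;
}
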
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 07e1453..ae6e02e 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+- unsigned long flags)
++ unsigned long flags,
++ unsigned long offset)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct * vma;
+@@ -67,7 +68,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -85,7 +86,8 @@ static unsigned long
+ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+- const unsigned long flags)
++ const unsigned long flags,
++ const unsigned long offset)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+@@ -105,26 +107,28 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = (mm->mmap_base-len) & HPAGE_MASK;
++ addr = mm->mmap_base - len;
+
+ do {
++ addr &= HPAGE_MASK;
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -134,8 +138,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start-len) & HPAGE_MASK;
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -163,6 +167,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+@@ -181,16 +186,15 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ }
+
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
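
Every open-coded "!vma || addr + len <= vma->vm_start" test above becomes check_heap_stack_gap(vma, &addr, len, offset), which additionally demands slack below the next mapping; the offset comes from gr_rand_threadstack_offset(), a randomized per-thread stack gap. A simplified model of the predicate — assuming a flat gap policy, not the patch's exact implementation, which also updates *addr and treats stack vmas specially:

#include <stdio.h>

/* minimal stand-in type; real field names follow the kernel */
struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
};

static int check_gap_model(const struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len,
			   unsigned long offset)
{
	if (!vma)
		return 1;	/* no mapping above: request always fits */
	/* the range plus the demanded slack must end below the next vma
	 * instead of merely touching it */
	return addr + len + offset <= vma->vm_start;
}

int main(void)
{
	struct vm_area_struct next = { .vm_start = 0x40000000UL };

	printf("%d\n", check_gap_model(&next, 0x3f000000UL, 0x1000UL,
				       0x10000UL));	/* 1: fits with gap */
	printf("%d\n", check_gap_model(&next, 0x3fffff00UL, 0x1000UL,
				       0x10000UL));	/* 0: gap violated */
	return 0;
}
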
+diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
+index 7b00de6..78239f4 100644
+--- a/arch/sparc/mm/init_32.c
++++ b/arch/sparc/mm/init_32.c
+@@ -316,6 +316,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -344,17 +347,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
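
protection_map[] is indexed by the low four vm_flags bits (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8), so the hunk above flips exactly the accessible entries whose VM_EXEC bit is clear — 1-3 and 9-11 — to the *_NOEXEC protections, while 0 and 8 remain PAGE_NONE and the PROT_EXEC entries keep their executable protections. A quick enumeration confirming which indices PAGEEXEC demotes:

#include <stdio.h>

#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

int main(void)
{
	for (int i = 0; i < 16; i++) {
		if (!(i & (VM_READ | VM_WRITE | VM_EXEC)))
			continue;	/* entries 0 and 8: PAGE_NONE */
		printf("protection_map[%2d] (%s%s%s%s): %s\n", i,
		       i & VM_READ   ? "r" : "-",
		       i & VM_WRITE  ? "w" : "-",
		       i & VM_EXEC   ? "x" : "-",
		       i & VM_SHARED ? "s" : "p",
		       i & VM_EXEC ? "unchanged"
				   : "NOEXEC under PAGEEXEC");
	}
	return 0;
}
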
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index b4989f9..530099e 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -170,9 +170,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+
+@@ -180,7 +180,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+ BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -417,10 +417,10 @@ void mmu_info(struct seq_file *m)
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+- atomic_read(&dcpage_flushes));
++ atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+- atomic_read(&dcpage_flushes_xcall));
++ atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
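
The dcpage_flushes conversions show the other half of PAX_REFCOUNT: under that option atomic_t operations trap on overflow to stop reference-count exploits, so counters that may legitimately wrap — pure statistics like these flush counts — move to atomic_unchecked_t, whose operations (as the tile hunk below also shows) simply alias the plain ones on architectures without the instrumentation. A userspace analogy for the checked/unchecked split (illustrative types, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } refcount_like_t;	/* checked */
typedef struct { unsigned int counter; } stat_like_t;	/* unchecked */

static void checked_inc(refcount_like_t *r)
{
	if (__builtin_add_overflow(r->counter, 1, &r->counter)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();	/* the kernel would kill the offender */
	}
}

static void unchecked_inc(stat_like_t *s)
{
	s->counter++;		/* wraps modulo 2^32: fine for statistics */
}

int main(void)
{
	stat_like_t stat = { UINT_MAX };
	refcount_like_t ref = { INT_MAX };

	unchecked_inc(&stat);
	printf("stat wrapped to %u\n", stat.counter);	/* 0 */
	checked_inc(&ref);				/* aborts */
	return 0;
}
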
+diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
+index cbef74e..c38fead 100644
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index 27fe667..36d474c 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic dec and inc don't implement barrier, so provide them if needed. */
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 392e533..536b092 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/chip.h>
+
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index ef34d2caa..d6ce60c 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
++ if (likely(sz == (size_t)-1 || sz >= n))
+ n = _copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
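
The copy_from_user() tweak changes sz from int to size_t to match __compiletime_object_size(), which yields (size_t)-1 when the object's size cannot be determined; the sentinel then compares against n without any signed/unsigned conversion, presumably keeping the whole check unsigned for the size-overflow instrumentation. A tiny demonstration of the sentinel acting as the natural maximum size:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t unknown = (size_t)-1;	/* "size not known at compile time" */
	size_t small = 64;
	unsigned long n = 1UL << 40;	/* large copy request */

	/* the sentinel is simply the largest size_t, so it passes any n */
	printf("unknown object: %s\n",
	       (unknown == (size_t)-1 || unknown >= n) ? "copy" : "overflow");
	printf("64-byte object: %s\n",
	       (small == (size_t)-1 || small >= n) ? "copy" : "overflow");
	return 0;
}
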
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 7730af6..880804f 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
+ $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+ $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
+
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 6c03acd..a5e0215 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index 7cfc3ce..cbd1a58 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index 0032f92..cd151e0 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index c533835..84db18e 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -406,22 +406,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
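
The recurring cache.h change (tile, um, and unicore32 above, and other architectures elsewhere in the patch) wraps the constant as _AC(1,UL) so that L1_CACHE_BYTES is an unsigned long in C expressions — avoiding int overflow and sign-extension surprises in alignment arithmetic — while still assembling in .S files, where suffixed constants are invalid. _AC comes from <linux/const.h> and is essentially:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: bare constant */
#else
#define __AC(X, Y)	(X##Y)		/* C: paste the type suffix on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 32UL in C, 32 in asm */
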
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index fb2e69d..27ff8ca 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -75,6 +75,7 @@ config X86
+ select HAVE_BPF_JIT if (X86_64 && NET)
+ select CLKEVT_I8253
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
++ select HAVE_ARCH_SECCOMP_FILTER
+
+ config INSTRUCTION_DECODER
+ def_bool (KPROBES || PERF_EVENTS)
+@@ -235,7 +236,7 @@ config X86_HT
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -999,6 +1000,7 @@ config MICROCODE_OLD_INTERFACE
+
+ config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+@@ -1022,7 +1024,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1059,7 +1061,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1113,7 +1115,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1496,6 +1498,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1616,6 +1619,8 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1647,9 +1652,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index e3ca7e0..b30b28a 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -365,7 +365,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -411,7 +411,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index bf56e17..91465a1 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -81,7 +81,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+@@ -272,7 +272,7 @@ config OPTIMIZE_INLINING
+
+ config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict copy size checks"
+- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
++ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
+ ---help---
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time failures.
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 03dbc7f5b..e1aa479 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -40,12 +40,12 @@ ifeq ($(CONFIG_X86_32),y)
+ KBUILD_CFLAGS += $(cflags-y)
+
+ # temporary until string.h is fixed
+- KBUILD_CFLAGS += -ffreestanding
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
+ CHECKFLAGS += -D__x86_64__ -m64
+
++ biarch := $(call cc-option,-m64)
+ KBUILD_AFLAGS += -m64
+ KBUILD_CFLAGS += -m64
+
+@@ -72,6 +72,8 @@ else
+ KBUILD_CFLAGS += -maccumulate-outgoing-args
+ endif
+
++KBUILD_CFLAGS += -ffreestanding
++
+ ifdef CONFIG_CC_STACKPROTECTOR
+ cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+ ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+@@ -199,3 +201,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index e80542b..c5099c3 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
+ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 878e4b9..20537ab 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index c7093bd..d4247ffe0 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -85,7 +85,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index cda5cef..c1f26aa 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -15,6 +15,9 @@ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 67a655a..b924059 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -76,7 +76,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -162,7 +162,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -170,8 +170,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 35af09d..99c9676 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -233,7 +233,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 3a19d04..7c1d55a 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -310,7 +310,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4d3ff03..e4972ff 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
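
The boot-code churn above (bitops.h, boot.h, cpucheck.c) adds volatile to asm statements whose results depend on machine state: without it, GCC treats an asm with outputs as a pure function of its inputs and may CSE, hoist, or delete it, which is wrong for cpuid, rdmsr/wrmsr, flag reads, and segment reads. A minimal illustration mirroring the ds() change in boot.h:

static inline unsigned short read_ds(void)
{
	unsigned short seg;

	/* without volatile, two calls could be folded into one even if
	 * %ds changed in between; volatile pins each execution */
	asm volatile("movw %%ds,%0" : "=rm" (seg));
	return seg;
}
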
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index bdb4d45..77703de 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -224,10 +224,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE (VO__end - VO__text)
++#endif
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ #define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index 11e8c6e..fdbb1ed 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 43eda28..5ab5fdb 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 5b577d5..eb7f25e 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
+ je B192; \
+ leaq 32(r9),r9;
+
++#define ret pax_force_retaddr; ret
++
+ #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+ movq r1,r2; \
+ movq r3,r4; \
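
The "#define ret pax_force_retaddr; ret" trick makes every return in this assembly first run pax_force_retaddr, a PaX KERNEXEC macro that — as I understand it, under the plugin's "or" method — ORs the saved return address on the stack with a mask kept in a reserved register, so even a corrupted return address stays inside the kernel half of the address space. That reservation is also why the aesni code below is rewritten from %r12 to %r15: %r12 is assumed to become the fixed mask register. A sketch of the masking idea only (mask value and mechanism are assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* an overwritten return address pointing at userland shellcode */
	uint64_t retaddr = 0x0000000000401000ULL;
	/* assumed mask: force the address into the kernel half */
	uint64_t mask = 0x8000000000000000ULL;

	printf("%#llx -> %#llx\n",
	       (unsigned long long)retaddr,
	       (unsigned long long)(retaddr | mask));	/* no longer userland */
	return 0;
}
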
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 3470624..9b476a3 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -31,6 +31,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef __x86_64__
+ .data
+@@ -199,7 +200,7 @@ enc: .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -208,8 +209,8 @@ enc: .octa 0x2
+ .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -217,15 +218,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -437,7 +438,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -446,8 +447,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -455,15 +456,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -1264,7 +1265,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ *****************************************************************************/
+
+ ENTRY(aesni_gcm_dec)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1274,8 +1275,8 @@ ENTRY(aesni_gcm_dec)
+ */
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13 # %xmm13 = HashKey
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13 # %xmm13 = HashKey
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1303,10 +1304,10 @@ ENTRY(aesni_gcm_dec)
+ movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
+ mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+- mov %r13, %r12
+- and $(3<<4), %r12
++ mov %r13, %r15
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_decrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_decrypt
+ je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1356,16 +1357,16 @@ _zero_cipher_left_decrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1394,9 +1395,9 @@ _less_than_8_bytes_left_decrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+- mov arg8, %r12 # %r13 = aadLen (number of bytes)
+- shl $3, %r12 # convert into number of bits
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = aadLen (number of bytes)
++ shl $3, %r15 # convert into number of bits
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1435,8 +1436,10 @@ _return_T_done_decrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
++ pop %r15
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_gcm_dec)
+
+
+ /*****************************************************************************
+@@ -1523,7 +1526,7 @@ _return_T_done_decrypt:
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+ ENTRY(aesni_gcm_enc)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1533,8 +1536,8 @@ ENTRY(aesni_gcm_enc)
+ #
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1558,13 +1561,13 @@ ENTRY(aesni_gcm_enc)
+ movdqa %xmm13, HashKey(%rsp)
+ mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
+ and $-16, %r13
+- mov %r13, %r12
++ mov %r13, %r15
+
+ # Encrypt first few blocks
+
+- and $(3<<4), %r12
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_encrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_encrypt
+ je _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1617,14 +1620,14 @@ _zero_cipher_left_encrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1657,9 +1660,9 @@ _less_than_8_bytes_left_encrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+- mov arg8, %r12 # %r12 = addLen (number of bytes)
+- shl $3, %r12
+- movd %r12d, %xmm15 # len(A) in %xmm15
++	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
++ shl $3, %r15
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1698,8 +1701,10 @@ _return_T_done_encrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
++ pop %r15
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_gcm_enc)
+
+ #endif
+
+@@ -1714,6 +1719,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+
+ .align 4
+@@ -1738,6 +1744,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(TKEYP)
+ add $0x20, TKEYP
++ pax_force_retaddr
+ ret
+
+ .align 4
+@@ -1757,6 +1764,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+
+ .align 4
+@@ -1769,6 +1777,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
+ #ifndef __x86_64__
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_set_key)
+
+ /*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_enc)
+
+ /*
+ * _aesni_enc1: internal ABI
+@@ -1959,6 +1972,7 @@ _aesni_enc1:
+ AESENC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESENCLAST KEY STATE
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2067,6 +2081,7 @@ _aesni_enc4:
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_dec)
+
+ /*
+ * _aesni_dec1: internal ABI
+@@ -2146,6 +2163,7 @@ _aesni_dec1:
+ AESDEC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESDECLAST KEY STATE
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2254,6 +2272,7 @@ _aesni_dec4:
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_ecb_enc)
+
+ /*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_ecb_dec)
+
+ /*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_cbc_enc)
+
+ /*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_cbc_dec)
+
+ #ifdef __x86_64__
+ .align 16
+@@ -2526,6 +2553,7 @@ _aesni_inc_init:
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2554,6 +2582,7 @@ _aesni_inc:
+ .Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
+ .Lctr_enc_ret:
+ movups IV, (IVP)
+ .Lctr_enc_just_ret:
++ pax_force_retaddr
+ ret
++ENDPROC(aesni_ctr_enc)
+ #endif
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 391d245..c73d634 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "blowfish-x86_64-asm.S"
+ .text
+
+@@ -151,9 +153,11 @@ __blowfish_enc_blk:
+ jnz __enc_xor;
+
+ write_block();
++ pax_force_retaddr
+ ret;
+ __enc_xor:
+ xor_block();
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -188,6 +192,7 @@ blowfish_dec_blk:
+
+ movq %r11, %rbp;
+
++ pax_force_retaddr
+ ret;
+
+ /**********************************************************************
+@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr
+ ret;
+
+ __enc_xor4:
+@@ -349,6 +355,7 @@ __enc_xor4:
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
+ popq %rbx;
+ popq %rbp;
+
++ pax_force_retaddr
+ ret;
+
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 6214a9b..5c0f959 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,3 +1,5 @@
++#include <asm/alternative-asm.h>
++
+ # enter ECRYPT_encrypt_bytes
+ .text
+ .p2align 5
+@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # bytesatleast65:
+ ._bytesatleast65:
+@@ -891,6 +894,7 @@ ECRYPT_keysetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # enter ECRYPT_ivsetup
+ .text
+@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index b2c2f57..f30325b 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -28,6 +28,8 @@
+ * (at your option) any later version.
+ */
+
++#include <asm/alternative-asm.h>
++
+ #define CTX %rdi // arg1
+ #define BUF %rsi // arg2
+ #define CNT %rdx // arg3
+@@ -75,9 +77,9 @@
+ \name:
+ push %rbx
+ push %rbp
+- push %r12
++ push %r14
+
+- mov %rsp, %r12
++ mov %rsp, %r14
+ sub $64, %rsp # allocate workspace
+ and $~15, %rsp # align stack
+
+@@ -99,11 +101,12 @@
+ xor %rax, %rax
+ rep stosq
+
+- mov %r12, %rsp # deallocate workspace
++ mov %r14, %rsp # deallocate workspace
+
+- pop %r12
++ pop %r14
+ pop %rbp
+ pop %rbx
++ pax_force_retaddr
+ ret
+
+ .size \name, .-\name
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 5b012a2..9712c31 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -20,6 +20,8 @@
+ *
+ */
+
++#include <asm/alternative-asm.h>
++
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+
+@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+
+ __enc_xor3:
+@@ -271,6 +274,7 @@ __enc_xor3:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+
+ .global twofish_dec_blk_3way
+@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index 7bcf3fc..560ff4c 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ .text
+
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define a_offset 0
+ #define b_offset 4
+@@ -268,6 +269,7 @@ twofish_enc_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr
+ ret
+
+ twofish_dec_blk:
+@@ -319,4 +321,5 @@ twofish_dec_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr
+ ret
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index fd84387..887aa7e 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+@@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
+@@ -410,13 +419,6 @@ beyond_if:
+
+ set_brk(current->mm->start_brk, current->mm->brk);
+
+- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
+ /* start thread */
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 6557769..dfa8ead 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -73,6 +73,10 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ switch (from->si_code >> 16) {
+ case __SI_FAULT >> 16:
+ break;
++ case __SI_SYS >> 16:
++ put_user_ex(from->si_syscall, &to->si_syscall);
++ put_user_ex(from->si_arch, &to->si_arch);
++ break;
+ case __SI_CHLD >> 16:
+ put_user_ex(from->si_utime, &to->si_utime);
+ put_user_ex(from->si_stime, &to->si_stime);
+@@ -169,7 +173,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
+ }
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
+ set_fs(seg);
+ if (ret >= 0 && uoss_ptr) {
+ if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+@@ -370,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
+ */
+ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size,
+- void **fpstate)
++ void __user **fpstate)
+ {
+ unsigned long sp;
+
+@@ -391,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+
+ if (used_math()) {
+ sp = sp - sig_xstate_ia32_size;
+- *fpstate = (struct _fpstate_ia32 *) sp;
++ *fpstate = (struct _fpstate_ia32 __user *) sp;
+ if (save_i387_xstate_ia32(*fpstate) < 0)
+ return (void __user *) -1L;
+ }
+@@ -399,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
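The new rounding expression still satisfies the ABI constraint quoted in the comment — the returned sp is congruent to 12 (mod 16), so the handler sees ((sp + 4) & 15) == 0 — but it now rounds strictly downward from the incoming pointer. A standalone check of both forms (the loop range is arbitrary):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long sp = 64; sp < 80; sp++) {
        unsigned long old_sp = ((sp + 4) & -16ul) - 4;   /* previous form */
        unsigned long new_sp = ((sp - 12) & -16ul) - 4;  /* patched form  */

        /* both leave ((sp + 4) & 15) == 0 at handler entry */
        assert(((old_sp + 4) & 15) == 0);
        assert(((new_sp + 4) & 15) == 0);
        /* but the patched form never lands at or above the incoming sp */
        assert(new_sp < sp);
        printf("sp=%2lu  old=%2lu  new=%2lu\n", sp, old_sp, new_sp);
    }
    return 0;
}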
+@@ -457,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -499,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -529,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
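The restorer change above prefers a handler-supplied restorer, then the 32-bit vdso stub, and only falls back to the retcode copied onto the frame when no vdso is mapped (e.g. booted with vdso=0); previously the vdso stub was assumed unconditionally. The selection logic, reduced to a standalone sketch with placeholder pointers:

#include <stdio.h>

static const void *pick_restorer(const void *sa_restorer,
                                 const void *vdso_stub,
                                 const void *frame_retcode)
{
    if (sa_restorer)            /* SA_RESTORER set by the handler */
        return sa_restorer;
    if (vdso_stub)              /* current->mm->context.vdso mapped */
        return vdso_stub;
    return frame_retcode;       /* last resort: stub on the frame */
}

int main(void)
{
    int frame_code = 0;
    printf("no vdso -> %p (frame retcode)\n",
           pick_restorer(NULL, NULL, &frame_code));
    return 0;
}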
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 95b4eb3..87e6dc1 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -13,7 +13,9 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -61,12 +63,12 @@
+ */
+ .macro LOAD_ARGS32 offset, _r9=0
+ .if \_r9
+- movl \offset+16(%rsp),%r9d
++ movl \offset+R9(%rsp),%r9d
+ .endif
+- movl \offset+40(%rsp),%ecx
+- movl \offset+48(%rsp),%edx
+- movl \offset+56(%rsp),%esi
+- movl \offset+64(%rsp),%edi
++ movl \offset+RCX(%rsp),%ecx
++ movl \offset+RDX(%rsp),%edx
++ movl \offset+RSI(%rsp),%esi
++ movl \offset+RDI(%rsp),%edi
+ movl %eax,%eax /* zero extension */
+ .endm
+
+@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
+
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
+ /*
+ * 32bit SYSENTER instruction entry.
+ *
+@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
+ CFI_REGISTER rsp,rbp
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+- addq $(KERNEL_STACK_OFFSET),%rsp
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs, here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %ebp,%ebp /* zero extension */
+ pushq_cfi $__USER32_DS
+ /*CFI_REL_OFFSET ss,0*/
+@@ -134,25 +156,44 @@ ENTRY(ia32_sysenter_target)
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi
+ /*CFI_REL_OFFSET rflags,0*/
+- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
+- CFI_REGISTER rip,r10
++ orl $X86_EFLAGS_IF,(%rsp)
++ GET_THREAD_INFO(%r11)
++ movl TI_sysenter_return(%r11), %r11d
++ CFI_REGISTER rip,r11
+ pushq_cfi $__USER32_CS
+ /*CFI_REL_OFFSET cs,0*/
+ movl %eax, %eax
+- pushq_cfi %r10
++ pushq_cfi %r11
+ CFI_REL_OFFSET rip,0
+ pushq_cfi %rax
+ cld
+ SAVE_ARGS 0,1,0
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs, here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov pax_user_shadow_base,%r11
++ add %r11,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz sysenter_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -162,16 +203,18 @@ sysenter_do_call:
+ sysenter_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ /* clear IF, that popfq doesn't enable interrupts early */
+- andl $~0x200,EFLAGS-R11(%rsp)
+- movl RIP-R11(%rsp),%edx /* User %eip */
++ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
++ movl RIP(%rsp),%edx /* User %eip */
+ CFI_REGISTER rip,rdx
+ RESTORE_ARGS 0,24,0,0,0,0
+ xorq %r8,%r8
+@@ -194,6 +237,9 @@ sysexit_from_sys_call:
+ movl %eax,%esi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -205,7 +251,7 @@ sysexit_from_sys_call:
+ .endm
+
+ .macro auditsys_exit exit
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -215,12 +261,12 @@ sysexit_from_sys_call:
+ movzbl %al,%edi /* zero-extend that into %edi */
+ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl %edi,TI_flags(%r10)
++ testl %edi,TI_flags(%r11)
+ jz \exit
+ CLEAR_RREGS -ARGOFFSET
+ jmp int_with_check
+@@ -238,7 +284,7 @@ sysexit_audit:
+
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz sysenter_auditsys
+ #endif
+ SAVE_REST
+@@ -250,6 +296,9 @@ sysenter_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp sysenter_do_call
+ CFI_ENDPROC
+ ENDPROC(ia32_sysenter_target)
+@@ -277,19 +326,25 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0,0
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0,0
+ movl %eax,%eax /* zero extension */
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+@@ -305,13 +360,19 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov pax_user_shadow_base,%r11
++ add %r11,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz cstar_tracesys
+ cmpq $IA32_NR_syscalls-1,%rax
+@@ -321,14 +382,16 @@ cstar_do_call:
+ cstar_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
+- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
++ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
+ movl RIP-ARGOFFSET(%rsp),%ecx
+ CFI_REGISTER rip,rcx
+ movl EFLAGS-ARGOFFSET(%rsp),%r11d
+@@ -355,7 +418,7 @@ sysretl_audit:
+
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz cstar_auditsys
+ #endif
+ xchgl %r9d,%ebp
+@@ -369,6 +432,9 @@ cstar_tracesys:
+ xchgl %ebp,%r9d
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp cstar_do_call
+ END(ia32_cstar_target)
+
+@@ -409,20 +475,26 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs and here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%eax
+ pushq_cfi %rax
+ cld
+ /* note the registers are not zero extended to the sf.
+ this could be a problem. */
+ SAVE_ARGS 0,1,0
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ jnz ia32_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -445,6 +517,9 @@ ia32_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp ia32_do_call
+ END(ia32_syscall)
+
+@@ -455,6 +530,7 @@ ia32_badsys:
+
+ quiet_ni_syscall:
+ movq $-ENOSYS,%rax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+
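The pax_enter_kernel_user/pax_exit_kernel_user calls added to each compat entry point switch the UDEREF address-space state, pax_randomize_kstack re-randomizes the kernel stack pointer under RANDKSTACK, and pax_erase_kstack (STACKLEAK) clears the used portion of the kernel stack so one syscall cannot read another's leftovers. A toy userspace model of the erase step — the poison value, stack size, and fill direction here are illustrative placeholders, not the kernel's actual constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STACK_WORDS 512                      /* toy stack size */
#define POISON      0xdeadbeefdeadbeefULL    /* placeholder poison value */

static uint64_t fake_kstack[STACK_WORDS];

/* Overwrite everything from the deepest point the "syscall" reached
 * back up to the top of the stack region. */
static void erase_kstack(size_t lowest_used)
{
    for (size_t i = lowest_used; i < STACK_WORDS; i++)
        fake_kstack[i] = POISON;
}

int main(void)
{
    /* Pretend a syscall left sensitive data behind... */
    memset(&fake_kstack[256], 0x41, (STACK_WORDS - 256) * sizeof(uint64_t));
    /* ...and erase it on the way out, as pax_erase_kstack would. */
    erase_kstack(256);
    printf("stack word 300 after erase: %#llx\n",
           (unsigned long long)fake_kstack[300]);
    return 0;
}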
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index f6f5c53..8e51d70 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+ */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
+ SET_UID(uid, stat->uid);
+ SET_GID(gid, stat->gid);
+ if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ }
+ set_fs(KERNEL_DS);
+ ret = sys_rt_sigprocmask(how,
+- set ? (sigset_t __user *)&s : NULL,
+- oset ? (sigset_t __user *)&s : NULL,
++ set ? (sigset_t __force_user *)&s : NULL,
++ oset ? (sigset_t __force_user *)&s : NULL,
+ sigsetsize);
+ set_fs(old_fs);
+ if (ret)
+@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
+ return alarm_setitimer(seconds);
+ }
+
+-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
++asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
+ int options)
+ {
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
+@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
+ set_fs(old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+@@ -363,13 +363,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+ compat_size_t sigsetsize)
+ {
+- sigset_t s;
++ sigset_t s = { };
+ compat_sigset_t s32;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
+ set_fs(old_fs);
+ if (!ret) {
+ switch (_NSIG_WORDS) {
+@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
+ set_fs(old_fs);
+ return ret;
+ }
+@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
+ count);
+ set_fs(old_fs);
+
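All of these compat wrappers share one shape: raise the address limit with set_fs(KERNEL_DS), call the native syscall on a kernel temporary, and restore the limit — so the kernel pointer must be cast with __force_user rather than a bare __user cast, which keeps sparse's address-space checking meaningful everywhere else. A reduced, gcc-compilable sketch of the annotation pattern (the macro definitions mirror the kernel's but are trimmed placeholders, and fake_sys_take_sigset stands in for a real syscall):

#include <stdio.h>

#ifdef __CHECKER__                       /* sparse defines __CHECKER__ */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user      /* the combined cast used above */

/* Stand-in for a syscall that expects a user-space pointer. */
static long fake_sys_take_sigset(const int __user *set)
{
    (void)set;
    return 0;
}

int main(void)
{
    int kernel_buf = 0;                  /* kernel-space temporary */
    /* With the address limit raised (set_fs(KERNEL_DS) in the real
     * code), a kernel buffer may be handed over, but only through an
     * explicit __force_user cast, so every other kernel->user pointer
     * mixup still gets flagged: */
    long ret = fake_sys_take_sigset((const int __force_user *)&kernel_buf);
    printf("ret = %ld\n", ret);
    return 0;
}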
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index 091508b..2cc2c2d 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -4,10 +4,10 @@
+
+ #ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+-1: lock
++672: lock
+ .section .smp_locks,"a"
+ .balign 4
+- .long 1b - .
++ .long 672b - .
+ .previous
+ .endm
+ #else
+@@ -15,6 +15,45 @@
+ .endm
+ #endif
+
++#ifdef KERNEXEC_PLUGIN
++ .macro pax_force_retaddr_bts rip=0
++ btsq $63,\rip(%rsp)
++ .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ .macro pax_force_retaddr rip=0, reload=0
++ btsq $63,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ btsq $63,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .macro pax_force_retaddr rip=0, reload=0
++ .if \reload
++ pax_set_fptr_mask
++ .endif
++ orq %r12,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ orq %r12,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ movabs $0x8000000000000000,%r12
++ .endm
++#endif
++#else
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++ .macro pax_force_fptr ptr
++ .endm
++ .macro pax_force_retaddr_bts rip=0
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++
+ .macro altinstruction_entry orig alt feature orig_len alt_len
+ .long \orig - .
+ .long \alt - .
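These macros are the core of the KERNEXEC return-address protection seen throughout the assembly hunks above: when the KERNEXEC plugin is active, pax_force_retaddr sets bit 63 of the saved return address before every ret, either directly with btsq (the BTS method) or by OR-ing in a mask kept in %r12 (the OR method — which is why %r12 is treated as reserved here and the crypto routines earlier in this patch are rewritten to use %r14/%r15 instead). Kernel text lives in the upper canonical half of the 64-bit address space, so the tag is a no-op for legitimate return addresses, while an attacker-planted userland address becomes non-canonical and faults when returned to. A minimal userspace sketch of the idea, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define RETADDR_MASK 0x8000000000000000ULL  /* bit 63, as in pax_set_fptr_mask */

static uint64_t pax_tag(uint64_t addr)
{
    return addr | RETADDR_MASK;             /* the orq/btsq step */
}

int main(void)
{
    uint64_t kernel_ret = 0xffffffff81234567ULL; /* made-up kernel text address */
    uint64_t forged_ret = 0x0000000000401000ULL; /* made-up userland address    */

    /* A real kernel return address already has bit 63 set: unchanged. */
    printf("kernel: %#llx -> %#llx\n",
           (unsigned long long)kernel_ret,
           (unsigned long long)pax_tag(kernel_ret));

    /* A smuggled userland address becomes non-canonical and would
     * fault the moment ret jumped to it. */
    printf("forged: %#llx -> %#llx (non-canonical)\n",
           (unsigned long long)forged_ret,
           (unsigned long long)pax_tag(forged_ret));
    return 0;
}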
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 37ad100..7d47faa 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".section .discard,\"aw\",@progbits\n" \
+ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 1a6c09a..fec2432 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 20370c6..a2eb9b0 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index 58cb6d4..612b126 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -22,7 +22,18 @@
+ */
+ static inline int atomic_read(const atomic_t *v)
+ {
+- return (*(volatile int *)&(v)->counter);
++ return (*(volatile const int *)&(v)->counter);
++}
++
++/**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return (*(volatile const int *)&(v)->counter);
+ }
+
+ /**
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "+m" (v->counter), "=qm" (c)
++ : : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ goto no_xadd;
+ #endif
+ /* Modern 486+ processor */
+- return i + xadd(&v->counter, i);
++ return i + xadd_check_overflow(&v->counter, i);
+
+ #ifdef CONFIG_M386
+ no_xadd: /* Legacy 386 processor */
+@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
+ }
+
+ /**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++#ifdef CONFIG_M386
++ int __i;
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ return i + xadd(&v->counter, i);
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ raw_local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
++ raw_local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+@@ -204,9 +394,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+ {
+ return cmpxchg(&v->counter, old, new);
+ }
+@@ -216,6 +415,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ */
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ return c;
+ }
+
++/**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++ int val, c = hint, new;
++
++ /* sanity test, should be removed by compiler if hint is a constant */
++ if (!hint)
++ return __atomic_add_unless(v, 1, 0);
++
++ do {
++ asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c));
++
++ val = atomic_cmpxchg(v, c, new);
++ if (val == c)
++ return 1;
++ c = val;
++ } while (c);
++
++ return 0;
++}
+
+ /*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+ #endif
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+- : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
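The pattern repeated throughout this file is PAX_REFCOUNT: each checked atomic write is followed by jno 0f, and on signed overflow the operation is undone and int $4 raises the overflow exception, so a reference-count wraparound becomes a trapped fault instead of a use-after-free primitive; the *_unchecked variants keep plain wrapping semantics for counters that legitimately overflow (statistics and the like). A userspace model of the checked add, using __builtin_add_overflow and abort() in place of the jno/int $4 sequence — note the kernel version is a single locked read-modify-write, while this model is not atomic:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;

static void atomic_add_checked(int i, atomic_t *v)
{
    int new;
    if (__builtin_add_overflow(v->counter, i, &new)) {
        /* kernel: the value is restored and "int $4" reports the event */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    v->counter = new;
}

int main(void)
{
    atomic_t ref = { INT_MAX - 1 };
    atomic_add_checked(1, &ref);     /* reaches INT_MAX: fine */
    printf("counter = %d\n", ref.counter);
    atomic_add_checked(1, &ref);     /* would wrap: trapped   */
    return 0;
}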
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index 24098aa..aabcac7 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #ifdef CONFIG_X86_CMPXCHG64
+@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
+ }
+
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++ return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ unsigned high = (unsigned)(i >> 32);
++ unsigned low = (unsigned)i;
++ asm volatile(ATOMIC64_ALTERNATIVE(set)
++ : "+b" (low), "+c" (high)
++ : "S" (v)
++ : "eax", "edx", "memory"
++ );
++}
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
++{
++ long long r;
++ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
++ : "=A" (r), "+c" (v)
++ : : "memory"
++ );
++ return r;
++ }
++
++/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ return i;
+ }
+
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
+ /*
+ * Other variants with different arithmetic operators:
+ */
+@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
+ return a;
+ }
+
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ long long a;
++ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
++ : "=A" (a)
++ : "S" (v)
++ : "memory", "ecx"
++ );
++ return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+ long long a;
+@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
+ }
+
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
++ : "+A" (i), "+c" (v)
++ : : "memory"
++ );
++ return i;
++}
++
++/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index 0e1cbfc..a891fc7 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -18,7 +18,19 @@
+ */
+ static inline long atomic64_read(const atomic64_t *v)
+ {
+- return (*(volatile long *)&(v)->counter);
++ return (*(volatile const long *)&(v)->counter);
++}
++
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *)&(v)->counter);
+ }
+
+ /**
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ */
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++ return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+ return i + xadd(&v->counter, i);
+ }
+
+@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
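atomic64_add_unless above is the usual compare-and-swap retry loop, extended with the same overflow check. Its basic shape, minus the PAX_REFCOUNT arithmetic, in portable C11:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(atomic_long *v, long a, long u)
{
    long c = atomic_load(v);
    for (;;) {
        if (c == u)
            return false;            /* hit the excluded value */
        /* try to publish c + a; on failure c is reloaded with the
         * current value and the loop retries */
        if (atomic_compare_exchange_weak(v, &c, c + a))
            return true;
    }
}

int main(void)
{
    atomic_long v = 5;
    printf("%d\n", add_unless(&v, 1, 0));   /* 1: incremented      */
    printf("%ld\n", atomic_load(&v));       /* 6                   */
    printf("%d\n", add_unless(&v, 1, 6));   /* 0: v == u, no-op    */
    return 0;
}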
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 1775d6e..f84af0c 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+@@ -344,7 +344,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __ffs(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
+ {
+ asm("bsf %1,%0"
+ : "=r" (word)
+@@ -358,7 +358,7 @@ static inline unsigned long __ffs(unsigned long word)
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+-static inline unsigned long ffz(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
+ {
+ asm("bsf %1,%0"
+ : "=r" (word)
+@@ -372,7 +372,7 @@ static inline unsigned long ffz(unsigned long word)
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __fls(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+ asm("bsr %1,%0"
+ : "=r" (word)
+@@ -419,7 +419,7 @@ static inline int ffs(int x)
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+-static inline int fls(int x)
++static inline int __intentional_overflow(-1) fls(int x)
+ {
+ int r;
+ #ifdef CONFIG_X86_CMOV
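The __intentional_overflow(-1) annotations tell the size-overflow gcc plugin shipped with this patchset not to instrument the marked return values, whose arithmetic may deliberately wrap. Independent of that, the warning in the comments stands: bsf/bsr leave the destination register undefined for a zero input, so callers must test first, as in this portable equivalent:

#include <stdio.h>

static unsigned long my_ffs(unsigned long word)
{
    /* like bsf, __builtin_ctzl is undefined for word == 0 */
    return (unsigned long)__builtin_ctzl(word);
}

int main(void)
{
    unsigned long w = 0x58;                        /* 0b0101'1000 */
    if (w)                                         /* check against 0 first */
        printf("first set bit: %lu\n", my_ffs(w)); /* prints 3 */
    return 0;
}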
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 5e1a2ee..c9f9533 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 48f99f1..d78ebf9 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,12 +5,13 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+
+ #ifdef CONFIG_X86_VSMP
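__read_only is new here: objects placed in the .data..read_only section are write-protected after init under KERNEXEC. Usage is just an annotation on otherwise ordinary data; in a plain userspace build like this sketch the section carries no protection — that comes from the kernel's linker script:

#include <stdio.h>

/* same annotation as introduced above */
#define __read_only __attribute__((__section__(".data..read_only")))

static int init_once __read_only = 42;

int main(void)
{
    printf("%d\n", init_once);
    return 0;
}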
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
+index 4e12668..501d239 100644
+--- a/arch/x86/include/asm/cacheflush.h
++++ b/arch/x86/include/asm/cacheflush.h
+@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
+- return -1;
++ return ~0UL;
+ else if (pg_flags == _PGMT_WC)
+ return _PAGE_CACHE_WC;
+ else if (pg_flags == _PGMT_UC_MINUS)
+diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
+index a9e3a74..44966f3 100644
+--- a/arch/x86/include/asm/calling.h
++++ b/arch/x86/include/asm/calling.h
+@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
+ #define RSP (152)
+ #define SS (160)
+
+-#define ARGOFFSET R11
+-#define SWFRAME ORIG_RAX
++#define ARGOFFSET R15
+
+ .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+- subq $9*8+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
+- movq_cfi rdi, 8*8
+- movq_cfi rsi, 7*8
+- movq_cfi rdx, 6*8
++ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
+
+ .if \save_rcx
+- movq_cfi rcx, 5*8
++ movq_cfi rcx, RCX
+ .endif
+
+- movq_cfi rax, 4*8
++ movq_cfi rax, RAX
+
+ .if \save_r891011
+- movq_cfi r8, 3*8
+- movq_cfi r9, 2*8
+- movq_cfi r10, 1*8
+- movq_cfi r11, 0*8
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
+ .endif
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ .endm
+
+-#define ARG_SKIP (9*8)
++#define ARG_SKIP ORIG_RAX
+
+ .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+ rstor_r8910=1, rstor_rdx=1
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
+ .if \rstor_r11
+- movq_cfi_restore 0*8, r11
++ movq_cfi_restore R11, r11
+ .endif
+
+ .if \rstor_r8910
+- movq_cfi_restore 1*8, r10
+- movq_cfi_restore 2*8, r9
+- movq_cfi_restore 3*8, r8
++ movq_cfi_restore R10, r10
++ movq_cfi_restore R9, r9
++ movq_cfi_restore R8, r8
+ .endif
+
+ .if \rstor_rax
+- movq_cfi_restore 4*8, rax
++ movq_cfi_restore RAX, rax
+ .endif
+
+ .if \rstor_rcx
+- movq_cfi_restore 5*8, rcx
++ movq_cfi_restore RCX, rcx
+ .endif
+
+ .if \rstor_rdx
+- movq_cfi_restore 6*8, rdx
++ movq_cfi_restore RDX, rdx
+ .endif
+
+- movq_cfi_restore 7*8, rsi
+- movq_cfi_restore 8*8, rdi
++ movq_cfi_restore RSI, rsi
++ movq_cfi_restore RDI, rdi
+
+- .if ARG_SKIP+\addskip > 0
+- addq $ARG_SKIP+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
++ .if ORIG_RAX+\addskip > 0
++ addq $ORIG_RAX+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
+ .endif
+ .endm
+
+- .macro LOAD_ARGS offset, skiprax=0
+- movq \offset(%rsp), %r11
+- movq \offset+8(%rsp), %r10
+- movq \offset+16(%rsp), %r9
+- movq \offset+24(%rsp), %r8
+- movq \offset+40(%rsp), %rcx
+- movq \offset+48(%rsp), %rdx
+- movq \offset+56(%rsp), %rsi
+- movq \offset+64(%rsp), %rdi
++ .macro LOAD_ARGS skiprax=0
++ movq R11(%rsp), %r11
++ movq R10(%rsp), %r10
++ movq R9(%rsp), %r9
++ movq R8(%rsp), %r8
++ movq RCX(%rsp), %rcx
++ movq RDX(%rsp), %rdx
++ movq RSI(%rsp), %rsi
++ movq RDI(%rsp), %rdi
+ .if \skiprax
+ .else
+- movq \offset+72(%rsp), %rax
++ movq RAX(%rsp), %rax
+ .endif
+ .endm
+
+-#define REST_SKIP (6*8)
+-
+ .macro SAVE_REST
+- subq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+- movq_cfi rbx, 5*8
+- movq_cfi rbp, 4*8
+- movq_cfi r12, 3*8
+- movq_cfi r13, 2*8
+- movq_cfi r14, 1*8
+- movq_cfi r15, 0*8
++ movq_cfi rbx, RBX
++ movq_cfi rbp, RBP
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
++ movq_cfi r13, R13
++ movq_cfi r14, R14
++ movq_cfi r15, R15
+ .endm
+
+ .macro RESTORE_REST
+- movq_cfi_restore 0*8, r15
+- movq_cfi_restore 1*8, r14
+- movq_cfi_restore 2*8, r13
+- movq_cfi_restore 3*8, r12
+- movq_cfi_restore 4*8, rbp
+- movq_cfi_restore 5*8, rbx
+- addq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
++ movq_cfi_restore R15, r15
++ movq_cfi_restore R14, r14
++ movq_cfi_restore R13, r13
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
++ movq_cfi_restore RBP, rbp
++ movq_cfi_restore RBX, rbx
+ .endm
+
+ .macro SAVE_ALL
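
[The safety win in the calling.h rework above is that every save slot is now named after its struct pt_regs byte offset (R15..SS, generated by asm-offsets.c) instead of a hand-counted `n*8`, which is what lets the r12 slot move between SAVE_ARGS and SAVE_REST under the KERNEXEC OR plugin without desynchronizing the frame. A host-side sketch of the layout those constants come from; the offsets agree with the RSP=152/SS=160 values quoted at the top of the hunk:]

    #include <stddef.h>

    /* Hedged sketch: pt_regs_sketch mirrors this era's x86_64 struct pt_regs;
     * the R15..SS constants used by SAVE_ARGS/SAVE_REST are its byte offsets. */
    struct pt_regs_sketch {
    	unsigned long r15, r14, r13, r12, bp, bx;		/* SAVE_REST half */
    	unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;	/* SAVE_ARGS half */
    	unsigned long orig_ax, ip, cs, flags, sp, ss;		/* hardware frame */
    };

    _Static_assert(offsetof(struct pt_regs_sketch, r11) == 6*8, "R11 slot");
    _Static_assert(offsetof(struct pt_regs_sketch, orig_ax) == 15*8, "ORIG_RAX slot");
    _Static_assert(offsetof(struct pt_regs_sketch, sp) == 152, "matches RSP above");
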
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index 46fc474..b02b0f9 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
+index 5d3acdf..6447a02 100644
+--- a/arch/x86/include/asm/cmpxchg.h
++++ b/arch/x86/include/asm/cmpxchg.h
+@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for xadd_check_overflow");
+
+ /*
+ * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
+ __ret; \
+ })
+
++#define __xadd_check_overflow(ptr, inc, lock) \
++ ({ \
++ __typeof__ (*(ptr)) __ret = (inc); \
++ switch (sizeof(*(ptr))) { \
++ case __X86_CASE_L: \
++ asm volatile (lock "xaddl %0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ case __X86_CASE_Q: \
++ asm volatile (lock "xaddq %q0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ default: \
++ __xadd_check_overflow_wrong_size(); \
++ } \
++ __ret; \
++ })
++
+ /*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
+ #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #endif /* ASM_X86_CMPXCHG_H */
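
[The `jno 0f; mov %0,%1; int $4` tail added above is the PAX_REFCOUNT idiom: if the xadd overflowed the signed range, the pre-add value (returned in the register operand) is written back to undo the increment, and #OF is raised for the kernel's overflow handler to report. A minimal sketch of how this patch's checked atomics sit on top of it:]

    /* Hedged caller sketch: under PAX_REFCOUNT this patch builds the checked
     * atomics on xadd_check_overflow(), so a refcount that would wrap traps
     * (int $4 -> #OF) instead of silently becoming a use-after-free primitive. */
    static inline int atomic_add_return_sketch(int i, atomic_t *v)
    {
    	/* 4-byte counter -> the __X86_CASE_L arm of the macro above */
    	return i + xadd_check_overflow(&v->counter, i);
    }
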
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index 30d737e..9830a9b 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -194,7 +194,7 @@ typedef struct user_regs_struct32 compat_elf_gregset_t;
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+-typedef u32 compat_uptr_t;
++typedef u32 __user compat_uptr_t;
+
+ static inline void __user *compat_ptr(compat_uptr_t uptr)
+ {
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index a315f1c..540df6a 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -197,8 +197,9 @@
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+ #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
++#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
+ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
++#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
+
+ #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
+@@ -363,7 +364,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 41935fa..2be7ac3 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+
+ #include <linux/smp.h>
+
+@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void
+@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
+ default: size = sizeof(*gdt); break;
+ }
+
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -284,7 +297,7 @@ static inline void load_LDT(mm_context_t *pc)
+ preempt_enable();
+ }
+
+-static inline unsigned long get_desc_base(const struct desc_struct *desc)
++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
+ {
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+ }
+@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ desc->limit = (limit >> 16) & 0xf;
+ }
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
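
[The pack_gate() rewrite above swaps magic shift/mask expressions for the named `gate` bitfields added to desc_defs.h in the next hunk; both must encode identical descriptor words. A small host-side check with illustrative values: a 32-bit interrupt gate (type 0xE, dpl 0) for selector 0x60 and a handler at 0xc1000000:]

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t base = 0xc1000000, seg = 0x60, type = 0xE, dpl = 0;

    	/* the two words the old shift/mask code built */
    	uint32_t a = (seg << 16) | (base & 0xffff);
    	uint32_t b = (base & 0xffff0000)
    		   | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

    	/* what the bitfield version encodes: offset_low=0, seg=0x60,
    	 * type=0xE at bits 8..11, p=1 at bit 15, offset_high=0xc100 */
    	assert(a == 0x00600000 && b == 0xc1008e00);
    	return 0;
    }
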
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index 278441f..b95a174 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
+index 9a2d644..5abb141 100644
+--- a/arch/x86/include/asm/div64.h
++++ b/arch/x86/include/asm/div64.h
+@@ -33,7 +33,7 @@
+ __mod; \
+ })
+
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+ union {
+ u64 v64;
+diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
+index 908b969..a1f4eb4 100644
+--- a/arch/x86/include/asm/e820.h
++++ b/arch/x86/include/asm/e820.h
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS 0xa0000
+ #define ISA_END_ADDRESS 0x100000
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #define BIOS_ROM_BASE 0xffe00000
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 5f962df..7289f09 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -238,7 +238,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -291,9 +309,7 @@ do { \
+
+ #define ARCH_DLINFO \
+ do { \
+- if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -304,7 +320,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ /*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
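
[For scale, the PAX_DELTA_* values above translate into randomization bits as follows, assuming the usual PAGE_SHIFT of 12 and the patch's default TASK_SIZE_MAX_SHIFT of 47 (both assumptions here):]

    /*
     * native x86_64 task : 47 - 12 - 3 = 32 bits of mmap/stack randomization
     * ia32 compat task   : 16 bits
     * i386               : 16 bits (15 under SEGMEXEC, whose code/data
     *                      split halves the usable user address space)
     */
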
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index cc70c1c..d96d011 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -15,6 +15,6 @@ enum reboot_type {
+
+ extern enum reboot_type reboot_type;
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
+index d09bb03..0a3629b 100644
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -12,20 +12,22 @@
+ #include <asm/system.h>
+
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
++ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
+ "\tjnz\t1b\n" \
+ "3:\t.section .fixup,\"ax\"\n" \
+ "4:\tmov\t%5, %1\n" \
+@@ -34,7 +36,7 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
++ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
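
[The `__copyuser_seg` prefix threaded through the futex ops above is the 32-bit UDEREF hook: once userland lives in its own segment, every inline-asm user access needs an explicit segment override. A hedged sketch of the idea (the real definition sits in this patch's uaccess headers; the choice of %gs below is an assumption):]

    #ifdef CONFIG_PAX_MEMORY_UDEREF
    #define __copyuser_seg "gs;"	/* assumed: user segment reachable via %gs */
    #else
    #define __copyuser_seg		/* no UDEREF: flat address space, no prefix */
    #endif

    /* So __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ...) emits
     * "gs; xchgl ..." and the access stays confined to the user segment. */
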
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index eb92a6e..b98b2f4 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
+index a850b4d..1d8dfb7 100644
+--- a/arch/x86/include/asm/i387.h
++++ b/arch/x86/include/asm/i387.h
+@@ -88,10 +88,12 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
+ }
+
+ #ifdef CONFIG_X86_64
+-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
++static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
+ {
+ int err;
+
++ fx = (struct i387_fxsave_struct __user *)____m(fx);
++
+ /* See comment in fxsave() below. */
+ #ifdef CONFIG_AS_FXSAVEQ
+ asm volatile("1: fxrstorq %[fx]\n\t"
+@@ -121,6 +123,8 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+ {
+ int err;
+
++ fx = (struct i387_fxsave_struct __user *)____m(fx);
++
+ /*
+ * Clear the bytes not touched by the fxsave and reserved
+ * for the SW usage.
+@@ -189,15 +193,15 @@ static inline void fpu_fxsave(struct fpu *fpu)
+ #else /* CONFIG_X86_32 */
+
+ /* perform fxrstor iff the processor has extended states, otherwise frstor */
+-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
++static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
+ {
+ /*
+ * The "nop" is needed to make the instructions the same
+ * length.
+ */
+ alternative_input(
+- "nop ; frstor %1",
+- "fxrstor %1",
++ __copyuser_seg" frstor %1; nop",
++ __copyuser_seg" fxrstor %1",
+ X86_FEATURE_FXSR,
+ "m" (*fx));
+
+@@ -256,7 +260,14 @@ static inline int __save_init_fpu(struct task_struct *tsk)
+
+ static inline int fpu_fxrstor_checking(struct fpu *fpu)
+ {
+- return fxrstor_checking(&fpu->state->fxsave);
++ int ret;
++ mm_segment_t fs;
++
++ fs = get_fs();
++ set_fs(KERNEL_DS);
++ ret = fxrstor_checking(&fpu->state->fxsave);
++ set_fs(fs);
++ return ret;
+ }
+
+ static inline int fpu_restore_checking(struct fpu *fpu)
+@@ -424,7 +435,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
+ static inline bool interrupted_user_mode(void)
+ {
+ struct pt_regs *regs = get_irq_regs();
+- return regs && user_mode_vm(regs);
++ return regs && user_mode(regs);
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
+index a203659..9889f1c 100644
+--- a/arch/x86/include/asm/i8259.h
++++ b/arch/x86/include/asm/i8259.h
+@@ -62,7 +62,7 @@ struct legacy_pic {
+ void (*init)(int auto_eoi);
+ int (*irq_pending)(unsigned int irq);
+ void (*make_irq)(unsigned int irq);
+-};
++} __do_const;
+
+ extern struct legacy_pic *legacy_pic;
+ extern struct legacy_pic null_legacy_pic;
+diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
+index 1f7e625..541485f 100644
+--- a/arch/x86/include/asm/ia32.h
++++ b/arch/x86/include/asm/ia32.h
+@@ -126,6 +126,12 @@ typedef struct compat_siginfo {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
++
++ struct {
++ unsigned int _call_addr; /* calling insn */
++ int _syscall; /* triggering system call number */
++ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
++ } _sigsys;
+ } _sifields;
+ } compat_siginfo_t;
+
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index d8e8eef..1765f78 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
+ "m" (*(volatile type __force *)addr) barrier); }
+
+ build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
++build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
+
+ build_mmio_read(__readb, "b", unsigned char, "=q", )
+-build_mmio_read(__readw, "w", unsigned short, "=r", )
+-build_mmio_read(__readl, "l", unsigned int, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
++build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
+
+ build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
+ build_mmio_write(writew, "w", unsigned short, "r", :"memory")
+@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+ return ioremap_nocache(offset, size);
+ }
+
+-extern void iounmap(volatile void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+
+ extern void set_iounmap_nonlazy(void);
+
+@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index bba3cf8..06bc8da 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
+index 5478825..839e88c 100644
+--- a/arch/x86/include/asm/kprobes.h
++++ b/arch/x86/include/asm/kprobes.h
+@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
+ #define RELATIVEJUMP_SIZE 5
+ #define RELATIVECALL_OPCODE 0xe8
+ #define RELATIVE_ADDR_SIZE 4
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR) \
+- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR))) \
+- ? (MAX_STACK_SIZE) \
+- : (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+
+ #define flush_insn_slot(p) do { } while (0)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index cfb5a40..fc8880d 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -459,7 +459,7 @@ struct kvm_arch {
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
+- atomic_t invlpg_counter;
++ atomic_unchecked_t invlpg_counter;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 9cdae5d..3534f04 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -11,33 +11,97 @@ typedef struct {
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter));
++}
++
++static inline void local_inc_unchecked(local_unchecked_t *l)
++{
++ asm volatile(_ASM_INC "%0\n"
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter));
++}
++
++static inline void local_dec_unchecked(local_unchecked_t *l)
++{
++ asm volatile(_ASM_DEC "%0\n"
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter)
++ : "ir" (i));
++}
++
++static inline void local_add_unchecked(long i, local_unchecked_t *l)
++{
++ asm volatile(_ASM_ADD "%1,%0\n"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter)
++ : "ir" (i));
++}
++
++static inline void local_sub_unchecked(long i, local_unchecked_t *l)
++{
++ asm volatile(_ASM_SUB "%1,%0\n"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +119,16 @@ static inline int local_sub_and_test(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +146,16 @@ static inline int local_dec_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +173,16 @@ static inline int local_inc_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +201,16 @@ static inline int local_add_negative(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +233,15 @@ static inline long local_add_return(long i, local_t *l)
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_MOV "%0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+@@ -148,6 +256,38 @@ no_xadd: /* Legacy 386 processor */
+ #endif
+ }
+
++/**
++ * local_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @l: pointer to type local_unchecked_t
++ *
++ * Atomically adds @i to @l and returns @i + @l
++ */
++static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
++{
++ long __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
++ asm volatile(_ASM_XADD "%0, %1\n"
++ : "+r" (i), "+m" (l->a.counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = local_read_unchecked(l);
++ local_set_unchecked(l, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
+ static inline long local_sub_return(long i, local_t *l)
+ {
+ return local_add_return(-i, l);
+@@ -158,6 +298,8 @@ static inline long local_sub_return(long i, local_t *l)
+
+ #define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ /* Always has a lock prefix */
+ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
+index 593e51d..fa69c9a 100644
+--- a/arch/x86/include/asm/mman.h
++++ b/arch/x86/include/asm/mman.h
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 5f55e69..e20bfb1 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,7 +9,7 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+
+ #ifdef CONFIG_X86_64
+@@ -18,7 +18,19 @@ typedef struct {
+ #endif
+
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 6902152..da4283a 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ set_pgd_batched(pgd+i, native_make_pgd(0));
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX))
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
++#endif
+ }
+-#endif
+ }
+
+ #define activate_mm(prev, next) \
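
[The switch_mm() changes above are the heart of PAX_PER_CPU_PGD/UDEREF: CR3 always points at a per-CPU page directory, and each switch re-copies only the user half of the incoming mm's PGD into it, plus a shadow copy with user rights stripped for the kernel's view. A rough sketch of the clone step, assuming the helper's shape (the real body lives in this patch's mm code):]

    /* Hedged sketch of __clone_user_pgds(): only USER_PGD_PTRS entries are
     * user-visible, so only those are refreshed; kernel entries in the
     * per-CPU PGD are set up once and never touched on the switch path.
     * Callers bracket this with pax_open_kernel()/pax_close_kernel(). */
    void __clone_user_pgds_sketch(pgd_t *dst, const pgd_t *src)
    {
    	unsigned int count = USER_PGD_PTRS;

    	while (count--)
    		*dst++ = *src++;
    }
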
+diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+index 9eae775..c914fea 100644
+--- a/arch/x86/include/asm/module.h
++++ b/arch/x86/include/asm/module.h
+@@ -5,6 +5,7 @@
+
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M386
+ #define MODULE_PROC_FAMILY "386 "
+ #elif defined CONFIG_M486
+@@ -59,8 +60,20 @@
+ #error unknown processor family
+ #endif
+
+-#ifdef CONFIG_X86_32
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++
+ #endif /* _ASM_X86_MODULE_H */
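
[The net effect of the vermagic additions above is that modules record which PaX ABI they were built against, for example:]

    /* Hedged example: with KERNEXEC_PLUGIN_METHOD_OR and UDEREF configured,
     *   MODULE_ARCH_VERMAGIC -> "" "KERNEXEC_OR " "UDEREF "
     * so `modinfo` might show (illustrative string):
     *   vermagic: 3.2.54-hardened SMP mod_unload KERNEXEC_OR UDEREF
     * and a module built without these flags is refused at load time. */
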
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 7639dbf..e08a58c 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 91e758b..cac1cd6 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -601,7 +601,7 @@ static inline pmd_t __pmd(pmdval_t val)
+ return (pmd_t) { ret };
+ }
+
+-static inline pmdval_t pmd_val(pmd_t pmd)
++static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
+ {
+ pmdval_t ret;
+
+@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+ val);
+ }
+
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++ pgdval_t val = native_pgd_val(pgd);
++
++ if (sizeof(pgdval_t) > sizeof(long))
++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++ val, (u64)val >> 32);
++ else
++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++ val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+ set_pgd(pgdp, __pgd(0));
+@@ -751,6 +763,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -967,7 +994,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1044,6 +1071,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
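
[Making pax_open_kernel()/pax_close_kernel() paravirt ops means a hypervisor backend can substitute its own write-enable mechanism for the bare-metal CR0.WP toggle. A hedged sketch of the native wiring this implies (the actual initializer lives in paravirt.c in this patch):]

    /* Assumed shape of the native pv_mmu_ops additions: */
    #ifdef CONFIG_PAX_KERNEXEC
    	.pax_open_kernel  = native_pax_open_kernel,
    	.pax_close_kernel = native_pax_close_kernel,
    #endif
    	.set_pgd_batched  = native_set_pgd_batched,	/* cheap bulk PGD copies */
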
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index faf2c04..5724dcd 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -84,7 +84,7 @@ struct pv_init_ops {
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+-};
++} __no_const;
+
+
+ struct pv_lazy_ops {
+@@ -98,7 +98,7 @@ struct pv_time_ops {
+ unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
+ unsigned long (*get_tsc_khz)(void);
+-};
++} __no_const;
+
+ struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+@@ -194,7 +194,7 @@ struct pv_cpu_ops {
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const;
+
+ struct pv_irq_ops {
+ /*
+@@ -225,7 +225,7 @@ struct pv_apic_ops {
+ unsigned long start_eip,
+ unsigned long start_esp);
+ #endif
+-};
++} __no_const;
+
+ struct pv_mmu_ops {
+ unsigned long (*read_cr2)(void);
+@@ -314,6 +314,7 @@ struct pv_mmu_ops {
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif /* PAGETABLE_LEVELS == 4 */
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -325,6 +326,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct arch_spinlock;
+@@ -335,7 +342,7 @@ struct pv_lock_ops {
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
+-};
++} __no_const;
+
+ /* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index b4389a4..7024269 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+
+ #ifdef CONFIG_X86_PAE
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
++{
++ pud_populate(mm, pudp, pmd);
++}
+ #else /* !CONFIG_X86_PAE */
+ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
++}
+ #endif /* CONFIG_X86_PAE */
+
+ #if PAGETABLE_LEVELS > 3
+@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
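
[The *_populate_kernel() variants introduced above differ from their user counterparts in exactly one bit: _KERNPG_TABLE omits _PAGE_USER, so kernel page tables are never walkable at user privilege. The flag sets, mirroring pgtable_types.h:]

    /* Hedged restatement of the relevant pgtable_types.h constants: */
    #define _PAGE_PRESENT	0x001
    #define _PAGE_RW	0x002
    #define _PAGE_USER	0x004
    #define _PAGE_ACCESSED	0x020
    #define _PAGE_DIRTY	0x040

    #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
    #define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)	/* the only delta */
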
+diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
+index 98391db..8f6984e 100644
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index cb00ccc..17e9054 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 6be9909..ee359f2 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd) native_pgd_clear(pgd)
+ #endif
+
+@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(cr0 & X86_CR0_WP);
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(!(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pgd_pfn(pgd_t pgd)
++{
++ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -515,7 +589,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
++#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
+
+ /* Find an entry in the second-level page table.. */
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+@@ -555,7 +629,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
++#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
+
+ /* to find an entry in a page-table-directory. */
+ static inline unsigned long pud_index(unsigned long address)
+@@ -570,7 +644,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -593,7 +667,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -604,6 +683,22 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#ifdef __ASSEMBLY__
++#define pax_user_shadow_base pax_user_shadow_base(%rip)
++#else
++extern unsigned long pax_user_shadow_base;
++#endif
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -768,11 +863,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
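
[native_pax_open_kernel() above implements the KERNEXEC write window: preemption goes off first so the WP-clear interval can neither migrate nor outlive the caller, and the XOR/BUG_ON pair doubles as a nesting check (opening twice, or closing an unopened window, traps). The canonical usage bracket, as seen throughout the descriptor and page-table setters in this patch:]

    /* Hedged usage sketch: keep the window as small as possible. */
    static void patch_ro_word_sketch(unsigned long *ro_slot, unsigned long val)
    {
    	pax_open_kernel();	/* preempt off, CR0.WP cleared  */
    	*ro_slot = val;		/* store through the RO mapping */
    	pax_close_kernel();	/* CR0.WP restored, preempt on  */
    }
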
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index 0c92113..34a77c6 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -25,9 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t initial_page_table[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t initial_page_table[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -74,6 +79,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index ed5903b..c7fe163 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 975f709..9f779c9 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -16,10 +16,14 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ *pudp = pud;
++ pax_close_kernel();
+ }
+
+ static inline void native_pud_clear(pud_t *pud)
+@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
++ *pgdp = pgd;
++ pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+ *pgdp = pgd;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 766ea16..5b96cb3 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 013286a..8b42f4f 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -16,13 +16,12 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
++#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -40,7 +39,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -57,8 +55,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -96,6 +96,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -106,7 +109,7 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
+ #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+@@ -168,8 +171,8 @@
+ * bits are combined, this will allow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
+index 2dddb31..100c638 100644
+--- a/arch/x86/include/asm/processor-flags.h
++++ b/arch/x86/include/asm/processor-flags.h
+@@ -62,6 +62,7 @@
+ #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
+ #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
+ #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
++#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
+
+ /*
+ * x86-64 Task Priority Register, CR8
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index f7c89e2..9962bae 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -266,7 +266,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -859,11 +859,18 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+@@ -877,7 +884,7 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -888,11 +895,7 @@ static inline void spin_lock_prefetch(const void *x)
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -907,7 +910,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -917,13 +920,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ /*
+ * User space process size. 47 bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -934,11 +937,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ #define INIT_TSS { \
+- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ /*
+@@ -960,6 +963,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
+@@ -972,7 +979,8 @@ extern int set_tsc_mode(unsigned int val);
+ extern int amd_get_nb_id(int cpu);
+
+ struct aperfmperf {
+- u64 aperf, mperf;
++ u64 aperf __intentional_overflow(-1);
++ u64 mperf __intentional_overflow(-1);
+ };
+
+ static inline void get_aperfmperf(struct aperfmperf *am)
+diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
+index 7b0a55a..ad115bf 100644
+--- a/arch/x86/include/asm/ptrace-abi.h
++++ b/arch/x86/include/asm/ptrace-abi.h
+@@ -49,7 +49,6 @@
+ #define EFLAGS 144
+ #define RSP 152
+ #define SS 160
+-#define ARGOFFSET R11
+ #endif /* __ASSEMBLY__ */
+
+ /* top of stack page */
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 3b96fd4..8790004 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
+ #ifdef CONFIG_X86_64
+ static inline bool user_64bit_mode(struct pt_regs *regs)
+ {
++ unsigned long cs = regs->cs & 0xffff;
+ #ifndef CONFIG_PARAVIRT
+ /*
+ * On non-paravirt systems, this is the only long mode CPL 3
+ * selector. We do not allow long mode selectors in the LDT.
+ */
+- return regs->cs == __USER_CS;
++ return cs == __USER_CS;
+ #else
+ /* Headers are too twisted for this to go in paravirt.h. */
+- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
++ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
+ #endif
+ }
+ #endif
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 92f29706..d0a1a53 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -6,19 +6,19 @@
+ struct pt_regs;
+
+ struct machine_ops {
+- void (*restart)(char *cmd);
+- void (*halt)(void);
+- void (*power_off)(void);
++ void (* __noreturn restart)(char *cmd);
++ void (* __noreturn halt)(void);
++ void (* __noreturn power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+- void (*emergency_restart)(void);
+-};
++ void (* __noreturn emergency_restart)(void);
++} __no_const;
+
+ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(unsigned int type);
++void __noreturn machine_real_restart(unsigned int type);
+ /* These must match dispatch_table in reboot_32.S */
+ #define MRR_BIOS 0
+ #define MRR_APM 1
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index 2dbe4a7..ce1db00 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0x00000001 */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %3,%2\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+ long tmp;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0xffff0001, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
+ long tmp;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
+ long tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ */
+ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+ */
+ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+ {
+- return delta + xadd(&sem->count, delta);
++ return delta + xadd_check_overflow(&sem->count, delta);
+ }
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index 5e64171..f58957e 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -64,10 +64,15 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
++#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN 6
+ #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+@@ -79,6 +84,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
+@@ -104,6 +111,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -141,7 +154,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -165,6 +178,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -185,6 +200,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 73b11bc..d4a3b63 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */
+ DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+ DECLARE_PER_CPU(u16, cpu_llc_id);
+-DECLARE_PER_CPU(int, cpu_number);
++DECLARE_PER_CPU(unsigned int, cpu_number);
+
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -77,7 +77,7 @@ struct smp_ops {
+
+ void (*send_call_func_ipi)(const struct cpumask *mask);
+ void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
+ extern int safe_smp_processor_id(void);
+
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (percpu_read(cpu_number))
+-
+-#define stack_smp_processor_id() \
+-({ \
+- struct thread_info *ti; \
+- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+- ti->cpu; \
+-})
++#define raw_smp_processor_id() (percpu_read(cpu_number))
++#define stack_smp_processor_id() raw_smp_processor_id()
+ #define safe_smp_processor_id() smp_processor_id()
+
+ #endif
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index 972c260..43ab1fd 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
++ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
++ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 1575177..cb23f52 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -48,7 +48,7 @@
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+ #define GDT_STACK_CANARY_INIT \
+- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+
+ /*
+ * Initialize the stackprotector canary value.
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
+index 70bbe39..4ae2bd4 100644
+--- a/arch/x86/include/asm/stacktrace.h
++++ b/arch/x86/include/asm/stacktrace.h
+@@ -11,28 +11,20 @@
+
+ extern int kstack_depth_to_print;
+
+-struct thread_info;
++struct task_struct;
+ struct stacktrace_ops;
+
+-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+- unsigned long *stack,
+- unsigned long bp,
+- const struct stacktrace_ops *ops,
+- void *data,
+- unsigned long *end,
+- int *graph);
++typedef unsigned long walk_stack_t(struct task_struct *task,
++ void *stack_start,
++ unsigned long *stack,
++ unsigned long bp,
++ const struct stacktrace_ops *ops,
++ void *data,
++ unsigned long *end,
++ int *graph);
+
+-extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
+-
+-extern unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
+- unsigned long *stack, unsigned long bp,
+- const struct stacktrace_ops *ops, void *data,
+- unsigned long *end, int *graph);
++extern walk_stack_t print_context_stack;
++extern walk_stack_t print_context_stack_bp;
+
+ /* Generic stack tracer with callbacks */
+
+@@ -40,7 +32,7 @@ struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+ /* On negative return stop dumping */
+ int (*stack)(void *data, char *name);
+- walk_stack_t walk_stack;
++ walk_stack_t *walk_stack;
+ };
+
+ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
+index cb23852..2dde194 100644
+--- a/arch/x86/include/asm/sys_ia32.h
++++ b/arch/x86/include/asm/sys_ia32.h
+@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
+ compat_sigset_t __user *, unsigned int);
+ asmlinkage long sys32_alarm(unsigned int);
+
+-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
++asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+ asmlinkage long sys32_sysfs(int, u32, u32);
+
+ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
+diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
+index c4a348f..e2ad7ea 100644
+--- a/arch/x86/include/asm/syscall.h
++++ b/arch/x86/include/asm/syscall.h
+@@ -13,6 +13,7 @@
+ #ifndef _ASM_X86_SYSCALL_H
+ #define _ASM_X86_SYSCALL_H
+
++#include <linux/audit.h>
+ #include <linux/sched.h>
+ #include <linux/err.h>
+
+@@ -86,6 +87,12 @@ static inline void syscall_set_arguments(struct task_struct *task,
+ memcpy(&regs->bx + i, args, n * sizeof(args[0]));
+ }
+
++static inline int syscall_get_arch(struct task_struct *task,
++ struct pt_regs *regs)
++{
++ return AUDIT_ARCH_I386;
++}
++
+ #else /* CONFIG_X86_64 */
+
+ static inline void syscall_get_arguments(struct task_struct *task,
+@@ -210,6 +217,22 @@ static inline void syscall_set_arguments(struct task_struct *task,
+ }
+ }
+
++static inline int syscall_get_arch(struct task_struct *task,
++ struct pt_regs *regs)
++{
++#ifdef CONFIG_IA32_EMULATION
++ /*
++ * TS_COMPAT is set for 32-bit syscall entries and then
++ * remains set until we return to user mode.
++ *
++ * TIF_IA32 tasks should always have TS_COMPAT set at
++ * system call time.
++ */
++ if (task_thread_info(task)->status & TS_COMPAT)
++ return AUDIT_ARCH_I386;
++#endif
++ return AUDIT_ARCH_X86_64;
++}
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* _ASM_X86_SYSCALL_H */
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index d75adff..c0cc78b 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -125,7 +125,7 @@ do { \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+- "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+@@ -136,7 +136,7 @@ do { \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, stack)), \
++ [thread_info] "m" (current_tinfo), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+@@ -196,7 +196,7 @@ static inline unsigned long get_limit(unsigned long segment)
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -390,13 +390,13 @@ static inline void clflush(volatile void *__p)
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+ bool set_pm_idle_to_default(void);
+
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+
+ /*
+ * Force strict CPU ordering.
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index d7ef849..b1b009a 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+
+ /*
+ * low level task data that entry.S needs immediate access to
+@@ -24,7 +25,6 @@ struct exec_domain;
+ #include <linux/atomic.h>
+
+ struct thread_info {
+- struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+@@ -34,18 +34,12 @@ struct thread_info {
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+-#ifdef CONFIG_X86_32
+- unsigned long previous_esp; /* ESP of the previous stack in
+- case of nested (IRQ) stacks
+- */
+- __u8 supervisor_stack[0];
+-#endif
++ unsigned long lowest_stack;
+ int uaccess_err;
+ };
+
+-#define INIT_THREAD_INFO(tsk) \
++#define INIT_THREAD_INFO \
+ { \
+- .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+@@ -56,7 +50,7 @@ struct thread_info {
+ }, \
+ }
+
+-#define init_thread_info (init_thread_union.thread_info)
++#define init_thread_info (init_thread_union.stack)
+ #define init_stack (init_thread_union.stack)
+
+ #else /* !__ASSEMBLY__ */
+@@ -95,6 +89,7 @@ struct thread_info {
+ #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
+ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
+ #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
+
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+@@ -117,16 +112,17 @@ struct thread_info {
+ #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
+ #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_enter() */
+ #define _TIF_WORK_SYSCALL_ENTRY \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
+- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
+- _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK \
+@@ -136,7 +132,8 @@ struct thread_info {
+
+ /* work to do on any return to user space */
+ #define _TIF_ALLWORK_MASK \
+- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
++ _TIF_GRSEC_SETXID)
+
+ /* Only used for 64 bit */
+ #define _TIF_DO_NOTIFY_MASK \
+@@ -170,45 +167,40 @@ struct thread_info {
+ ret; \
+ })
+
+-#ifdef CONFIG_X86_32
+-
+-#define STACK_WARN (THREAD_SIZE/8)
+-/*
+- * macros/functions for gaining access to the thread information structure
+- *
+- * preempt_count needs to be 1 initially, until the scheduler is functional.
+- */
+-#ifndef __ASSEMBLY__
+-
+-
+-/* how to get the current stack pointer from C */
+-register unsigned long current_stack_pointer asm("esp") __used;
+-
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+- return (struct thread_info *)
+- (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
++#ifdef __ASSEMBLY__
+ /* how to get the thread information struct from ASM */
+ #define GET_THREAD_INFO(reg) \
+- movl $-THREAD_SIZE, reg; \
+- andl %esp, reg
++ mov PER_CPU_VAR(current_tinfo), reg
+
+ /* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+- andl $-THREAD_SIZE, reg
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++ return percpu_read_stable(current_tinfo);
++}
++#endif
++
++#ifdef CONFIG_X86_32
++
++#define STACK_WARN (THREAD_SIZE/8)
++/*
++ * macros/functions for gaining access to the thread information structure
++ *
++ * preempt_count needs to be 1 initially, until the scheduler is functional.
++ */
++#ifndef __ASSEMBLY__
++
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("esp") __used;
+
+ #endif
+
+ #else /* X86_32 */
+
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -216,21 +208,8 @@ static inline struct thread_info *current_thread_info(void)
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+-static inline struct thread_info *current_thread_info(void)
+-{
+- struct thread_info *ti;
+- ti = (void *)(percpu_read_stable(kernel_stack) +
+- KERNEL_STACK_OFFSET - THREAD_SIZE);
+- return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movq PER_CPU_VAR(kernel_stack),reg ; \
+- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+
+ #endif /* !X86_32 */
+@@ -264,5 +243,16 @@ extern void arch_task_cache_init(void);
+ extern void free_thread_info(struct thread_info *ti);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define arch_task_cache_init arch_task_cache_init
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task) (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
++extern struct task_struct *alloc_task_struct_node(int node);
++extern void free_task_struct(struct task_struct *);
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 36361bf..4252f11 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -7,6 +7,7 @@
+ #include <linux/compiler.h>
+ #include <linux/thread_info.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+@@ -28,7 +29,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -76,7 +82,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -182,12 +214,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -199,8 +239,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -252,7 +292,7 @@ extern void __put_user_8(void);
+ __typeof__(*(ptr)) __pu_val; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+- __pu_val = x; \
++ __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_x(1, __pu_val, ptr, __ret_pu); \
+@@ -373,7 +413,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -381,7 +421,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -406,7 +446,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -423,13 +463,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < pax_user_shadow_base) \
++ ____x += pax_user_shadow_base; \
++ (typeof(x))____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -437,7 +488,7 @@ struct __large_struct { unsigned long buf[100]; };
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -445,10 +496,10 @@ struct __large_struct { unsigned long buf[100]; };
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -487,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -510,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -529,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 566e803..9540707 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ check_object_size(from, n, true);
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
+ return __copy_to_user_inatomic(to, from, n);
+ }
+
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -137,6 +146,12 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
++ check_object_size(to, n, false);
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -181,15 +200,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
++ if ((long)n < 0)
++ return n;
++
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
+ }
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
+-
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
+ #endif
+ ;
+
+-static inline unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(from);
+
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+- else
++ if (unlikely(sz != (size_t)-1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ size_t sz = __compiletime_object_size(to);
++
++ check_object_size(to, n, false);
++
++ if (unlikely(sz != (size_t)-1 && sz < n))
+ copy_from_user_overflow();
+-
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0)
++ memset(to, 0, n);
+ return n;
+ }
+
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index 1c66d30..9d90f2c 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -10,6 +10,9 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+ /*
+ * Copy To/From Userspace
+@@ -17,12 +20,12 @@
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_generic_string(void *to, const void *from, unsigned len);
++copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
+ __must_check unsigned long
+-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
++copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
+
+ static __always_inline __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len)
++copy_user_generic(void *to, const void *from, unsigned long len)
+ {
+ unsigned ret;
+
+@@ -36,138 +39,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
+ return ret;
+ }
+
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned long len);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned long len);
+ __must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
++
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
++
++extern void copy_from_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_from_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_from_user() buffer size is not provably correct")
++#endif
++;
+
+ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
+-
+ might_fault();
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+- else
+- WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++ check_object_size(to, n, false);
++
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if (n < INT_MAX)
++ memset(to, 0, n);
+ return n;
+ }
+
+ static __always_inline __must_check
+-int copy_to_user(void __user *dst, const void *src, unsigned size)
++int copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+ might_fault();
+
+- return _copy_to_user(dst, src, size);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ size = __copy_to_user(dst, src, size);
++ return size;
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ size_t sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
++
++ if (size > INT_MAX)
++ return size;
++
++ check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
++ if (unlikely(sz != (size_t)-1 && sz < size)) {
++ copy_from_user_overflow();
++ return size;
++ }
++
+ if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+ switch (size) {
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+- return copy_user_generic(dst, (__force void *)src, size);
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+- int ret = 0;
++ size_t sz = __compiletime_object_size(src);
++ unsigned ret = 0;
+
+ might_fault();
++
++ if (size > INT_MAX)
++ return size;
++
++ check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (unlikely(sz != (size_t)-1 && sz < size)) {
++ copy_to_user_overflow();
++ return size;
++ }
++
+ if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst, src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ switch (size) {
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
+- return copy_user_generic((__force void *)dst, src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
+ if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst),
++ (__force_kernel const void *)____m(src), size);
+ switch (size) {
+ case 1: {
+ u8 tmp;
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -176,7 +241,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -186,7 +251,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -203,8 +268,8 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ return ret;
+ }
+ default:
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst),
++ (__force_kernel const void *)____m(src), size);
+ }
+ }
+
+@@ -218,36 +283,57 @@ __must_check long strlen_user(const char __user *str);
+ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
+ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+
+-static __must_check __always_inline int
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
+ {
+- return copy_user_generic(dst, (__force const void *)src, size);
++ if (size > INT_MAX)
++ return size;
++
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+ }
+
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+- return copy_user_generic((__force void *)dst, src, size);
++ if (size > INT_MAX)
++ return size;
++
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+- unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++ unsigned long size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+ might_sleep();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+- unsigned size)
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++ unsigned long size)
+ {
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++extern unsigned long
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
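
Taken together, the uaccess_64.h hunks above apply one pattern: every copy helper now returns unsigned long, rejects sizes above INT_MAX before doing any work, and (under UDEREF) re-validates both pointers with __access_ok(). A minimal userspace sketch of the size clamp, with copy_checked() as a hypothetical stand-in for the kernel helpers:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Like copy_*_user(), returns the number of bytes NOT copied, so an
 * oversized request returns `size` itself and touches nothing. */
static unsigned long copy_checked(void *dst, const void *src, unsigned long size)
{
	if (size > INT_MAX)	/* refuse sizes that would wrap int arithmetic */
		return size;
	memcpy(dst, src, size);
	return 0;		/* nothing left uncopied: success */
}

int main(void)
{
	char dst[8];

	printf("%lu\n", copy_checked(dst, "hi", 3));	/* 0 */
	printf("%lu\n", copy_checked(dst, "hi", (unsigned long)INT_MAX + 1));
	return 0;
}

Returning the full size on failure keeps the copy_*_user() convention intact: callers that check for a nonzero remainder treat an oversized request exactly like a fault.
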
+diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
+index bb05228..d763d5b 100644
+--- a/arch/x86/include/asm/vdso.h
++++ b/arch/x86/include/asm/vdso.h
+@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
+ #define VDSO32_SYMBOL(base, name) \
+ ({ \
+ extern const char VDSO32_##name[]; \
+- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+ })
+ #endif
+
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 1971e65..1e07354 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -139,7 +139,7 @@ struct x86_init_ops {
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
+ struct x86_init_pci pci;
+-};
++} __no_const;
+
+ /**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -147,7 +147,7 @@ struct x86_init_ops {
+ */
+ struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_platform_ops - platform specific runtime functions
+@@ -169,7 +169,7 @@ struct x86_platform_ops {
+ void (*nmi_init)(void);
+ unsigned char (*get_nmi_reason)(void);
+ int (*i8042_detect)(void);
+-};
++} __no_const;
+
+ struct pci_dev;
+
+@@ -177,7 +177,7 @@ struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+-};
++} __no_const;
+
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
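
The __no_const annotations above are the escape hatch for the PaX constify plugin, which otherwise turns structures made up of function pointers into const objects placed in .rodata; x86_init and friends must stay writable because platform code patches them during boot. A plain-C sketch of the idea the plugin automates (the plugin adds the const itself; this only shows why it helps):

#include <stdio.h>

struct timer_ops {
	void (*setup)(void);
};

static void default_setup(void) { puts("timer setup"); }

/* a constified ops table: read-only for the program's whole lifetime,
 * so its function pointers cannot be overwritten at runtime */
static const struct timer_ops timer_ops = { .setup = default_setup };

int main(void)
{
	timer_ops.setup();
	/* timer_ops.setup = NULL;  -- rejected at compile time */
	return 0;
}
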
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index c34f96c..e26f052 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -54,7 +54,7 @@ extern int m2p_remove_override(struct page *page, bool clear_pte);
+ extern struct page *m2p_find_override(unsigned long mfn);
+ extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+
+-static inline unsigned long pfn_to_mfn(unsigned long pfn)
++static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
+ {
+ unsigned long mfn;
+
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index c6ce245..aab6adb 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -65,6 +65,8 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ {
+ int err;
+
++ buf = (struct xsave_struct __user *)____m(buf);
++
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+@@ -74,7 +76,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ if (unlikely(err))
+ return -EFAULT;
+
+- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
++ __asm__ __volatile__("1:"
++ __copyuser_seg
++ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+@@ -96,11 +100,13 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ {
+ int err;
+- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)____m(buf));
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
++ __asm__ __volatile__("1:"
++ __copyuser_seg
++ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 479d03c..2450277 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1345,7 +1345,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+ * If your system is blacklisted here, but you find that acpi=force
+ * works for you, please contact linux-acpi@vger.kernel.org
+ */
+-static struct dmi_system_id __initdata acpi_dmi_table[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table[] = {
+ /*
+ * Boxes that need ACPI disabled
+ */
+@@ -1420,7 +1420,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+ };
+
+ /* second table for DMI checks that should run after early-quirks */
+-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
++static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
+ /*
+ * HP laptops which use a DSDT reporting as HP/SB400/10000,
+ * which includes some code which overrides all temperature
+diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
+index 6a564ac..3f3a3d7 100644
+--- a/arch/x86/kernel/acpi/realmode/Makefile
++++ b/arch/x86/kernel/acpi/realmode/Makefile
+@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
+index b4fd836..4358fe3 100644
+--- a/arch/x86/kernel/acpi/realmode/wakeup.S
++++ b/arch/x86/kernel/acpi/realmode/wakeup.S
+@@ -108,6 +108,9 @@ wakeup_code:
+ /* Do any other stuff... */
+
+ #ifndef CONFIG_64BIT
++ /* Recheck NX bit overrides (64-bit path does this in the trampoline) */
++ call verify_cpu
++
+ /* This could also be done in C code... */
+ movl pmode_cr3, %eax
+ movl %eax, %cr3
+@@ -131,6 +134,7 @@ wakeup_code:
+ movl pmode_cr0, %eax
+ movl %eax, %cr0
+ jmp pmode_return
++# include "../../verify_cpu.S"
+ #else
+ pushw $0
+ pushw trampoline_segment
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index 103b6ab..2004d0a 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
+ header->trampoline_segment = trampoline_address() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index 13ab720..95d5442 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index bda833c..a9bdd97 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ */
+ for (a = start; a < end; a++) {
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
++ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+ BUG_ON(a->replacementlen > a->instrlen);
+ BUG_ON(a->instrlen > sizeof(insnbuf));
+@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn DS segment override prefix into lock prefix */
+- if (*ptr == 0x3e)
++ if (*ktla_ktva(ptr) == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
++ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
++#endif
++
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn lock prefix into DS segment override prefix */
+- if (*ptr == 0xf0)
++ if (*ktla_ktva(ptr) == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-void *__init_or_module text_poke_early(void *addr, const void *opcode,
++void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+ return addr;
+ }
+
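
The reworked text_poke() above drops the fixmap remapping entirely: it writes through the writable alias that ktla_ktva() yields, inside a pax_open_kernel() window, then verifies every byte against the intended opcode. A userspace sketch of the two-addresses-one-backing idea, using a memfd where the kernel uses its physical mapping (the 0xf0 lock prefix follows the hunks above; everything else is illustrative):

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	int fd = memfd_create("text", 0);
	unsigned char *text, *alias;

	ftruncate(fd, len);
	/* "kernel text": mapped read-only, as under KERNEXEC */
	text = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	/* writable alias at a different address: the ktla_ktva() role */
	alias = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	alias[16] = 0xf0;		/* patch one byte through the alias */

	/* verify through the read-only view, like the BUG_ON() loop above */
	assert(text[16] == 0xf0);
	printf("patched byte: %#x\n", text[16]);
	close(fd);
	return 0;
}
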
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index c4e3581..7e2f9d0 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
+ /*
+ * Debug level, exported for io_apic.c
+ */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+
+ int pic_mode;
+
+@@ -1857,7 +1857,7 @@ void smp_error_interrupt(struct pt_regs *regs)
+ apic_write(APIC_ESR, 0);
+ v1 = apic_read(APIC_ESR);
+ ack_APIC_irq();
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
+ smp_processor_id(), v0 , v1);
+diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
+index f7a41e4..be25d88 100644
+--- a/arch/x86/kernel/apic/apic_flat_64.c
++++ b/arch/x86/kernel/apic/apic_flat_64.c
+@@ -171,7 +171,7 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
+ return initial_apic_id >> index_msb;
+ }
+
+-static struct apic apic_flat = {
++static struct apic apic_flat __read_only = {
+ .name = "flat",
+ .probe = NULL,
+ .acpi_madt_oem_check = flat_acpi_madt_oem_check,
+@@ -327,7 +327,7 @@ static int physflat_probe(void)
+ return 0;
+ }
+
+-static struct apic apic_physflat = {
++static struct apic apic_physflat __read_only = {
+
+ .name = "physical flat",
+ .probe = physflat_probe,
+diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
+index 775b82b..841f78b 100644
+--- a/arch/x86/kernel/apic/apic_noop.c
++++ b/arch/x86/kernel/apic/apic_noop.c
+@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
+ WARN_ON_ONCE(cpu_has_apic && !disable_apic);
+ }
+
+-struct apic apic_noop = {
++struct apic apic_noop __read_only = {
+ .name = "noop",
+ .probe = noop_probe,
+ .acpi_madt_oem_check = NULL,
+diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
+index 521bead..a724871 100644
+--- a/arch/x86/kernel/apic/bigsmp_32.c
++++ b/arch/x86/kernel/apic/bigsmp_32.c
+@@ -193,7 +193,7 @@ static int probe_bigsmp(void)
+ return dmi_bigsmp;
+ }
+
+-static struct apic apic_bigsmp = {
++static struct apic apic_bigsmp __read_only = {
+
+ .name = "bigsmp",
+ .probe = probe_bigsmp,
+diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
+index 5d513bc..6a51935 100644
+--- a/arch/x86/kernel/apic/es7000_32.c
++++ b/arch/x86/kernel/apic/es7000_32.c
+@@ -619,8 +619,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
+ return ret && es7000_apic_is_cluster();
+ }
+
+-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
+-static struct apic __refdata apic_es7000_cluster = {
++static struct apic apic_es7000_cluster __read_only = {
+
+ .name = "es7000",
+ .probe = probe_es7000,
+@@ -685,7 +684,7 @@ static struct apic __refdata apic_es7000_cluster = {
+ .x86_32_early_logical_apicid = es7000_early_logical_apicid,
+ };
+
+-static struct apic __refdata apic_es7000 = {
++static struct apic apic_es7000 __read_only = {
+
+ .name = "es7000",
+ .probe = probe_es7000,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 6d939d7..75d1260 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
+ raw_spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ raw_spin_unlock(&vector_lock);
+ }
+@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
+ ack_APIC_irq();
+ }
+
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+
+ static void ack_apic_level(struct irq_data *data)
+ {
+@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
+ * at the cpu.
+ */
+ if (!(v & (1 << (i & 0x1f)))) {
+- atomic_inc(&irq_mis_count);
++ atomic_inc_unchecked(&irq_mis_count);
+
+ eoi_ioapic_irq(irq, cfg);
+ }
+@@ -2634,13 +2634,16 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+
+ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+ {
+- chip->irq_print_chip = ir_print_prefix;
+- chip->irq_ack = ir_ack_apic_edge;
+- chip->irq_eoi = ir_ack_apic_level;
++ pax_open_kernel();
++ *(void **)&chip->irq_print_chip = ir_print_prefix;
++ *(void **)&chip->irq_ack = ir_ack_apic_edge;
++ *(void **)&chip->irq_eoi = ir_ack_apic_level;
+
+ #ifdef CONFIG_SMP
+- chip->irq_set_affinity = ir_ioapic_set_affinity;
++ *(void **)&chip->irq_set_affinity = ir_ioapic_set_affinity;
+ #endif
++
++ pax_close_kernel();
+ }
+ #endif /* CONFIG_IRQ_REMAP */
+
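
The irq_remap_modify_chip_defaults() hunk shows the companion idiom: once ops structures are constified, their members can only be rewritten deliberately, through a cast, and only inside a pax_open_kernel() window. A small sketch of the cast itself, done here on ordinary writable storage so it stays legal (note it relies on function and data pointers sharing a representation, which holds on x86 but is not guaranteed by ISO C):

#include <stdio.h>

struct irq_chip {
	void (*irq_ack)(void);
};

static void new_ack(void) { puts("ack"); }

int main(void)
{
	static struct irq_chip chip;

	/* chip.irq_ack = new_ack would be rejected if the struct type
	 * were constified; the void ** cast sidesteps the type check */
	*(void **)&chip.irq_ack = (void *)new_ack;

	chip.irq_ack();
	return 0;
}
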
+diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
+index c4a61ca..4c63d32 100644
+--- a/arch/x86/kernel/apic/numaq_32.c
++++ b/arch/x86/kernel/apic/numaq_32.c
+@@ -472,8 +472,7 @@ static void numaq_setup_portio_remap(void)
+ (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+ }
+
+-/* Use __refdata to keep false positive warning calm. */
+-static struct apic __refdata apic_numaq = {
++static struct apic apic_numaq __read_only = {
+
+ .name = "NUMAQ",
+ .probe = probe_numaq,
+diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
+index 0787bb3..e222a80 100644
+--- a/arch/x86/kernel/apic/probe_32.c
++++ b/arch/x86/kernel/apic/probe_32.c
+@@ -87,7 +87,7 @@ static int probe_default(void)
+ return 1;
+ }
+
+-static struct apic apic_default = {
++static struct apic apic_default __read_only = {
+
+ .name = "default",
+ .probe = probe_default,
+diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
+index 1911442..2424a83 100644
+--- a/arch/x86/kernel/apic/summit_32.c
++++ b/arch/x86/kernel/apic/summit_32.c
+@@ -491,7 +491,7 @@ void setup_summit(void)
+ }
+ #endif
+
+-static struct apic apic_summit = {
++static struct apic apic_summit __read_only = {
+
+ .name = "summit",
+ .probe = probe_summit,
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 5007958..2eba140 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block __refdata x2apic_cpu_notifier = {
++static struct notifier_block x2apic_cpu_notifier = {
+ .notifier_call = update_clusterinfo,
+ };
+
+@@ -208,7 +208,7 @@ static int x2apic_cluster_probe(void)
+ return 0;
+ }
+
+-static struct apic apic_x2apic_cluster = {
++static struct apic apic_x2apic_cluster __read_only = {
+
+ .name = "cluster x2apic",
+ .probe = x2apic_cluster_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index db4f704..2d4f409 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -121,7 +121,7 @@ static int x2apic_phys_probe(void)
+ return apic == &apic_x2apic_phys;
+ }
+
+-static struct apic apic_x2apic_phys = {
++static struct apic apic_x2apic_phys __read_only = {
+
+ .name = "physical x2apic",
+ .probe = x2apic_phys_probe,
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 79b05b8..bc1d972 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -346,7 +346,7 @@ static int uv_probe(void)
+ return apic == &apic_x2apic_uv_x;
+ }
+
+-static struct apic __refdata apic_x2apic_uv_x = {
++static struct apic apic_x2apic_uv_x __read_only = {
+
+ .name = "UV large system",
+ .probe = uv_probe,
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index a46bd38..6b906d7 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
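
All three apm_32.c call sites above bracket GDT slot writes with pax_open_kernel()/pax_close_kernel(), since KERNEXEC keeps the GDT read-only between updates. A rough userspace analogue built on mprotect() (assumed names throughout; the real primitives toggle CR0.WP or the page-table write bit rather than calling mprotect()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t *table;		/* stands in for the per-cpu GDT */
static size_t tbl_bytes;

static void open_kernel(void)  { mprotect(table, tbl_bytes, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(table, tbl_bytes, PROT_READ); }

int main(void)
{
	tbl_bytes = (size_t)sysconf(_SC_PAGESIZE);
	table = mmap(NULL, tbl_bytes, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(table, 0, tbl_bytes);
	close_kernel();			/* read-only is the resting state */

	open_kernel();			/* brief writable window */
	table[0x40 / 8] = 0x4093;	/* install the "bad BIOS" descriptor */
	close_kernel();

	printf("slot 0x40 = %#llx\n", (unsigned long long)table[0x40 / 8]);
	return 0;
}
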
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 4f13faf..87db5d2 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -33,6 +33,8 @@ void common(void) {
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TI_preempt_count, thread_info, preempt_count);
++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -53,8 +55,26 @@ void common(void) {
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++#ifdef CONFIG_X86_64
++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
++#endif
++
++#endif
++
++ BLANK();
++ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
++
+ #ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index e72a119..6e2955d 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -69,6 +69,7 @@ int main(void)
+ BLANK();
+ #undef ENTRY
+
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ BLANK();
+
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 25f24dc..4094a7f 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index f07becc..b17b101 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -694,7 +694,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+ unsigned int size)
+ {
+ /* AMD errata T13 (order #21922) */
+- if ((c->x86 == 6)) {
++ if (c->x86 == 6) {
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
+ size = 64;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ca93cc7..def63d0 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
+
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+
+@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+ EXPORT_PER_CPU_SYMBOL(current_task);
+
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ oist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- x86_configure_nx();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 3e6ff6c..54b4992 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index 0e89635..f0a7525 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -984,6 +984,22 @@ static struct attribute *default_attrs[] = {
+ };
+
+ #ifdef CONFIG_AMD_NB
++static struct attribute *default_attrs_amd_nb[] = {
++ &type.attr,
++ &level.attr,
++ &coherency_line_size.attr,
++ &physical_line_partition.attr,
++ &ways_of_associativity.attr,
++ &number_of_sets.attr,
++ &size.attr,
++ &shared_cpu_map.attr,
++ &shared_cpu_list.attr,
++ NULL,
++ NULL,
++ NULL,
++ NULL
++};
++
+ static struct attribute ** __cpuinit amd_l3_attrs(void)
+ {
+ static struct attribute **attrs;
+@@ -994,18 +1010,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
+
+ n = sizeof (default_attrs) / sizeof (struct attribute *);
+
+- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+- n += 2;
+-
+- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+- n += 1;
+-
+- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+- if (attrs == NULL)
+- return attrs = default_attrs;
+-
+- for (n = 0; default_attrs[n]; n++)
+- attrs[n] = default_attrs[n];
++ attrs = default_attrs_amd_nb;
+
+ if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+ attrs[n++] = &cache_disable_0.attr;
+@@ -1056,6 +1061,13 @@ static struct kobj_type ktype_cache = {
+ .default_attrs = default_attrs,
+ };
+
++#ifdef CONFIG_AMD_NB
++static struct kobj_type ktype_cache_amd_nb = {
++ .sysfs_ops = &sysfs_ops,
++ .default_attrs = default_attrs_amd_nb,
++};
++#endif
++
+ static struct kobj_type ktype_percpu_entry = {
+ .sysfs_ops = &sysfs_ops,
+ };
+@@ -1121,20 +1133,26 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ return retval;
+ }
+
++#ifdef CONFIG_AMD_NB
++ amd_l3_attrs();
++#endif
++
+ for (i = 0; i < num_cache_leaves; i++) {
++ struct kobj_type *ktype;
++
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
+ this_object->cpu = cpu;
+ this_object->index = i;
+
+ this_leaf = CPUID4_INFO_IDX(cpu, i);
+
+- ktype_cache.default_attrs = default_attrs;
++ ktype = &ktype_cache;
+ #ifdef CONFIG_AMD_NB
+ if (this_leaf->base.nb)
+- ktype_cache.default_attrs = amd_l3_attrs();
++ ktype = &ktype_cache_amd_nb;
+ #endif
+ retval = kobject_init_and_add(&(this_object->kobj),
+- &ktype_cache,
++ ktype,
+ per_cpu(ici_cache_kobject, cpu),
+ "index%1lu", i);
+ if (unlikely(retval)) {
+@@ -1189,7 +1207,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
++static struct notifier_block cacheinfo_cpu_notifier = {
+ .notifier_call = cacheinfo_cpu_callback,
+ };
+
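
The intel_cacheinfo.c rework above replaces a kzalloc()'d attribute array with a fixed default_attrs_amd_nb that reserves trailing NULL slots, filled in only when the optional L3 features are present. A sketch of that reserve-and-fill shape (the attribute names are just labels):

#include <stdio.h>

/* three spare slots plus the NULL terminator, as in the hunk */
static const char *attrs[] = {
	"type", "level", "size", "shared_cpu_map",
	NULL, NULL, NULL, NULL
};

int main(void)
{
	int n = 4;			/* index of the first spare slot */
	int have_index_disable = 1, have_partitioning = 1;

	if (have_index_disable) {
		attrs[n++] = "cache_disable_0";
		attrs[n++] = "cache_disable_1";
	}
	if (have_partitioning)
		attrs[n++] = "subcaches";

	for (int i = 0; attrs[i]; i++)
		printf("%s\n", attrs[i]);
	return 0;
}

Because the array is sized for the worst case and ends with a spare NULL, the list stays terminated no matter which optional entries get filled in, which is what lets the per-boot allocation go away.
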
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 3b67877..77e760c 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -42,6 +42,7 @@
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -200,7 +201,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
+
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ /*
+ * Make sure only one CPU runs in machine check panic
+ */
+- if (atomic_inc_return(&mce_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+ wait_for_panic();
+ barrier();
+
+@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ console_verbose();
+ } else {
+ /* Don't log too much for fake panic */
+- if (atomic_inc_return(&mce_fake_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+ return;
+ }
+ /* First print corrected ones that are still unlogged */
+@@ -307,7 +308,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ if (!fake_panic) {
+ if (panic_timeout == 0)
+ panic_timeout = mce_panic_timeout;
+- panic(msg);
++ panic("%s", msg);
+ } else
+ pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
+ }
+@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
+ * might have been modified by someone else.
+ */
+ rmb();
+- if (atomic_read(&mce_paniced))
++ if (atomic_read_unchecked(&mce_paniced))
+ wait_for_panic();
+ if (!monarch_timeout)
+ goto out;
+@@ -1404,7 +1405,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ }
+
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+ unexpected_machine_check;
+
+ /*
+@@ -1427,7 +1428,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+ return;
+ }
+
++ pax_open_kernel();
+ machine_check_vector = do_machine_check;
++ pax_close_kernel();
+
+ __mcheck_cpu_init_generic();
+ __mcheck_cpu_init_vendor(c);
+@@ -1441,7 +1444,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+ */
+
+ static DEFINE_SPINLOCK(mce_chrdev_state_lock);
+-static int mce_chrdev_open_count; /* #times opened */
++static local_t mce_chrdev_open_count; /* #times opened */
+ static int mce_chrdev_open_exclu; /* already open exclusive? */
+
+ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1449,7 +1452,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+ spin_lock(&mce_chrdev_state_lock);
+
+ if (mce_chrdev_open_exclu ||
+- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
++ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_chrdev_state_lock);
+
+ return -EBUSY;
+@@ -1457,7 +1460,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+
+ if (file->f_flags & O_EXCL)
+ mce_chrdev_open_exclu = 1;
+- mce_chrdev_open_count++;
++ local_inc(&mce_chrdev_open_count);
+
+ spin_unlock(&mce_chrdev_state_lock);
+
+@@ -1468,7 +1471,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_chrdev_state_lock);
+
+- mce_chrdev_open_count--;
++ local_dec(&mce_chrdev_open_count);
+ mce_chrdev_open_exclu = 0;
+
+ spin_unlock(&mce_chrdev_state_lock);
+@@ -2099,7 +2102,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
++static struct notifier_block mce_cpu_notifier = {
+ .notifier_call = mce_cpu_callback,
+ };
+
+@@ -2109,7 +2112,7 @@ static __init void mce_init_banks(void)
+
+ for (i = 0; i < banks; i++) {
+ struct mce_bank *b = &mce_banks[i];
+- struct sysdev_attribute *a = &b->attr;
++ sysdev_attribute_no_const *a = &b->attr;
+
+ sysfs_attr_init(&a->attr);
+ a->attr.name = b->attrname;
+@@ -2177,7 +2180,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+ cpu_missing = 0;
+- atomic_set(&mce_fake_paniced, 0);
++ atomic_set_unchecked(&mce_fake_paniced, 0);
+ atomic_set(&mce_executing, 0);
+ atomic_set(&mce_callin, 0);
+ atomic_set(&global_nwo, 0);
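
Above, mce_chrdev_open_count moves from a plain int to a local_t so the open/release bookkeeping uses architecture-atomic increments. A C11 sketch of the same open-counting logic (simplified: the kernel version also holds mce_chrdev_state_lock across the whole check, which this model omits in favor of atomics alone):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long open_count;
static bool open_exclu;

static int dev_open(bool excl)
{
	if (open_exclu || (atomic_load(&open_count) && excl))
		return -1;			/* -EBUSY in the kernel */
	if (excl)
		open_exclu = true;
	atomic_fetch_add(&open_count, 1);	/* local_inc() analogue */
	return 0;
}

static void dev_release(void)
{
	atomic_fetch_sub(&open_count, 1);	/* local_dec() analogue */
	open_exclu = false;
}

int main(void)
{
	printf("open: %d\n", dev_open(false));			/* 0 */
	printf("exclusive while open: %d\n", dev_open(true));	/* -1 */
	dev_release();
	printf("exclusive now: %d\n", dev_open(true));		/* 0 */
	return 0;
}
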
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 5c0e653..0882b0a 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -12,6 +12,7 @@
+ #include <asm/system.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* By default disabled */
+ int mce_p5_enabled __read_mostly;
+@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+ if (!cpu_has(c, X86_FEATURE_MCE))
+ return;
+
++ pax_open_kernel();
+ machine_check_vector = pentium_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index ce04b58..b84acbd 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -290,7 +290,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
++static struct notifier_block thermal_throttle_cpu_notifier =
+ {
+ .notifier_call = thermal_throttle_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index 54060f5..c1a7577 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -11,6 +11,7 @@
+ #include <asm/system.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* Machine check handler for WinChip C6: */
+ static void winchip_machine_check(struct pt_regs *regs, long error_code)
+@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
+ {
+ u32 lo, hi;
+
++ pax_open_kernel();
+ machine_check_vector = winchip_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 6b96110..0da73eb 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+ const struct mtrr_ops *mtrr_if;
+
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index df5e41f..816c719 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -25,7 +25,7 @@ struct mtrr_ops {
+ int (*validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+ int (*have_wrcomb)(void);
+-};
++} __do_const;
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg);
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 2bda212..78cc605 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ break;
+
+ perf_callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (const void __force_user *)frame.next_frame;
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
+index 212a6a4..322f5d9 100644
+--- a/arch/x86/kernel/cpuid.c
++++ b/arch/x86/kernel/cpuid.c
+@@ -172,7 +172,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block __refdata cpuid_class_cpu_notifier =
++static struct notifier_block cpuid_class_cpu_notifier =
+ {
+ .notifier_call = cpuid_class_cpu_callback,
+ };
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 69e231b..8b4e1c6 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ struct pt_regs fixed_regs;
+-#endif
+
+-#ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
+index afa64ad..dce67dd 100644
+--- a/arch/x86/kernel/crash_dump_64.c
++++ b/arch/x86/kernel/crash_dump_64.c
+@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ return -ENOMEM;
+
+ if (userbuf) {
+- if (copy_to_user(buf, vaddr + offset, csize)) {
++ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
+ iounmap(vaddr);
+ return -EFAULT;
+ }
+diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
+index 37250fe..bf2ec74 100644
+--- a/arch/x86/kernel/doublefault_32.c
++++ b/arch/x86/kernel/doublefault_32.c
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 1aae78f..138ca1b 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -2,6 +2,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ {
+- struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ { }
+ #endif
+
+@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+- void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+- void *t = tinfo;
+ if (end) {
+ if (p < end && p >= (end-THREAD_SIZE))
+ return 1;
+@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
+ }
+
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+ {
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+ unsigned long addr;
+
+ addr = *stack;
+@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
+ } else {
+ ops->address(data, addr, 0);
+ }
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+ stack++;
+ }
+@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
+ EXPORT_SYMBOL_GPL(print_context_stack);
+
+ unsigned long
+-print_context_stack_bp(struct thread_info *tinfo,
++print_context_stack_bp(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+ struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long *ret_addr = &frame->return_address;
+
+- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
++ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
+ unsigned long addr = *ret_addr;
+
+ if (!__kernel_text_address(addr))
+@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+
+ return (unsigned long)frame;
+@@ -147,7 +147,7 @@ static int print_trace_stack(void *data, char *name)
+ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+ touch_nmi_watchdog();
+- printk(data);
++ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+ }
+
+@@ -186,7 +186,7 @@ void dump_stack(void)
+
+ bp = stack_frame(current, NULL);
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
+ }
+ EXPORT_SYMBOL_GPL(oops_begin);
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+ if (regs && kexec_should_crash(current))
+@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++
++ gr_handle_kernel_exploit();
++
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index c99f9ed..025ebd3 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ bp = stack_frame(task, regs);
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
+ printk(KERN_CONT " Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk(KERN_CONT "<%02x> ", c);
+ else
+ printk(KERN_CONT "%02x ", c);
+@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_left;
++
++ /* all kernel stacks are of the same size */
++ stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
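
The 32-bit pax_check_alloca() above leans on a small invariant: every kernel stack is THREAD_SIZE-aligned and THREAD_SIZE is a power of two, so sp & (THREAD_SIZE - 1) is exactly the space remaining below the stack pointer. A worked sketch of the arithmetic and the 256-byte guard (the 8 KiB THREAD_SIZE is an assumed stand-in, not the kernel's configured value):

#include <stdio.h>

#define THREAD_SIZE 8192UL		/* assumed: power of two */

int main(void)
{
	unsigned long base = 0x100000;	/* THREAD_SIZE-aligned stack base */
	unsigned long sp = base + 0x1a40;
	unsigned long stack_left = sp & (THREAD_SIZE - 1);	/* 0x1a40 */
	unsigned long size = 0x1000;	/* hypothetical alloca() request */

	printf("stack_left = %lu bytes\n", stack_left);
	/* the patch requires stack_left >= 256 and size < stack_left - 256,
	 * keeping a 256-byte guard below any variable-length allocation */
	printf("alloca of %lu bytes: %s\n", size,
	       (stack_left >= 256 && size < stack_left - 256) ? "ok" : "BUG()");
	return 0;
}
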
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 6d728d9..80f1867 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
+ unsigned long dummy;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- tinfo = task_thread_info(task);
+ for (;;) {
+ char *id;
+ unsigned long *estack_end;
++
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (ops->stack(data, id) < 0)
+ break;
+
+- bp = ops->walk_stack(tinfo, stack, bp, ops,
++ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+ data, estack_end, &graph);
+ ops->stack(data, "<EOE>");
+ /*
+@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * second-to-last pointer (index -2 to end) in the
+ * exception stack:
+ */
++ if ((u16)estack_end[-1] != __KERNEL_DS)
++ goto out;
+ stack = (unsigned long *) estack_end[-2];
+ continue;
+ }
+@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+- bp = ops->walk_stack(tinfo, stack, bp,
++ bp = ops->walk_stack(task, irq_stack, stack, bp,
+ ops, data, irq_stack_end, &graph);
+ /*
+ * We link to the next stack (which would be
+@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ /*
+ * This handles the process stack:
+ */
+- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
++out:
+ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -249,7 +253,7 @@ void show_registers(struct pt_regs *regs)
+ {
+ int i;
+ unsigned long sp;
+- const int cpu = smp_processor_id();
++ const int cpu = raw_smp_processor_id();
+ struct task_struct *cur = current;
+
+ sp = regs->sp;
+@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
+
+ return ud2 == 0x0b0f;
+ }
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++ unsigned cpu, used;
++ char *id;
++
++ /* check the process stack first */
++ stack_start = (unsigned long)task_stack_page(current);
++ stack_end = stack_start + THREAD_SIZE;
++ if (likely(stack_start <= sp && sp < stack_end)) {
++ unsigned long stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ cpu = get_cpu();
++
++ /* check the irq stacks */
++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++ stack_start = stack_end - IRQ_STACK_SIZE;
++ if (stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ /* check the exception stacks */
++ used = 0;
++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++ stack_start = stack_end - EXCEPTION_STKSZ;
++ if (stack_end && stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ put_cpu();
++
++ /* unknown stack */
++ BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
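The 64-bit variant above has to work harder because a kernel %rsp may live on the process stack, the per-cpu IRQ stack, or one of the per-cpu exception stacks, each with its own size; only when all three probes miss does it BUG(). A compile-checked C sketch of that lookup order, with the layout constants and the struct standing in for the real per-task/per-cpu state:

    #define THREAD_SIZE     8192UL   /* illustrative; the real values are per-arch */
    #define IRQ_STACK_SIZE 16384UL

    struct stacks {
            unsigned long task_stack;  /* task_stack_page(current) */
            unsigned long irq_top;     /* per_cpu(irq_stack_ptr, cpu) */
    };

    enum kstack { PROCESS_STACK, IRQ_STACK, EXCEPTION_STACK, UNKNOWN_STACK };

    static enum kstack classify_sp(const struct stacks *s, unsigned long sp)
    {
            if (s->task_stack <= sp && sp < s->task_stack + THREAD_SIZE)
                    return PROCESS_STACK;
            if (s->irq_top - IRQ_STACK_SIZE <= sp && sp < s->irq_top)
                    return IRQ_STACK;
            /* the exception stacks would be probed here through
             * in_exception_stack(), as in the hunk; omitted */
            return UNKNOWN_STACK;      /* pax_check_alloca() BUG()s here */
    }

    int main(void)
    {
            struct stacks s = { 0x10000UL, 0x20000UL };
            return classify_sp(&s, 0x10080UL) != PROCESS_STACK;
    }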
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 303a0e4..0aad351 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -829,8 +829,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
+
+ static void early_panic(char *msg)
+ {
+- early_printk(msg);
+- panic(msg);
++ early_printk("%s", msg);
++ panic("%s", msg);
+ }
+
+ static int userdef __initdata;
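The two-line e820 change is a plain format-string fix: the message handed to early_panic() can carry '%' characters (a malformed memmap= string, for instance), so it must travel as an argument, never as the format itself. The same pattern in ordinary C:

    #include <stdio.h>

    /* the hazard the hunk removes: a '%' conversion in msg makes
     * printf() read varargs that were never passed */
    static void panic_unsafe(const char *msg) { printf(msg); }

    /* the fix: pin the format and pass the untrusted string through %s */
    static void panic_safe(const char *msg) { printf("%s", msg); }

    int main(void)
    {
            panic_safe("bad e820 entry: 100% overlap\n"); /* '%' printed as-is */
            (void)panic_unsafe;  /* would misparse "% o" as a conversion */
            return 0;
    }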
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index cd28a35..c72ed9a 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index d2d488b8..a4f589f 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -180,13 +180,153 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %ecx
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %ecx
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl_cfi %fs
+@@ -209,7 +349,7 @@
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -217,6 +357,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+@@ -302,7 +451,7 @@ ENTRY(ret_from_fork)
+ popfl_cfi
+ jmp syscall_exit
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Interrupt exit functions should be protected against kprobes
+@@ -336,7 +485,15 @@ resume_userspace_sig:
+ andl $SEGMENT_RPL_MASK, %eax
+ #endif
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ pax_exit_kernel
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -348,8 +505,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -364,7 +521,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+ CFI_ENDPROC
+ /*
+@@ -398,23 +555,34 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
++ pushl_cfi $0
+ CFI_REL_OFFSET eip, 0
+
+ pushl_cfi %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp),%ebp
++ movl %ebp,PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -423,6 +591,10 @@ sysenter_past_esp:
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -437,12 +609,24 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl_cfi %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl_cfi %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -459,6 +643,9 @@ sysenter_audit:
+ movl %eax,%edx /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ pushl_cfi %ebx
+ movl PT_EAX(%esp),%eax /* reload syscall number */
+ jmp sysenter_do_call
+@@ -485,11 +672,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
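Two things happen in the sysenter hunks above. First, the UDEREF branch fetches the sixth syscall argument through %ds temporarily loaded with the user's stack segment, so the segment limit, which ends at the user/kernel split, performs the bounds check in hardware; the non-UDEREF path keeps the explicit compare against __PAGE_OFFSET-3. Second, because the exit path now reloads %ds and %es as well as %fs, the fixup labels are renumbered 4/5/6 and the __ex_table gains entries for all three segment loads. The software check the segment-limit trick replaces, as C (the split address assumes the default 3G/1G layout):

    #define PAGE_OFFSET 0xc0000000UL  /* default i386 user/kernel split */

    /* cmpl $__PAGE_OFFSET-3,%ebp; jae syscall_fault: the 4-byte load of
     * the sixth argument must not cross into kernel space */
    static int sixth_arg_ok(unsigned long user_ebp)
    {
            return user_ebp <= PAGE_OFFSET - 4;
    }

    int main(void)
    {
            return !sixth_arg_ok(0x1000UL) || sixth_arg_ok(PAGE_OFFSET);
    }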
+@@ -504,6 +697,11 @@ ENTRY(system_call)
+ pushl_cfi %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -522,6 +720,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -581,14 +788,34 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $16, %esi
++ mov %esi, %cr0
++#endif
++
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $16, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl_cfi $__ESPFIX_SS
+ pushl_cfi %eax /* new kernel esp */
+ /* Disable interrupts, but do not irqtrace this section: we
+@@ -617,34 +844,28 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl_cfi %ecx
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -652,11 +873,14 @@ syscall_trace_entry:
+ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -669,20 +893,24 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+ CFI_ENDPROC
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+ CFI_ENDPROC
+ /*
+ * End of kprobes section
+@@ -756,6 +984,36 @@ ptregs_clone:
+ CFI_ENDPROC
+ ENDPROC(ptregs_clone)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ CFI_STARTPROC
++ pushl_cfi %ebp
++ sub $PT_OLDSS+4,%esp
++ pushl_cfi %edi
++ pushl_cfi %ecx
++ pushl_cfi %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ popl_cfi %eax
++ popl_cfi %ecx
++ popl_cfi %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ pushl_cfi %esp
++ call sys_execve
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
++ popl_cfi %ebp
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_execve)
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -765,8 +1023,15 @@ ENDPROC(ptregs_clone)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl_cfi $__KERNEL_DS
+@@ -819,7 +1084,7 @@ vector=vector+1
+ .endr
+ 2: jmp common_interrupt
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -867,7 +1132,7 @@ ENTRY(coprocessor_error)
+ pushl_cfi $do_coprocessor_error
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
+@@ -888,7 +1153,7 @@ ENTRY(simd_coprocessor_error)
+ #endif
+ jmp error_code
+ CFI_ENDPROC
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ RING0_INT_FRAME
+@@ -896,7 +1161,7 @@ ENTRY(device_not_available)
+ pushl_cfi $do_device_not_available
+ jmp error_code
+ CFI_ENDPROC
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+@@ -905,12 +1170,12 @@ ENTRY(native_iret)
+ .align 4
+ .long native_iret, iret_exc
+ .previous
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -919,7 +1184,7 @@ ENTRY(overflow)
+ pushl_cfi $do_overflow
+ jmp error_code
+ CFI_ENDPROC
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ RING0_INT_FRAME
+@@ -927,7 +1192,7 @@ ENTRY(bounds)
+ pushl_cfi $do_bounds
+ jmp error_code
+ CFI_ENDPROC
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ RING0_INT_FRAME
+@@ -935,7 +1200,7 @@ ENTRY(invalid_op)
+ pushl_cfi $do_invalid_op
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
+@@ -943,35 +1208,35 @@ ENTRY(coprocessor_segment_overrun)
+ pushl_cfi $do_coprocessor_segment_overrun
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ RING0_EC_FRAME
+ pushl_cfi $do_invalid_TSS
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ RING0_EC_FRAME
+ pushl_cfi $do_segment_not_present
+ jmp error_code
+ CFI_ENDPROC
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ RING0_EC_FRAME
+ pushl_cfi $do_stack_segment
+ jmp error_code
+ CFI_ENDPROC
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ RING0_EC_FRAME
+ pushl_cfi $do_alignment_check
+ jmp error_code
+ CFI_ENDPROC
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+@@ -979,7 +1244,7 @@ ENTRY(divide_error)
+ pushl_cfi $do_divide_error
+ jmp error_code
+ CFI_ENDPROC
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -988,7 +1253,7 @@ ENTRY(machine_check)
+ pushl_cfi machine_check_vector
+ jmp error_code
+ CFI_ENDPROC
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -997,7 +1262,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl_cfi $do_spurious_interrupt_bug
+ jmp error_code
+ CFI_ENDPROC
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+ /*
+ * End of kprobes section
+ */
+@@ -1113,7 +1378,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -1142,7 +1407,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+
+@@ -1178,7 +1443,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1199,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1213,7 +1478,6 @@ return_to_handler:
+ jmp *%ecx
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1259,15 +1523,18 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1309,7 +1576,7 @@ debug_stack_correct:
+ call do_debug
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1346,6 +1613,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1382,12 +1652,15 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+ jmp irq_return
+ CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ RING0_INT_FRAME
+@@ -1399,14 +1672,14 @@ ENTRY(int3)
+ call do_int3
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ RING0_EC_FRAME
+ pushl_cfi $do_general_protection
+ jmp error_code
+ CFI_ENDPROC
+-END(general_protection)
++ENDPROC(general_protection)
+
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+@@ -1414,7 +1687,7 @@ ENTRY(async_page_fault)
+ pushl_cfi $do_async_page_fault
+ jmp error_code
+ CFI_ENDPROC
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+
+ /*
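Most of the entry_32.S churn is mechanical (END becomes ENDPROC so the symbols get proper function type, __SAVE_ALL is parameterized on the data segment, pax_enter_kernel/pax_exit_kernel manage cr0.WP around the KERNEXEC code segment switch), but pax_erase_kstack is a genuine algorithm: starting from the deepest point the stack has reached (TI_lowest_stack), it scans downward for a run of 2*16 untouched poison words, then refills everything from there up to the live frame with the -0xBEEF sentinel, so stale stack contents cannot leak into a later syscall. Roughly, in C (the poison is -0xBEEF, i.e. 0xffff4111 on 32-bit; the pointer parameters stand in for the registers the assembly uses):

    #define POISON  ((unsigned long)-0xBEEF)  /* 0xffff4111 on i386 */
    #define RUN_LEN (2 * 16)                  /* the 'cmp $2*16, %ecx' above */

    static void erase_kstack(unsigned long *stack_bottom,
                             unsigned long *lowest,
                             unsigned long *sp)
    {
            unsigned long *p = lowest;
            unsigned int run = 0;

            /* the repne/repe scasl pair: walk down until RUN_LEN
             * consecutive poison words show up, i.e. until we are below
             * anything written since the last erase */
            while (p > stack_bottom && run < RUN_LEN)
                    run = (*--p == POISON) ? run + 1 : 0;

            /* the rep stosl: repoison up to the live frame */
            while (p < sp)
                    *p++ = POISON;
    }

    int main(void)
    {
            unsigned long stack[64];
            int i;

            for (i = 0; i < 64; i++)
                    stack[i] = POISON;
            stack[40] = 0xdead;                    /* a stale secret */
            erase_kstack(stack, &stack[48], &stack[56]);
            return stack[40] != POISON;            /* 0: erased */
    }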
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 6274f5f..65df16d 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -55,6 +55,8 @@
+ #include <asm/paravirt.h>
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -68,8 +70,9 @@
+ #ifdef CONFIG_FUNCTION_TRACER
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(mcount)
++ pax_force_retaddr
+ retq
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+ ENTRY(mcount)
+@@ -112,6 +116,7 @@ ENTRY(mcount)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+
+ trace:
+@@ -121,12 +126,13 @@ trace:
+ movq 8(%rbp), %rsi
+ subq $MCOUNT_INSN_SIZE, %rdi
+
++ pax_force_fptr ftrace_trace_function
+ call *ftrace_trace_function
+
+ MCOUNT_RESTORE_FRAME
+
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
+
+ MCOUNT_RESTORE_FRAME
+
++ pax_force_retaddr
+ retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ GLOBAL(return_to_handler)
+ subq $24, %rsp
+@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $24, %rsp
++ pax_force_fptr %rdi
+ jmp *%rdi
+ #endif
+
+@@ -178,6 +186,285 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: ljmpq __KERNEL_CS,1b
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 4f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: GET_CR0_INTO_RDI
++ btr $16,%rdi
++ jnc 4f
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++4: ud2
++ jmp 4b
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ pushq %rdi
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ popq %rdi
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2:
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ jnc 3f
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++3: ud2
++ jmp 3b
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rcx
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rcx
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -231,8 +518,8 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+- addq $8*6, %rsp
+- CFI_ADJUST_CFA_OFFSET -(6*8)
++ addq $8*6 + ARG_SKIP, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
+ .endm
+
+ /*
+@@ -301,25 +588,26 @@ ENDPROC(native_usergs_sysret64)
+ /* save partial stack frame */
+ .macro SAVE_ARGS_IRQ
+ cld
+- /* start from rbp in pt_regs and jump over */
+- movq_cfi rdi, RDI-RBP
+- movq_cfi rsi, RSI-RBP
+- movq_cfi rdx, RDX-RBP
+- movq_cfi rcx, RCX-RBP
+- movq_cfi rax, RAX-RBP
+- movq_cfi r8, R8-RBP
+- movq_cfi r9, R9-RBP
+- movq_cfi r10, R10-RBP
+- movq_cfi r11, R11-RBP
++ /* start from r15 in pt_regs and jump over */
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
++ movq_cfi rcx, RCX
++ movq_cfi rax, RAX
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
++ movq_cfi r12, R12
+
+ /* Save rbp so that we can unwind from get_irq_regs() */
+- movq_cfi rbp, 0
++ movq_cfi rbp, RBP
+
+ /* Save previous stack value */
+ movq %rsp, %rsi
+
+- leaq -RBP(%rsp),%rdi /* arg1 for handler */
+- testl $3, CS(%rdi)
++ movq %rsp,%rdi /* arg1 for handler */
++ testb $3, CS(%rsi)
+ je 1f
+ SWAPGS
+ /*
+@@ -345,19 +633,22 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ ENTRY(save_rest)
+- PARTIAL_FRAME 1 REST_SKIP+8
+- movq 5*8+16(%rsp), %r11 /* save return address */
++ PARTIAL_FRAME 1 8
+ movq_cfi rbx, RBX+16
+ movq_cfi rbp, RBP+16
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ movq_cfi r12, R12+16
++#endif
++
+ movq_cfi r13, R13+16
+ movq_cfi r14, R14+16
+ movq_cfi r15, R15+16
+- movq %r11, 8(%rsp) /* return address */
+ FIXUP_TOP_OF_STACK %r11, 16
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(save_rest)
++ENDPROC(save_rest)
+
+ /* save complete stack frame */
+ .pushsection .kprobes.text, "ax"
+@@ -386,9 +677,10 @@ ENTRY(save_paranoid)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx,%ebx
+-1: ret
++1: pax_force_retaddr_bts
++ ret
+ CFI_ENDPROC
+-END(save_paranoid)
++ENDPROC(save_paranoid)
+ .popsection
+
+ /*
+@@ -410,7 +702,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -420,7 +712,7 @@ ENTRY(ret_from_fork)
+ jmp ret_from_sys_call # go to the SYSRET fastpath
+
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * System call entry. Up to 6 arguments in registers are supported.
+@@ -456,7 +748,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+@@ -469,12 +761,18 @@ ENTRY(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,0
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,0
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+@@ -503,6 +801,8 @@ sysret_check:
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++ pax_exit_kernel_user
++ pax_erase_kstack
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -561,6 +861,9 @@ auditsys:
+ movq %rax,%rsi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ LOAD_ARGS 0 /* reload call-clobbered registers */
+ jmp system_call_fastpath
+
+@@ -591,12 +894,15 @@ tracesys:
+ FIXUP_TOP_OF_STACK %rdi
+ movq %rsp,%rdi
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_enter() returned
+ * the value it wants us to use in the table lookup.
+ */
+- LOAD_ARGS ARGOFFSET, 1
++ LOAD_ARGS 1
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
+ ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
+@@ -612,7 +918,7 @@ tracesys:
+ GLOBAL(int_ret_from_sys_call)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_restore_args
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+@@ -623,7 +929,9 @@ GLOBAL(int_with_check)
+ andl %edi,%edx
+ jnz int_careful
+ andl $~TS_COMPAT,TI_status(%rcx)
+- jmp retint_swapgs
++ pax_exit_kernel_user
++ pax_erase_kstack
++ jmp retint_swapgs_pax
+
+ /* Either reschedule or signal or syscall exit tracking needed. */
+ /* First do a reschedule test. */
+@@ -669,7 +977,7 @@ int_restore_rest:
+ TRACE_IRQS_OFF
+ jmp int_with_check
+ CFI_ENDPROC
+-END(system_call)
++ENDPROC(system_call)
+
+ /*
+ * Certain special system calls that need to save a complete full stack frame.
+@@ -677,15 +985,13 @@ END(system_call)
+ .macro PTREGSCALL label,func,arg
+ ENTRY(\label)
+ PARTIAL_FRAME 1 8 /* offset 8: return address */
+- subq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+ call save_rest
+ DEFAULT_FRAME 0 8 /* offset 8: return address */
+ leaq 8(%rsp), \arg /* pt_regs pointer */
+ call \func
+ jmp ptregscall_common
+ CFI_ENDPROC
+-END(\label)
++ENDPROC(\label)
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+@@ -700,12 +1006,17 @@ ENTRY(ptregscall_common)
+ movq_cfi_restore R15+8, r15
+ movq_cfi_restore R14+8, r14
+ movq_cfi_restore R13+8, r13
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ movq_cfi_restore R12+8, r12
++#endif
++
+ movq_cfi_restore RBP+8, rbp
+ movq_cfi_restore RBX+8, rbx
+- ret $REST_SKIP /* pop extended registers */
++ pax_force_retaddr
++ ret
+ CFI_ENDPROC
+-END(ptregscall_common)
++ENDPROC(ptregscall_common)
+
+ ENTRY(stub_execve)
+ CFI_STARTPROC
+@@ -720,7 +1031,7 @@ ENTRY(stub_execve)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_execve)
++ENDPROC(stub_execve)
+
+ /*
+ * sigreturn is special because it needs to restore all registers on return.
+@@ -738,7 +1049,7 @@ ENTRY(stub_rt_sigreturn)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_rt_sigreturn)
++ENDPROC(stub_rt_sigreturn)
+
+ /*
+ * Build the entry stubs and pointer table with some assembler magic.
+@@ -773,7 +1084,7 @@ vector=vector+1
+ 2: jmp common_interrupt
+ .endr
+ CFI_ENDPROC
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -790,9 +1101,19 @@ END(interrupt)
+ /* 0(%rsp): ~(interrupt number) */
+ .macro interrupt func
+ /* reserve pt_regs for scratch regs and rbp */
+- subq $ORIG_RAX-RBP, %rsp
+- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
++ subq $ORIG_RAX, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX
+ SAVE_ARGS_IRQ
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -818,13 +1139,13 @@ ret_from_intr:
+ /* Restore saved previous stack */
+ popq %rsi
+ CFI_DEF_CFA_REGISTER rsi
+- leaq ARGOFFSET-RBP(%rsi), %rsp
++ movq %rsi, %rsp
+ CFI_DEF_CFA_REGISTER rsp
+- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
++ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
+
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -846,12 +1167,16 @@ retint_swapgs: /* return to user-space */
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
++retint_swapgs_pax:
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel
++ pax_force_retaddr (RIP-ARGOFFSET)
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -940,7 +1265,7 @@ ENTRY(retint_kernel)
+ #endif
+
+ CFI_ENDPROC
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+ /*
+ * End of kprobes section
+ */
+@@ -956,7 +1281,7 @@ ENTRY(\sym)
+ interrupt \do_sym
+ jmp ret_from_intr
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_SMP
+@@ -1021,12 +1346,22 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro paranoidzeroentry sym do_sym
+@@ -1038,15 +1373,25 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
+ .macro paranoidzeroentry_ist sym do_sym ist
+ ENTRY(\sym)
+ INTR_FRAME
+@@ -1056,14 +1401,30 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++ lea init_tss(%r13), %r13
++#else
++ lea init_tss(%rip), %r13
++#endif
+ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro errorentry sym do_sym
+@@ -1074,13 +1435,23 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ /* error code is on the stack already */
+@@ -1093,13 +1464,23 @@ ENTRY(\sym)
+ call save_paranoid
+ DEFAULT_FRAME 0
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ zeroentry divide_error do_divide_error
+@@ -1129,9 +1510,10 @@ gs_change:
+ 2: mfence /* workaround */
+ SWAPGS
+ popfq_cfi
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ .section __ex_table,"a"
+ .align 8
+@@ -1153,13 +1535,14 @@ ENTRY(kernel_thread_helper)
+ * Here we are in the child and the registers are set as they were
+ * at kernel_thread() invocation in the parent.
+ */
++ pax_force_fptr %rsi
+ call *%rsi
+ # exit
+ mov %eax, %edi
+ call do_exit
+ ud2 # padding for call trace
+ CFI_ENDPROC
+-END(kernel_thread_helper)
++ENDPROC(kernel_thread_helper)
+
+ /*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+@@ -1186,11 +1569,11 @@ ENTRY(kernel_execve)
+ RESTORE_REST
+ testq %rax,%rax
+ je int_ret_from_sys_call
+- RESTORE_ARGS
+ UNFAKE_STACK_FRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(kernel_execve)
++ENDPROC(kernel_execve)
+
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+@@ -1208,9 +1591,10 @@ ENTRY(call_softirq)
+ CFI_DEF_CFA_REGISTER rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(call_softirq)
++ENDPROC(call_softirq)
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+@@ -1248,7 +1632,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+ decl PER_CPU_VAR(irq_count)
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1307,7 +1691,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+ xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1356,16 +1740,31 @@ ENTRY(paranoid_exit)
+ TRACE_IRQS_OFF
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ TRACE_IRQS_IRETQ 0
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ 0
+ SWAPGS_UNSAFE_STACK
+ RESTORE_ALL 8
+ jmp irq_return
+ paranoid_restore:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ 0
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ paranoid_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1394,7 +1793,7 @@ paranoid_schedule:
+ TRACE_IRQS_OFF
+ jmp paranoid_userspace
+ CFI_ENDPROC
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+
+ /*
+ * Exception entry point. This expects an error code/orig_rax on the stack.
+@@ -1421,12 +1820,13 @@ ENTRY(error_entry)
+ movq_cfi r14, R14+8
+ movq_cfi r15, R15+8
+ xorl %ebx,%ebx
+- testl $3,CS+8(%rsp)
++ testb $3,CS+8(%rsp)
+ je error_kernelspace
+ error_swapgs:
+ SWAPGS
+ error_sti:
+ TRACE_IRQS_OFF
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -1453,7 +1853,7 @@ bstep_iret:
+ movq %rcx,RIP+8(%rsp)
+ jmp error_swapgs
+ CFI_ENDPROC
+-END(error_entry)
++ENDPROC(error_entry)
+
+
+ /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
+@@ -1473,7 +1873,7 @@ ENTRY(error_exit)
+ jnz retint_careful
+ jmp retint_swapgs
+ CFI_ENDPROC
+-END(error_exit)
++ENDPROC(error_exit)
+
+
+ /* runs on exception stack */
+@@ -1485,6 +1885,17 @@ ENTRY(nmi)
+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+ call save_paranoid
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
++
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp,%rdi
+ movq $-1,%rsi
+@@ -1495,12 +1906,28 @@ ENTRY(nmi)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz nmi_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz nmi_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ nmi_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ jmp irq_return
+ nmi_restore:
++ pax_exit_kernel
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ nmi_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1529,14 +1956,14 @@ nmi_schedule:
+ jmp paranoid_exit
+ CFI_ENDPROC
+ #endif
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(ignore_sysret)
+ CFI_STARTPROC
+ mov $-ENOSYS,%eax
+ sysret
+ CFI_ENDPROC
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+
+ /*
+ * End of kprobes section
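amd64 has no segment limits to lean on, so the UDEREF pair above rewrites the top-level page table on every kernel entry instead: pax_enter_kernel_user zeroes the low byte of each userland pgd entry, dropping the present bit so user space simply disappears while the kernel runs, and pax_exit_kernel_user writes back 0x67 (PRESENT|RW|USER|ACCESSED|DIRTY) on the way out. The byte stores from the two .rept loops, as C (USER_PGD_PTRS and the pgd size are illustrative):

    #include <stdint.h>

    #define USER_PGD_PTRS 256  /* userland half of a 512-entry pgd */

    /* movb $0,i*8(%rbx): with the present bit clear, any user-space
     * dereference from kernel mode faults immediately */
    static void hide_userland(uint8_t *pgd)
    {
            for (int i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = 0;
    }

    /* movb $0x67,i*8(%rbx): restore P|RW|USER|ACCESSED|DIRTY */
    static void expose_userland(uint8_t *pgd)
    {
            for (int i = 0; i < USER_PGD_PTRS; i++)
                    pgd[i * 8] = 0x67;
    }

    int main(void)
    {
            uint8_t pgd[512 * 8] = { 0 };

            expose_userland(pgd);
            hide_userland(pgd);
            return pgd[0];  /* 0: first user entry is non-present again */
    }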
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index c9a281f..3645760 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
+ static const void *mod_code_newcode; /* holds the text to write to the IP */
+
+ static unsigned nmi_wait_count;
+-static atomic_t nmi_update_count = ATOMIC_INIT(0);
++static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
+
+ int ftrace_arch_read_dyn_info(char *buf, int size)
+ {
+@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+- atomic_read(&nmi_update_count));
++ atomic_read_unchecked(&nmi_update_count));
+ return r;
+ }
+
+@@ -178,7 +178,7 @@ void ftrace_nmi_enter(void)
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
+ ftrace_mod_code();
+- atomic_inc(&nmi_update_count);
++ atomic_inc_unchecked(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+@@ -271,6 +271,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+ {
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, we need to protect against faulting
+@@ -327,7 +329,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
++ memcpy(old, ktla_ktva((void *)ftrace_call), MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+
+@@ -353,6 +355,8 @@ static int ftrace_mod_jmp(unsigned long ip,
+ {
+ unsigned char code[MCOUNT_INSN_SIZE];
+
++ ip = ktla_ktva(ip);
++
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
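Two PaX idioms surface in the ftrace hunks. nmi_update_count becomes atomic_unchecked_t because it is a pure statistic that may legitimately wrap, so it opts out of the overflow checking PaX adds to plain atomic_t. And ftrace_modify_code()/ftrace_mod_jmp() now translate the target through ktla_ktva() first: under KERNEXEC on i386 the kernel text executes through a differently based code segment, so the address a function pointer holds and the address the same bytes are read at differ by a constant, and text probing has to go through the translated alias. A sketch of that aliasing under stated assumptions (the offset is zero on configurations that do not shift the code segment, and its real value and direction come from the KERNEXEC layout):

    /* illustrative stand-in for the arch-defined constant */
    #define KERNEL_TEXT_OFFSET 0x0UL

    /* translate between the two views of kernel text; with a zero
     * offset this is the identity, as on non-KERNEXEC configs */
    static inline void *ktla_ktva(void *addr)
    {
            return (char *)addr + KERNEL_TEXT_OFFSET;
    }

    int main(void)
    {
            int x;
            return ktla_ktva(&x) != (void *)&x;  /* 0 when offset is zero */
    }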
+diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
+index 3bb0850..55a56f4 100644
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -19,6 +19,7 @@
+ #include <asm/io_apic.h>
+ #include <asm/bios_ebda.h>
+ #include <asm/tlbflush.h>
++#include <asm/boot.h>
+
+ static void __init i386_default_early_setup(void)
+ {
+@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
+ {
+ memblock_init();
+
+- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
++ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ /* Reserve INITRD */
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index ce0be7c..1252d68 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -25,6 +25,12 @@
+ /* Physical address */
+ #define pa(X) ((X) - __PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+@@ -54,11 +60,7 @@
+ * and small than max_low_pfn, otherwise will waste some page table entries
+ */
+
+-#if PTRS_PER_PMD > 1
+-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+-#else
+-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+-#endif
++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
+
+ /* Number of possible pages in the lowmem region */
+ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
+ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+ * can.
+ */
+ __HEAD
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jmp startup_32
++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
++.fill PAGE_SIZE-5,1,0xcc
++#endif
++
+ ENTRY(startup_32)
+ movl pa(stack_start),%ecx
+
+@@ -105,6 +120,59 @@ ENTRY(startup_32)
+ 2:
+ leal -__PAGE_OFFSET(%ecx),%esp
+
++#ifdef CONFIG_SMP
++ movl $pa(cpu_gdt_table),%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
++ rorl $16,%eax
++ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
++ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++1:
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $pa(boot_gdt),%edi
++ movl $__LOAD_PHYSICAL_ADDR,%eax
++ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
++ rorl $16,%eax
++ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
++ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
++ rorl $16,%eax
++
++ ljmp $(__BOOT_CS),$1f
++1:
++
++ movl $NR_CPUS,%ecx
++ movl $pa(cpu_gdt_table),%edi
++ addl $__PAGE_OFFSET,%eax
++1:
++ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
++ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
++ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
++ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
++ rorl $16,%eax
++ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
++ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
++ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
++ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
++ rorl $16,%eax
++ addl $PAGE_SIZE_asm,%edi
++ loop 1b
++#endif
++
+ /*
+ * Clear BSS first so that there are no surprises...
+ */
+@@ -195,8 +263,11 @@ ENTRY(startup_32)
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
++#endif
+ #else /* Not PAE */
+
+ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -226,8 +297,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ movl %eax, pa(max_pfn_mapped)
+
+ /* Do early initialization of the fixmap area */
+- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+- movl %eax,pa(initial_page_table+0xffc)
++#ifdef CONFIG_COMPAT_VDSO
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
++#else
++ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
++#endif
+ #endif
+
+ #ifdef CONFIG_PARAVIRT
+@@ -241,9 +315,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+- movl pa(subarch_entries)(,%eax,4), %eax
+- subl $__PAGE_OFFSET, %eax
+- jmp *%eax
++ jmp *pa(subarch_entries)(,%eax,4)
+
+ bad_subarch:
+ WEAK(lguest_entry)
+@@ -255,10 +327,10 @@ WEAK(xen_entry)
+ __INITDATA
+
+ subarch_entries:
+- .long default_entry /* normal x86/PC */
+- .long lguest_entry /* lguest hypervisor */
+- .long xen_entry /* Xen hypervisor */
+- .long default_entry /* Moorestown MID */
++ .long ta(default_entry) /* normal x86/PC */
++ .long ta(lguest_entry) /* lguest hypervisor */
++ .long ta(xen_entry) /* Xen hypervisor */
++ .long ta(default_entry) /* Moorestown MID */
+ num_subarch_entries = (. - subarch_entries) / 4
+ .previous
+ #else
+@@ -312,6 +384,7 @@ default_entry:
+ orl %edx,%eax
+ movl %eax,%cr4
+
++#ifdef CONFIG_X86_PAE
+ testb $X86_CR4_PAE, %al # check if PAE is enabled
+ jz 6f
+
+@@ -340,6 +413,9 @@ default_entry:
+ /* Make changes effective */
+ wrmsr
+
++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
++#endif
++
+ 6:
+
+ /*
+@@ -443,7 +519,7 @@ is386: movl $2,%ecx # set MP
+ 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movl %eax,%ss # after changing gdt.
+
+- movl $(__USER_DS),%eax # DS/ES contains default USER segment
++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
+ movl %eax,%ds
+ movl %eax,%es
+
+@@ -457,15 +533,22 @@ is386: movl $2,%ecx # set MP
+ */
+ cmpb $0,ready
+ jne 1f
+- movl $gdt_page,%eax
++ movl $cpu_gdt_table,%eax
+ movl $stack_canary,%ecx
++#ifdef CONFIG_SMP
++ addl $__per_cpu_load,%ecx
++#endif
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+ movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+ 1:
+-#endif
+ movl $(__KERNEL_STACK_CANARY),%eax
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS),%eax
++#else
++ xorl %eax,%eax
++#endif
+ movl %eax,%gs
+
+ xorl %eax,%eax # Clear LDT
+@@ -558,22 +641,22 @@ early_page_fault:
+ jmp early_fault
+
+ early_fault:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $1,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pusha
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ movl %cr2,%eax
+ pushl %eax
+ pushl %edx /* trapno */
+ pushl $fault_msg
+ call printk
++; call dump_stack
+ #endif
+- call dump_stack
+ hlt_loop:
+ hlt
+ jmp hlt_loop
+@@ -581,8 +664,11 @@ hlt_loop:
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+ ignore_int:
+- cld
+ #ifdef CONFIG_PRINTK
++ cmpl $2,%ss:early_recursion_flag
++ je hlt_loop
++ incl %ss:early_recursion_flag
++ cld
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+@@ -591,9 +677,6 @@ ignore_int:
+ movl $(__KERNEL_DS),%eax
+ movl %eax,%ds
+ movl %eax,%es
+- cmpl $2,early_recursion_flag
+- je hlt_loop
+- incl early_recursion_flag
+ pushl 16(%esp)
+ pushl 24(%esp)
+ pushl 32(%esp)
+@@ -622,29 +705,43 @@ ENTRY(initial_code)
+ /*
+ * BSS section
+ */
+-__PAGE_ALIGNED_BSS
+- .align PAGE_SIZE
+ #ifdef CONFIG_X86_PAE
++.section .initial_pg_pmd,"a",@progbits
+ initial_pg_pmd:
+ .fill 1024*KPMDS,4,0
+ #else
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .fill 1024,4,0
+ #endif
++.section .initial_pg_fixmap,"a",@progbits
+ initial_pg_fixmap:
+ .fill 1024,4,0
++.section .empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
++.section .swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
++#ifdef CONFIG_X86_PAE
++ .fill 4,8,0
++#else
+ .fill 1024,4,0
++#endif
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
+
+ /*
+ * This starts the data section.
+ */
+ #ifdef CONFIG_X86_PAE
+-__PAGE_ALIGNED_DATA
+- /* Page-aligned for the benefit of paravirt? */
+- .align PAGE_SIZE
++.section .initial_page_table,"a",@progbits
+ ENTRY(initial_page_table)
+ .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
+ # if KPMDS == 3
+@@ -663,18 +760,27 @@ ENTRY(initial_page_table)
+ # error "Kernel PMDs should be 1, 2 or 3"
+ # endif
+ .align PAGE_SIZE /* needs to be page-sized too */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ENTRY(cpu_pgd)
++ .rept NR_CPUS
++ .fill 4,8,0
++ .endr
++#endif
++
+ #endif
+
+ .data
+ .balign 4
+ ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
++ .long init_thread_union+THREAD_SIZE-8
+
++ready: .byte 0
++
++.section .rodata,"a",@progbits
+ early_recursion_flag:
+ .long 0
+
+-ready: .byte 0
+-
+ int_msg:
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
+
+@@ -707,7 +813,7 @@ fault_msg:
+ .word 0 # 32 bit align gdt_desc.address
+ boot_gdt_descr:
+ .word __BOOT_DS+7
+- .long boot_gdt - __PAGE_OFFSET
++ .long pa(boot_gdt)
+
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+@@ -718,7 +824,7 @@ idt_descr:
+ .word 0 # 32 bit align gdt_desc.address
+ ENTRY(early_gdt_descr)
+ .word GDT_ENTRIES*8-1
+- .long gdt_page /* Overwritten for secondary CPUs */
++ .long cpu_gdt_table /* Overwritten for secondary CPUs */
+
+ /*
+ * The boot_gdt must mirror the equivalent in setup.S and is
+@@ -727,5 +833,65 @@ ENTRY(early_gdt_descr)
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
++
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
++#else
++ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x00409b000000ffff /* 0x90 32-bit code */
++ .quad 0x00009b000000ffff /* 0x98 16-bit code */
++ .quad 0x000093000000ffff /* 0xa0 16-bit data */
++ .quad 0x0000930000000000 /* 0xa8 16-bit data */
++ .quad 0x0000930000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x004093000000ffff /* 0xc8 APM DS data */
++
++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
++ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++
++ /* Be sure this is zeroed to avoid false validations in Xen */
++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
++ .endr
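The head_32.S rework replaces the shared gdt_page with a page-sized per-cpu cpu_gdt_table (note the .rept NR_CPUS around the descriptor block) and, under UDEREF, rewrites descriptor high dwords at boot so segmentation enforces the user/kernel split: user code and data stop just below __PAGE_OFFSET, while the kernel data segment is expand-down (access byte 0x97) and therefore covers only addresses from __PAGE_OFFSET up. The immediates from the UDEREF init loop, reproduced and checked in C (assuming the default 3G/1G split):

    #include <stdio.h>

    #define PAGE_OFFSET 0xc0000000UL  /* default i386 split */

    int main(void)
    {
            /* limit bits 16..19 of the descriptor high dword, as in
             * '(((__PAGE_OFFSET-1) & 0xf0000000) >> 12)' above */
            unsigned long limit = ((PAGE_OFFSET - 1) & 0xf0000000UL) >> 12;

            unsigned long kernel_ds = limit | 0x00c09700UL; /* expand-down data */
            unsigned long user_cs   = limit | 0x00c0fb00UL; /* DPL3 code */
            unsigned long user_ds   = limit | 0x00c0f300UL; /* DPL3 data */

            /* prints 0xcb9700 0xcbfb00 0xcbf300 for the 3G/1G split */
            printf("%#lx %#lx %#lx\n", kernel_ds, user_cs, user_ds);
            return 0;
    }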
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index e11e394..0a8c254 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -19,6 +19,8 @@
+ #include <asm/cache.h>
+ #include <asm/processor-flags.h>
+ #include <asm/percpu.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+ L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+ L3_START_KERNEL = pud_index(__START_KERNEL_map)
++L4_VMALLOC_START = pgd_index(VMALLOC_START)
++L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
++L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
++L3_VMEMMAP_START = pud_index(VMEMMAP_START)
+
+ .text
+ __HEAD
+@@ -85,35 +93,23 @@ startup_64:
+ */
+ addq %rbp, init_level4_pgt + 0(%rip)
+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
+
+ addq %rbp, level3_ident_pgt + 0(%rip)
++#ifndef CONFIG_XEN
++ addq %rbp, level3_ident_pgt + 8(%rip)
++#endif
+
+- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
+- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
++
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+
+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
+-
+- /* Add an Identity mapping if I am above 1G */
+- leaq _text(%rip), %rdi
+- andq $PMD_PAGE_MASK, %rdi
+-
+- movq %rdi, %rax
+- shrq $PUD_SHIFT, %rax
+- andq $(PTRS_PER_PUD - 1), %rax
+- jz ident_complete
+-
+- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+- leaq level3_ident_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-
+- movq %rdi, %rax
+- shrq $PMD_SHIFT, %rax
+- andq $(PTRS_PER_PMD - 1), %rax
+- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
+- leaq level2_spare_pgt(%rip), %rbx
+- movq %rdx, 0(%rbx, %rax, 8)
+-ident_complete:
++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
+ * after the boot processor executes this code.
+ */
+
+- /* Enable PAE mode and PGE */
+- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
++ /* Enable PAE mode and PSE/PGE */
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movq %rax, %cr4
+
+ /* Setup early boot stage 4 level pagetables. */
+@@ -183,9 +179,18 @@ ENTRY(secondary_startup_64)
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_SCE, %eax /* Enable System Call */
+- btl $20,%edi /* No Execute supported? */
++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
+ jnc 1f
+ btsl $_EFER_NX, %eax
++#ifndef CONFIG_EFI
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
++#endif
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
+ 1: wrmsr /* Make changes effective */
+
+ /* Setup cr0 */
+@@ -247,6 +252,7 @@ ENTRY(secondary_startup_64)
+ * jump. In addition we need to ensure %cs is set so we make this
+ * a far return.
+ */
++ pax_set_fptr_mask
+ movq initial_code(%rip),%rax
+ pushq $0 # fake return address to stop unwinder
+ pushq $__KERNEL_CS # set correct cs
+@@ -269,7 +275,7 @@ ENTRY(secondary_startup_64)
+ bad_address:
+ jmp bad_address
+
+- .section ".init.text","ax"
++ __INIT
+ #ifdef CONFIG_EARLY_PRINTK
+ .globl early_idt_handlers
+ early_idt_handlers:
+@@ -314,18 +320,23 @@ ENTRY(early_idt_handler)
+ #endif /* EARLY_PRINTK */
+ 1: hlt
+ jmp 1b
++ .previous
+
+ #ifdef CONFIG_EARLY_PRINTK
++ __INITDATA
+ early_recursion_flag:
+ .long 0
++ .previous
+
++ .section .rodata,"a",@progbits
+ early_idt_msg:
+ .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
+ early_idt_ripmsg:
+ .asciz "RIP %s\n"
++ .previous
+ #endif /* CONFIG_EARLY_PRINTK */
+- .previous
+
++ .section .rodata,"a",@progbits
+ #define NEXT_PAGE(name) \
+ .balign PAGE_SIZE; \
+ ENTRY(name)
+@@ -338,7 +349,6 @@ ENTRY(name)
+ i = i + 1 ; \
+ .endr
+
+- .data
+ /*
+ * This default setting generates an ident mapping at address 0x100000
+ * and a mapping for the kernel that precisely maps virtual address
+@@ -349,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_START*8, 0
++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_END*8, 0
++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_START_KERNEL*8, 0
+ /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++NEXT_PAGE(cpu_pgd)
++ .rept NR_CPUS
++ .fill 512,8,0
++ .endr
++#endif
++
+ NEXT_PAGE(level3_ident_pgt)
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
++#ifdef CONFIG_XEN
+ .fill 511,8,0
++#else
++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
++ .fill 510,8,0
++#endif
++
++NEXT_PAGE(level3_vmalloc_start_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmemmap_pgt)
++ .fill L3_VMEMMAP_START,8,0
++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
+ NEXT_PAGE(level3_kernel_pgt)
+ .fill L3_START_KERNEL,8,0
+@@ -363,20 +401,27 @@ NEXT_PAGE(level3_kernel_pgt)
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+
++NEXT_PAGE(level2_vmemmap_pgt)
++ .fill 512,8,0
++
+ NEXT_PAGE(level2_fixmap_pgt)
+ .fill 506,8,0
+ .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
+- .fill 5,8,0
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
++ .fill 4,8,0
+
+ NEXT_PAGE(level1_fixmap_pgt)
+ .fill 512,8,0
+
+-NEXT_PAGE(level2_ident_pgt)
+- /* Since I easily can, map the first 1G.
++NEXT_PAGE(level1_vsyscall_pgt)
++ .fill 512,8,0
++
++ /* Since I easily can, map the first 2G.
+ * Don't set NX because code runs from these pages.
+ */
+- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
++NEXT_PAGE(level2_ident_pgt)
++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
+
+ NEXT_PAGE(level2_kernel_pgt)
+ /*
+@@ -389,35 +434,56 @@ NEXT_PAGE(level2_kernel_pgt)
+ * If you want to increase this then increase MODULES_VADDR
+ * too.)
+ */
+- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
+- KERNEL_IMAGE_SIZE/PMD_SIZE)
+-
+-NEXT_PAGE(level2_spare_pgt)
+- .fill 512, 8, 0
++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ #undef PMDS
+ #undef NEXT_PAGE
+
+- .data
++ .align PAGE_SIZE
++ENTRY(cpu_gdt_table)
++ .rept NR_CPUS
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
++ .quad 0x00cffb000000ffff /* __USER32_CS */
++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affb000000ffff /* __USER_CS */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
++#else
++ .quad 0x0 /* unused */
++#endif
++
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0x0000f40000000000 /* node/CPU stored in limit */
++ /* asm/segment.h:GDT_ENTRIES must match this */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++ .endr
++
+ .align 16
+ .globl early_gdt_descr
+ early_gdt_descr:
+ .word GDT_ENTRIES*8-1
+ early_gdt_descr_base:
+- .quad INIT_PER_CPU_VAR(gdt_page)
++ .quad cpu_gdt_table
+
+ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+ #include "../../x86/xen/xen-head.S"
+-
+- .section .bss, "aw", @nobits
++
++ .section .rodata,"a",@progbits
+ .align L1_CACHE_BYTES
+ ENTRY(idt_table)
+- .skip IDT_ENTRIES * 16
++ .fill 512,8,0
+
+- __PAGE_ALIGNED_BSS
+ .align PAGE_SIZE
+ ENTRY(empty_zero_page)
+ .skip PAGE_SIZE
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 9c3bd4a..e1d9b35 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
+ EXPORT_SYMBOL(cmpxchg8b_emu);
+ #endif
+
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
+ /* Networking helper routines. */
+ EXPORT_SYMBOL(csum_partial_copy_generic);
++EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
++EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
+
+ EXPORT_SYMBOL(csum_partial);
+ EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_PAX_KERNEXEC
++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
++#endif
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 6104852..47826ae 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -111,7 +111,7 @@ static int i8259A_irq_pending(unsigned int irq)
+ static void make_8259A_irq(unsigned int irq)
+ {
+ disable_irq_nosync(irq);
+- io_apic_irqs &= ~(1<<irq);
++ io_apic_irqs &= ~(1UL<<irq);
+ irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+ i8259A_chip.name);
+ enable_irq(irq);
+@@ -210,7 +210,7 @@ spurious_8259A_irq:
+ "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+@@ -334,14 +334,16 @@ static void init_8259A(int auto_eoi)
+ /* (slave's support for AEOI in flat mode is to be investigated) */
+ outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
+
++ pax_open_kernel();
+ if (auto_eoi)
+ /*
+ * In AEOI mode we just have to mask the interrupt
+ * when acking.
+ */
+- i8259A_chip.irq_mask_ack = disable_8259A_irq;
++ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
+ else
+- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
++ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
++ pax_close_kernel();
+
+ udelay(100); /* wait for 8259A to initialize */
+
+diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
+index 43e9ccf..44ccf6f 100644
+--- a/arch/x86/kernel/init_task.c
++++ b/arch/x86/kernel/init_task.c
+@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+-union thread_union init_thread_union __init_task_data =
+- { INIT_THREAD_INFO(init_task) };
++union thread_union init_thread_union __init_task_data;
+
+ /*
+ * Initial task structure.
+@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+-
++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
++EXPORT_SYMBOL(init_tss);
+diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
+index a979b5b..1d6db75 100644
+--- a/arch/x86/kernel/io_delay.c
++++ b/arch/x86/kernel/io_delay.c
+@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
+ * Quirk table for systems that misbehave (lock up, etc.) if port
+ * 0x80 is used:
+ */
+-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
++static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
+ {
+ .callback = dmi_io_delay_0xed_port,
+ .ident = "Compaq Presario V6000",
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 8c96897..be66bfa 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -6,6 +6,7 @@
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/ioport.h>
+@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on && grsec_disable_privio) {
++ gr_handle_ioperm();
++ return -EPERM;
++ }
++#endif
+ if (turn_on && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ * because the ->io_bitmap_max value must match the bitmap
+ * contents:
+ */
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+
+ if (turn_on)
+ bitmap_clear(t->io_bitmap_ptr, from, num);
+@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ if (grsec_disable_privio) {
++ gr_handle_iopl();
++ return -EPERM;
++ }
++#endif
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ }
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index fb2eb32..62793bd 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -18,7 +18,7 @@
+ #include <asm/mce.h>
+ #include <asm/hw_irq.h>
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ /* Function pointer for generic interrupt vector handling */
+ void (*x86_platform_ipi_callback)(void) = NULL;
+@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+ seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+ seq_printf(p, " Machine check polls\n");
+ #endif
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ #if defined(CONFIG_X86_IO_APIC)
+- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
++ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
+ #endif
+ return 0;
+ }
+@@ -159,7 +159,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+
+ u64 arch_irq_stat(void)
+ {
+- u64 sum = atomic_read(&irq_err_count);
++ u64 sum = atomic_read_unchecked(&irq_err_count);
+ return sum;
+ }
+
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 7209070..cbcd71a 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
+ __asm__ __volatile__("andl %%esp,%0" :
+ "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+- return sp < (sizeof(struct thread_info) + STACK_WARN);
++ return sp < STACK_WARN;
+ }
+
+ static void print_stack_overflow(void)
+@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+ union irq_ctx {
+- struct thread_info tinfo;
+- u32 stack[THREAD_SIZE/sizeof(u32)];
++ unsigned long previous_esp;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
+ } __attribute__((aligned(THREAD_SIZE)));
+
+ static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+- union irq_ctx *curctx, *irqctx;
++ union irq_ctx *irqctx;
+ u32 *isp, arg1, arg2;
+
+- curctx = (union irq_ctx *) current_thread_info();
+ irqctx = __this_cpu_read(hardirq_ctx);
+
+ /*
+@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+- if (unlikely(curctx == irqctx))
++ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
+ return 0;
+
+ /* build the stack frame on the IRQ stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+- irqctx->tinfo.task = curctx->tinfo.task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++ irqctx->previous_esp = current_stack_pointer;
+
+- /*
+- * Copy the softirq bits in preempt_count so that the
+- * softirq checks work in the hardirq context.
+- */
+- irqctx->tinfo.preempt_count =
+- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ if (unlikely(overflow))
+ call_on_stack(print_stack_overflow, isp);
+@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ : "0" (irq), "1" (desc), "2" (isp),
+ "D" (desc->handle_irq)
+ : "memory", "cc", "ecx");
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ return 1;
+ }
+
+@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ */
+ void __cpuinit irq_ctx_init(int cpu)
+ {
+- union irq_ctx *irqctx;
+-
+ if (per_cpu(hardirq_ctx, cpu))
+ return;
+
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(hardirq_ctx, cpu) = irqctx;
+-
+- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+- THREAD_FLAGS,
+- THREAD_ORDER));
+- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
+- irqctx->tinfo.cpu = cpu;
+- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+-
+- per_cpu(softirq_ctx, cpu) = irqctx;
++ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
++ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
+
+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+- struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+- curctx = current_thread_info();
+ irqctx = __this_cpu_read(softirq_ctx);
+- irqctx->tinfo.task = curctx->task;
+- irqctx->tinfo.previous_esp = current_stack_pointer;
++ irqctx->previous_esp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(MAKE_MM_SEG(0));
++#endif
+
+ call_on_stack(__do_softirq, isp);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ /*
+ * Shouldn't happen, we returned above if in_interrupt():
+ */
+diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
+index 69bca46..0bac999 100644
+--- a/arch/x86/kernel/irq_64.c
++++ b/arch/x86/kernel/irq_64.c
+@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ u64 curbase = (u64)task_stack_page(current);
+
+- if (user_mode_vm(regs))
++ if (user_mode(regs))
+ return;
+
+ WARN_ONCE(regs->sp >= curbase &&
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index 2f45c4c..3f51a0c 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+ #ifdef CONFIG_X86_32
+ switch (regno) {
+ case GDB_SS:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = __KERNEL_DS;
+ break;
+ case GDB_SP:
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ *(unsigned long *)mem = kernel_stack_pointer(regs);
+ break;
+ case GDB_GS:
+@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
+ bp->attr.bp_addr = breakinfo[breakno].addr;
+ bp->attr.bp_len = breakinfo[breakno].len;
+ bp->attr.bp_type = breakinfo[breakno].type;
+- info->address = breakinfo[breakno].addr;
++ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
++ info->address = ktla_ktva(breakinfo[breakno].addr);
++ else
++ info->address = breakinfo[breakno].addr;
+ info->len = breakinfo[breakno].len;
+ info->type = breakinfo[breakno].type;
+ val = arch_install_hw_breakpoint(bp);
+@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ case 'k':
+ /* clear the trace bit */
+ linux_regs->flags &= ~X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step, -1);
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
+
+ /* set the trace bit if we're stepping */
+ if (remcomInBuffer[0] == 's') {
+ linux_regs->flags |= X86_EFLAGS_TF;
+- atomic_set(&kgdb_cpu_doing_single_step,
++ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+
+ switch (cmd) {
+ case DIE_DEBUG:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+ if (user_mode(regs))
+ return single_step_cont(regs, args);
+ break;
+@@ -748,11 +751,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+ char opc[BREAK_INSTR_SIZE];
+
+ bpt->type = BP_BREAKPOINT;
+- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
++ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+- err = probe_kernel_write((char *)bpt->bpt_addr,
++ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ #ifdef CONFIG_DEBUG_RODATA
+ if (!err)
+@@ -765,7 +768,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+ return -EBUSY;
+ text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+@@ -790,13 +793,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+ if (mutex_is_locked(&text_mutex))
+ goto knl_write;
+ text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
+ if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+ goto knl_write;
+ return err;
+ knl_write:
+ #endif /* CONFIG_DEBUG_RODATA */
+- return probe_kernel_write((char *)bpt->bpt_addr,
++ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
+ (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+ }
+
+diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
+index 7da647d..6e9fab5 100644
+--- a/arch/x86/kernel/kprobes.c
++++ b/arch/x86/kernel/kprobes.c
+@@ -117,9 +117,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
+ s32 raddr;
+ } __attribute__((packed)) *insn;
+
+- insn = (struct __arch_relative_insn *)from;
++ insn = (struct __arch_relative_insn *)ktla_ktva(from);
++
++ pax_open_kernel();
+ insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
+ insn->op = op;
++ pax_close_kernel();
+ }
+
+ /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
+@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
+ kprobe_opcode_t opcode;
+ kprobe_opcode_t *orig_opcodes = opcodes;
+
+- if (search_exception_tables((unsigned long)opcodes))
++ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
+ return 0; /* Page fault may occur on this address. */
+
+ retry:
+@@ -228,7 +231,7 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
+ * for the first byte, we can recover the original instruction
+ * from it and kp->opcode.
+ */
+- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
++ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ buf[0] = kp->opcode;
+ return 0;
+ }
+@@ -264,7 +267,7 @@ static int __kprobes can_probe(unsigned long paddr)
+ * recover it.
+ */
+ return 0;
+- kernel_insn_init(&insn, buf);
++ kernel_insn_init(&insn, ktva_ktla(buf));
+ }
+ insn_get_length(&insn);
+ addr += insn.length;
+@@ -313,11 +316,13 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+ (unsigned long)src);
+ if (ret)
+ return 0;
+- kernel_insn_init(&insn, buf);
++ kernel_insn_init(&insn, ktva_ktla(buf));
+ }
+ }
+ insn_get_length(&insn);
++ pax_open_kernel();
+ memcpy(dest, insn.kaddr, insn.length);
++ pax_close_kernel();
+
+ #ifdef CONFIG_X86_64
+ if (insn_rip_relative(&insn)) {
+@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+ (u8 *) dest;
+ BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
+ disp = (u8 *) dest + insn_offset_displacement(&insn);
++ pax_open_kernel();
+ *(s32 *) disp = (s32) newdisp;
++ pax_close_kernel();
+ }
+ #endif
+ return insn.length;
+@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
+ */
+ __copy_instruction(p->ainsn.insn, p->addr, 0);
+
+- if (can_boost(p->addr))
++ if (can_boost(ktla_ktva(p->addr)))
+ p->ainsn.boostable = 0;
+ else
+ p->ainsn.boostable = -1;
+
+- p->opcode = *p->addr;
++ p->opcode = *(ktla_ktva(p->addr));
+ }
+
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ * nor set current_kprobe, because it doesn't use single
+ * stepping.
+ */
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ preempt_enable_no_resched();
+ return;
+ }
+@@ -494,9 +501,9 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ regs->flags &= ~X86_EFLAGS_IF;
+ /* single step inline if the instruction is an int3 */
+ if (p->opcode == BREAKPOINT_INSTRUCTION)
+- regs->ip = (unsigned long)p->addr;
++ regs->ip = ktla_ktva((unsigned long)p->addr);
+ else
+- regs->ip = (unsigned long)p->ainsn.insn;
++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ }
+
+ /*
+@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ setup_singlestep(p, regs, kcb, 0);
+ return 1;
+ }
+- } else if (*addr != BREAKPOINT_INSTRUCTION) {
++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
+ " movq %rax, 152(%rsp)\n"
+ RESTORE_REGS_STRING
+ " popfq\n"
++#ifdef KERNEXEC_PLUGIN
++ " btsq $63,(%rsp)\n"
++#endif
+ #else
+ " pushf\n"
+ SAVE_REGS_STRING
+@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+ {
+ unsigned long *tos = stack_addr(regs);
+- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
+ unsigned long orig_ip = (unsigned long)p->addr;
+ kprobe_opcode_t *insn = p->ainsn.insn;
+
+@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ struct die_args *args = data;
+ int ret = NOTIFY_DONE;
+
+- if (args->regs && user_mode_vm(args->regs))
++ if (args->regs && user_mode(args->regs))
+ return ret;
+
+ switch (val) {
+@@ -1120,6 +1130,7 @@ static void __kprobes synthesize_relcall(void *from, void *to)
+ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
+ unsigned long val)
+ {
++ pax_open_kernel();
+ #ifdef CONFIG_X86_64
+ *addr++ = 0x48;
+ *addr++ = 0xbf;
+@@ -1127,6 +1138,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
+ *addr++ = 0xb8;
+ #endif
+ *(unsigned long *)addr = val;
++ pax_close_kernel();
+ }
+
+ static void __used __kprobes kprobes_optinsn_template_holder(void)
+@@ -1307,7 +1319,7 @@ static int __kprobes can_optimize(unsigned long paddr)
+ ret = recover_probed_instruction(buf, addr);
+ if (ret)
+ return 0;
+- kernel_insn_init(&insn, buf);
++ kernel_insn_init(&insn, ktva_ktla(buf));
+ }
+ insn_get_length(&insn);
+ /* Recover address */
+@@ -1384,7 +1396,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+ * Verify if the address gap is in 2GB range, because this uses
+ * a relative jump.
+ */
+- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
++ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
+ if (abs(rel) > 0x7fffffff)
+ return -ERANGE;
+
+@@ -1399,16 +1411,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+ op->optinsn.size = ret;
+
+ /* Copy arch-dep-instance from template */
+- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
++ pax_open_kernel();
++ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
++ pax_close_kernel();
+
+ /* Set probe information */
+ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+ /* Set probe function call */
+- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
++ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
+
+ /* Set returning jmp instruction at the tail of out-of-line buffer */
+- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
++ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
+ (u8 *)op->kp.addr + op->optinsn.size);
+
+ flush_icache_range((unsigned long) buf,
+@@ -1431,7 +1445,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+ /* Backup instructions which will be replaced by jump address */
+- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
++ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ insn_buf[0] = RELATIVEJUMP_OPCODE;
+@@ -1530,7 +1544,7 @@ static int __kprobes setup_detour_execution(struct kprobe *p,
+ /* This kprobe is really able to run optimized path. */
+ op = container_of(p, struct optimized_kprobe, kp);
+ /* Detour through copied instructions */
+- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
++ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
+ if (!reenter)
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index a9c2116..94c1e1a 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
+ pv_mmu_ops.set_pud = kvm_set_pud;
+ #if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = kvm_set_pgd;
++ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
+ #endif
+ #endif
+ pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+@@ -579,7 +580,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
++static struct notifier_block kvm_cpu_notifier = {
+ .notifier_call = kvm_cpu_notify,
+ };
+ #endif
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ea69726..604d066 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+ if (reload) {
+ #ifdef CONFIG_SMP
+ preempt_disable();
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ if (!cpumask_equal(mm_cpumask(current->mm),
+ cpumask_of(smp_processor_id())))
+ smp_call_function(flush_ldt, current->mm, 1);
+ preempt_enable();
+ #else
+- load_LDT(pc);
++ load_LDT_nolock(pc);
+ #endif
+ }
+ if (oldsize) {
+@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+ return err;
+
+ for (i = 0; i < old->size; i++)
+- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
++ write_ldt_entry(new->ldt, i, old->ldt + i);
+ return 0;
+ }
+
+@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ mutex_unlock(&old_mm->context.lock);
+ }
++
++ if (tsk == current) {
++ mm->context.vdso = 0;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++#endif
++#endif
++
++ }
++
+ return retval;
+ }
+
+@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index a3fa43b..8966f4c 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -27,7 +27,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/debugreg.h>
+
+-static void set_idt(void *newidt, __u16 limit)
++static void set_idt(struct desc_struct *newidt, __u16 limit)
+ {
+ struct desc_ptr curidt;
+
+@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
+ }
+
+
+-static void set_gdt(void *newgdt, __u16 limit)
++static void set_gdt(struct desc_struct *newgdt, __u16 limit)
+ {
+ struct desc_ptr curgdt;
+
+@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
+ }
+
+ control_page = page_address(image->control_code_page);
+- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
+
+ relocate_kernel_ptr = control_page;
+ page_list[PA_CONTROL_PAGE] = __pa(control_page);
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index 29c95d7..97b7b1b 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -507,7 +507,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata mc_cpu_notifier = {
++static struct notifier_block mc_cpu_notifier = {
+ .notifier_call = mc_cpu_callback,
+ };
+
+diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
+index 3ca42d0..7cff8cc 100644
+--- a/arch/x86/kernel/microcode_intel.c
++++ b/arch/x86/kernel/microcode_intel.c
+@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (const void __force_user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 925179f..b151b74 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -36,15 +36,62 @@
+ #define DEBUGP(fmt...)
+ #endif
+
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+- if (PAGE_ALIGN(size) > MODULES_LEN)
++ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
+ return NULL;
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
+ -1, __builtin_return_address(0));
+ }
+
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++void *module_alloc_exec(unsigned long size) __size_overflow(1);
++void *module_alloc_exec(unsigned long size)
++{
++ struct vm_struct *area;
++
++ if (size == 0)
++ return NULL;
++
++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
++ return area ? area->addr : NULL;
++}
++EXPORT_SYMBOL(module_alloc_exec);
++
++void module_free_exec(struct module *mod, void *module_region)
++{
++ vunmap(module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++#else
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++EXPORT_SYMBOL(module_free_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_RX);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
++#endif
++
+ #ifdef CONFIG_X86_32
+ int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+@@ -55,14 +102,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+- uint32_t *location;
++ uint32_t *plocation, location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+- + rel[i].r_offset;
++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
++ location = (uint32_t)plocation;
++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
++ plocation = ktla_ktva((void *)plocation);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+@@ -71,11 +120,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_386_32:
+ /* We add the value into the location given */
+- *location += sym->st_value;
++ pax_open_kernel();
++ *plocation += sym->st_value;
++ pax_close_kernel();
+ break;
+ case R_386_PC32:
+ /* Add the value, subtract its position */
+- *location += sym->st_value - (uint32_t)location;
++ pax_open_kernel();
++ *plocation += sym->st_value - location;
++ pax_close_kernel();
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+@@ -120,21 +173,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ case R_X86_64_NONE:
+ break;
+ case R_X86_64_64:
++ pax_open_kernel();
+ *(u64 *)loc = val;
++ pax_close_kernel();
+ break;
+ case R_X86_64_32:
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
+ if (val != *(u32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_32S:
++ pax_open_kernel();
+ *(s32 *)loc = val;
++ pax_close_kernel();
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+ break;
+ case R_X86_64_PC32:
+ val -= (u64)loc;
++ pax_open_kernel();
+ *(u32 *)loc = val;
++ pax_close_kernel();
++
+ #if 0
+ if ((s64)val != *(s32 *)loc)
+ goto overflow;
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index f7d1a64..399615a 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -235,7 +235,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block __refdata msr_class_cpu_notifier = {
++static struct notifier_block msr_class_cpu_notifier = {
+ .notifier_call = msr_class_cpu_callback,
+ };
+
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index e88f37b..45bb4ff 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -126,9 +126,9 @@ static int __setup_nmi(unsigned int type, struct nmiaction *action)
+ * event confuses some handlers (kdump uses this flag)
+ */
+ if (action->flags & NMI_FLAG_FIRST)
+- list_add_rcu(&action->list, &desc->head);
++ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
+ else
+- list_add_tail_rcu(&action->list, &desc->head);
++ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+@@ -150,7 +150,7 @@ static struct nmiaction *__free_nmi(unsigned int type, const char *name)
+ if (!strcmp(n->name, name)) {
+ WARN(in_nmi(),
+ "Trying to free NMI (%s) from NMI context!\n", n->name);
+- list_del_rcu(&n->list);
++ pax_list_del_rcu((struct list_head *)&n->list);
+ break;
+ }
+ }
+@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
+ dotraplinkage notrace __kprobes void
+ do_nmi(struct pt_regs *regs, long error_code)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (!user_mode(regs)) {
++ unsigned long cs = regs->cs & 0xFFFF;
++ unsigned long ip = ktva_ktla(regs->ip);
++
++ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
++ regs->ip = ip;
++ }
++#endif
++
+ nmi_enter();
+
+ inc_irq_stat(__nmi_count);
+diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
+index 676b8c7..870ba04 100644
+--- a/arch/x86/kernel/paravirt-spinlocks.c
++++ b/arch/x86/kernel/paravirt-spinlocks.c
+@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+ arch_spin_lock(lock);
+ }
+
+-struct pv_lock_ops pv_lock_ops = {
++struct pv_lock_ops pv_lock_ops __read_only = {
+ #ifdef CONFIG_SMP
+ .spin_is_locked = __ticket_spin_is_locked,
+ .spin_is_contended = __ticket_spin_is_contended,
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 84c938f..09fb3e0 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
+ {
+ return x;
+ }
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
++#endif
+
+ void __init default_banner(void)
+ {
+@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ if (opfunc == NULL)
+ /* If there's no function, patch it with a ud2a (BUG) */
+ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
+- else if (opfunc == _paravirt_nop)
++ else if (opfunc == (void *)_paravirt_nop)
+ /* If the operation is a nop, then nop the callsite */
+ ret = paravirt_patch_nop();
+
+ /* identity functions just return their single argument */
+- else if (opfunc == _paravirt_ident_32)
++ else if (opfunc == (void *)_paravirt_ident_32)
+ ret = paravirt_patch_ident_32(insnbuf, len);
+- else if (opfunc == _paravirt_ident_64)
++ else if (opfunc == (void *)_paravirt_ident_64)
+ ret = paravirt_patch_ident_64(insnbuf, len);
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
++ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
++ ret = paravirt_patch_ident_64(insnbuf, len);
++#endif
+
+ else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+ type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ if (insn_len > len || start == NULL)
+ insn_len = len;
+ else
+- memcpy(insnbuf, start, insn_len);
++ memcpy(insnbuf, ktla_ktva(start), insn_len);
+
+ return insn_len;
+ }
+@@ -302,7 +309,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return percpu_read(paravirt_lazy_mode);
+ }
+
+-struct pv_info pv_info = {
++struct pv_info pv_info __read_only = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+@@ -313,16 +320,16 @@ struct pv_info pv_info = {
+ #endif
+ };
+
+-struct pv_init_ops pv_init_ops = {
++struct pv_init_ops pv_init_ops __read_only = {
+ .patch = native_patch,
+ };
+
+-struct pv_time_ops pv_time_ops = {
++struct pv_time_ops pv_time_ops __read_only = {
+ .sched_clock = native_sched_clock,
+ .steal_clock = native_steal_clock,
+ };
+
+-struct pv_irq_ops pv_irq_ops = {
++struct pv_irq_ops pv_irq_ops __read_only = {
+ .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+ .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
+ #endif
+ };
+
+-struct pv_cpu_ops pv_cpu_ops = {
++struct pv_cpu_ops pv_cpu_ops __read_only = {
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
+ .end_context_switch = paravirt_nop,
+ };
+
+-struct pv_apic_ops pv_apic_ops = {
+struct pv_apic_ops pv_apic_ops __read_only = {
+ #ifdef CONFIG_X86_LOCAL_APIC
+ .startup_ipi_hook = paravirt_nop,
+ #endif
+ };
+
+-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
++/* 64-bit pagetable entries */
++#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
++#else
+ /* 32-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
++#endif
+ #else
+ /* 64-bit pagetable entries */
+ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+ #endif
+
+-struct pv_mmu_ops pv_mmu_ops = {
++struct pv_mmu_ops pv_mmu_ops __read_only = {
+
+ .read_cr2 = native_read_cr2,
+ .write_cr2 = native_write_cr2,
+@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .make_pud = PTE_IDENT,
+
+ .set_pgd = native_set_pgd,
++ .set_pgd_batched = native_set_pgd_batched,
+ #endif
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+ },
+
+ .set_fixmap = native_set_fixmap,
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .pax_open_kernel = native_pax_open_kernel,
++ .pax_close_kernel = native_pax_close_kernel,
++#endif
++
+ };
+
+ EXPORT_SYMBOL_GPL(pv_time_ops);
+diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
+index 726494b..5d942a3 100644
+--- a/arch/x86/kernel/pci-calgary_64.c
++++ b/arch/x86/kernel/pci-calgary_64.c
+@@ -1341,7 +1341,7 @@ static void __init get_tce_space_from_tar(void)
+ tce_space = be64_to_cpu(readq(target));
+ tce_space = tce_space & TAR_SW_BITS;
+
+- tce_space = tce_space & (~specified_table_size);
++ tce_space = tce_space & (~(unsigned long)specified_table_size);
+ info->tce_space = (u64 *)__va(tce_space);
+ }
+ }
+diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
+index 35ccf75..7a15747 100644
+--- a/arch/x86/kernel/pci-iommu_table.c
++++ b/arch/x86/kernel/pci-iommu_table.c
+@@ -2,7 +2,7 @@
+ #include <asm/iommu_table.h>
+ #include <linux/string.h>
+ #include <linux/kallsyms.h>
+-
++#include <linux/sched.h>
+
+ #define DEBUG 1
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 59b9b37..f02ee42 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
+
+ void free_thread_info(struct thread_info *ti)
+ {
+- free_thread_xstate(ti->task);
+ free_pages((unsigned long)ti, THREAD_ORDER);
+ }
+
++static struct kmem_cache *task_struct_cachep;
++
+ void arch_task_cache_init(void)
+ {
+- task_xstate_cachep =
+- kmem_cache_create("task_xstate", xstate_size,
++ /* create a slab on which task_structs can be allocated */
++ task_struct_cachep =
++ kmem_cache_create("task_struct", sizeof(struct task_struct),
++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
++
++ task_xstate_cachep =
++ kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+- SLAB_PANIC | SLAB_NOTRACK, NULL);
++ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
++}
++
++struct task_struct *alloc_task_struct_node(int node)
++{
++ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
++}
++
++void free_task_struct(struct task_struct *task)
++{
++ free_thread_xstate(task);
++ kmem_cache_free(task_struct_cachep, task);
+ }
+
+ /*
+@@ -70,7 +87,7 @@ void exit_thread(void)
+ unsigned long *bp = t->io_bitmap_ptr;
+
+ if (bp) {
+- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++ struct tss_struct *tss = init_tss + get_cpu();
+
+ t->io_bitmap_ptr = NULL;
+ clear_thread_flag(TIF_IO_BITMAP);
+@@ -106,7 +123,7 @@ void show_regs_common(void)
+
+ printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -120,6 +137,9 @@ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
++ loadsegment(gs, 0);
++#endif
+ flush_ptrace_hw_breakpoint(tsk);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ regs.di = (unsigned long) arg;
+
+ #ifdef CONFIG_X86_32
+- regs.ds = __USER_DS;
+- regs.es = __USER_DS;
++ regs.ds = __KERNEL_DS;
++ regs.es = __KERNEL_DS;
+ regs.fs = __KERNEL_PERCPU;
+- regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs.gs);
+ #else
+ regs.ss = __KERNEL_DS;
+ #endif
+@@ -387,7 +407,7 @@ bool set_pm_idle_to_default(void)
+
+ return ret;
+ }
+-void stop_this_cpu(void *dummy)
++__noreturn void stop_this_cpu(void *dummy)
+ {
+ local_irq_disable();
+ /*
+@@ -629,16 +649,37 @@ static int __init idle_setup(char *str)
+ }
+ early_param("idle", idle_setup);
+
+-unsigned long arch_align_stack(unsigned long sp)
++#ifdef CONFIG_PAX_RANDKSTACK
++void pax_randomize_kstack(struct pt_regs *regs)
+ {
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
++ struct thread_struct *thread = &current->thread;
++ unsigned long time;
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
++ if (!randomize_va_space)
++ return;
++
++ if (v8086_mode(regs))
++ return;
+
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 2;
++#elif defined(CONFIG_X86_64)
++ time &= 0xFUL;
++ time <<= 4;
++#else
++ time &= 0x1FUL;
++ time <<= 3;
++#endif
++
++ thread->sp0 ^= time;
++ load_sp0(init_tss + smp_processor_id(), thread);
++
++#ifdef CONFIG_X86_64
++ percpu_write(kernel_stack, thread->sp0);
++#endif
++}
++#endif
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 8598296..7c1af45 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.sp)[3];
++//XXX return tsk->thread.eip;
+ }
+
+ #ifndef CONFIG_SMP
+@@ -130,21 +131,20 @@ void __show_regs(struct pt_regs *regs, int all)
+ unsigned long sp;
+ unsigned short ss, gs;
+
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+- gs = get_user_gs(regs);
+ } else {
+ sp = kernel_stack_pointer(regs);
+ savesegment(ss, ss);
+- savesegment(gs, gs);
+ }
++ gs = get_user_gs(regs);
+
+ show_regs_common();
+
+ printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
+ (u16)regs->cs, regs->ip, regs->flags,
+- smp_processor_id());
++ raw_smp_processor_id());
+ print_symbol("EIP is at %s\n", regs->ip);
+
+ printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ struct task_struct *tsk;
+ int err;
+
+- childregs = task_pt_regs(p);
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
+ *childregs = *regs;
+ childregs->ax = 0;
+ childregs->sp = sp;
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ p->thread.ip = (unsigned long) ret_from_fork;
+
+@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ fpu_switch_t fpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+@@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ */
+ lazy_save_gs(prev->gs);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ __set_fs(task_thread_info(next_p)->addr_limit);
++#endif
++
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ */
+@@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ */
+ arch_end_context_switch(next_p);
+
++ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
++
+ /*
+ * Restore %gs if needed (which is common)
+ */
+@@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+
+ switch_fpu_finish(next_p, fpu);
+
+- percpu_write(current_task, next_p);
+-
+ return prev_p;
+ }
+
+@@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
+ } while (count++ < 16);
+ return 0;
+ }
+-
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 6a364a6..b147d11 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -89,7 +89,7 @@ static void __exit_idle(void)
+ void exit_idle(void)
+ {
+ /* idle loop has pid 0 */
+- if (current->pid)
++ if (task_pid_nr(current))
+ return;
+ __exit_idle();
+ }
+@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ struct pt_regs *childregs;
+ struct task_struct *me = current;
+
+- childregs = ((struct pt_regs *)
+- (THREAD_SIZE + task_stack_page(p))) - 1;
++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
+ *childregs = *regs;
+
+ childregs->ax = 0;
+@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.sp0 = (unsigned long) (childregs+1);
+ p->thread.usersp = me->thread.usersp;
++ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
+
+ set_tsk_thread_flag(p, TIF_FORK);
+
+@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+ int cpu = smp_processor_id();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+ unsigned fsindex, gsindex;
+ fpu_switch_t fpu;
+
+@@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ prev->usersp = percpu_read(old_rsp);
+ percpu_write(old_rsp, next->usersp);
+ percpu_write(current_task, next_p);
++ percpu_write(current_tinfo, &next_p->tinfo);
+
+- percpu_write(kernel_stack,
+- (unsigned long)task_stack_page(next_p) +
+- THREAD_SIZE - KERNEL_STACK_OFFSET);
++ percpu_write(kernel_stack, next->sp0);
+
+ /*
+ * Now maybe reload the debug registers and handle I/O bitmaps
+@@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+- if (fp < (unsigned long)stack ||
+- fp >= (unsigned long)stack+THREAD_SIZE)
++ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 2dc4121..60e1086 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -181,14 +181,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
+ {
+ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+ unsigned long sp = (unsigned long)&regs->sp;
+- struct thread_info *tinfo;
+
+- if (context == (sp & ~(THREAD_SIZE - 1)))
++ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
+ return sp;
+
+- tinfo = (struct thread_info *)context;
+- if (tinfo->previous_esp)
+- return tinfo->previous_esp;
++ sp = *(unsigned long *)context;
++ if (sp)
++ return sp;
+
+ return (unsigned long)regs;
+ }
+@@ -585,7 +584,7 @@ static void ptrace_triggered(struct perf_event *bp,
+ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
+ {
+ int i;
+- int dr7 = 0;
++ unsigned long dr7 = 0;
+ struct arch_hw_breakpoint *info;
+
+ for (i = 0; i < HBP_NUM; i++) {
+@@ -852,7 +851,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+ {
+ int ret;
+- unsigned long __user *datap = (unsigned long __user *)data;
++ unsigned long __user *datap = (__force unsigned long __user *)data;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+@@ -937,14 +936,14 @@ long arch_ptrace(struct task_struct *child, long request,
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_get_thread_area(child, addr,
+- (struct user_desc __user *)data);
++ (__force struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ if ((int) addr < 0)
+ return -EIO;
+ ret = do_set_thread_area(child, addr,
+- (struct user_desc __user *)data, 0);
++ (__force struct user_desc __user *) data, 0);
+ break;
+ #endif
+
+@@ -1229,7 +1228,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+
+ #ifdef CONFIG_X86_64
+
+-static struct user_regset x86_64_regsets[] __read_mostly = {
++static user_regset_no_const x86_64_regsets[] __read_only = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct user_regs_struct) / sizeof(long),
+@@ -1273,7 +1272,7 @@ static const struct user_regset_view user_x86_64_view = {
+ #endif /* CONFIG_X86_64 */
+
+ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+-static struct user_regset x86_32_regsets[] __read_mostly = {
++static user_regset_no_const x86_32_regsets[] __read_only = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct user_regs_struct32) / sizeof(u32),
+@@ -1326,7 +1325,7 @@ static const struct user_regset_view user_x86_32_view = {
+ */
+ u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+
+-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
++void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+ {
+ #ifdef CONFIG_X86_64
+ x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
+@@ -1361,7 +1360,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
+ }
+
+ void user_single_step_siginfo(struct task_struct *tsk,
+@@ -1390,6 +1389,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ # define IS_IA32 0
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+@@ -1398,6 +1401,11 @@ long syscall_trace_enter(struct pt_regs *regs)
+ {
+ long ret = 0;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /*
+ * If we stepped into a sysenter/syscall insn, it trapped in
+ * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
+@@ -1409,7 +1417,11 @@ long syscall_trace_enter(struct pt_regs *regs)
+ regs->flags |= X86_EFLAGS_TF;
+
+ /* do the secure computing check first */
+- secure_computing(regs->orig_ax);
++ if (secure_computing(regs->orig_ax)) {
++ /* seccomp failures shouldn't expose any additional code. */
++ ret = -1L;
++ goto out;
++ }
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
+ ret = -1L;
+@@ -1436,6 +1448,7 @@ long syscall_trace_enter(struct pt_regs *regs)
+ #endif
+ }
+
++out:
+ return ret ?: regs->orig_ax;
+ }
+
+@@ -1443,6 +1456,11 @@ void syscall_trace_leave(struct pt_regs *regs)
+ {
+ bool step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
+
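
Among the ptrace.c changes above, widening dr7 from int to unsigned long avoids a sign-extension surprise when a value with bit 31 set is later converted to a wider type. A userspace demonstration of the hazard; the bit positions are only an example.

    /* Build a bitmask in a signed int, then widen it: on LP64 the
     * conversion sign-extends once bit 31 is set. */
    #include <stdio.h>

    int main(void)
    {
        int narrow = 0;
        narrow |= 1 << 0;        /* a low control bit */
        narrow |= 1u << 31;      /* a high bit; makes the int negative */

        unsigned long widened = narrow;   /* sign-extends: 0xffffffff80000001 */
        printf("from int:         %#lx\n", widened);

        unsigned long dr7 = 0;   /* building in the destination width is safe */
        dr7 |= 1UL << 0;
        dr7 |= 1UL << 31;
        printf("as unsigned long: %#lx\n", dr7);
        return 0;
    }
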
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 42eb330..139955c 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
+ return pv_tsc_khz;
+ }
+
+-static atomic64_t last_value = ATOMIC64_INIT(0);
++static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
+
+ void pvclock_resume(void)
+ {
+- atomic64_set(&last_value, 0);
++ atomic64_set_unchecked(&last_value, 0);
+ }
+
+ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always go forward fail to hold.
+ */
+- last = atomic64_read(&last_value);
++ last = atomic64_read_unchecked(&last_value);
+ do {
+ if (ret < last)
+ return last;
+- last = atomic64_cmpxchg(&last_value, last, ret);
++ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
+ return ret;
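
The pvclock hunks swap the atomic type but keep the stock retry loop visible in the context lines: readers race to publish the largest timestamp seen, so the clocksource never appears to run backwards. A minimal userspace rendition with C11 atomics standing in for the kernel's atomic64 helpers; monotonic_read() is an invented name.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t last_value;

    static uint64_t monotonic_read(uint64_t raw)
    {
        uint64_t last = atomic_load(&last_value);

        do {
            if (raw < last)          /* another reader already saw later */
                return last;
            /* on failure, last is reloaded and the clamp re-checked */
        } while (!atomic_compare_exchange_weak(&last_value, &last, raw));

        return raw;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
        printf("%llu\n", (unsigned long long)monotonic_read(90));  /* clamped to 100 */
        printf("%llu\n", (unsigned long long)monotonic_read(150)); /* 150 */
        return 0;
    }
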
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index f411aca..bd2aa3b 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+ static const struct desc_ptr no_idt = {};
+-static int reboot_mode;
++static unsigned short reboot_mode;
+ enum reboot_type reboot_type = BOOT_ACPI;
+ int reboot_force;
+
+@@ -324,13 +324,17 @@ core_initcall(reboot_init);
+ extern const unsigned char machine_real_restart_asm[];
+ extern const u64 machine_real_restart_gdt[3];
+
+-void machine_real_restart(unsigned int type)
++__noreturn void machine_real_restart(unsigned int type)
+ {
+ void *restart_va;
+ unsigned long restart_pa;
+- void (*restart_lowmem)(unsigned int);
++ void (* __noreturn restart_lowmem)(unsigned int);
+ u64 *lowmem_gdt;
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ struct desc_struct *gdt;
++#endif
++
+ local_irq_disable();
+
+ /* Write zero to CMOS register number 0x0f, which the BIOS POST
+@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
+ boot)". This seems like a fairly standard thing that gets set by
+ REBOOT.COM programs, and the previous reset routine did this
+ too. */
+- *((unsigned short *)0x472) = reboot_mode;
++ *(unsigned short *)(__va(0x472)) = reboot_mode;
+
+ /* Patch the GDT in the low memory trampoline */
+ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
+
+ restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
+ restart_pa = virt_to_phys(restart_va);
+- restart_lowmem = (void (*)(unsigned int))restart_pa;
++ restart_lowmem = (void *)restart_pa;
+
+ /* GDT[0]: GDT self-pointer */
+ lowmem_gdt[0] =
+@@ -374,7 +378,35 @@ void machine_real_restart(unsigned int type)
+ GDT_ENTRY(0x009b, restart_pa, 0xffff);
+
+ /* Jump to the identity-mapped low memory code */
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ gdt = get_cpu_gdt_table(smp_processor_id());
++ pax_open_kernel();
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ loadsegment(ds, __KERNEL_DS);
++ loadsegment(es, __KERNEL_DS);
++ loadsegment(ss, __KERNEL_DS);
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
++ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
++ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
++ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
++#endif
++ pax_close_kernel();
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
++ unreachable();
++#else
+ restart_lowmem(type);
++#endif
++
+ }
+ #ifdef CONFIG_APM_MODULE
+ EXPORT_SYMBOL(machine_real_restart);
+@@ -564,7 +596,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
+ * try to force a triple fault and then cycle between hitting the keyboard
+ * controller and doing that
+ */
+-static void native_machine_emergency_restart(void)
++static void __noreturn native_machine_emergency_restart(void)
+ {
+ int i;
+ int attempt = 0;
+@@ -691,13 +723,13 @@ void native_machine_shutdown(void)
+ #endif
+ }
+
+-static void __machine_emergency_restart(int emergency)
++static __noreturn void __machine_emergency_restart(int emergency)
+ {
+ reboot_emergency = emergency;
+ machine_ops.emergency_restart();
+ }
+
+-static void native_machine_restart(char *__unused)
++static void __noreturn native_machine_restart(char *__unused)
+ {
+ printk("machine restart\n");
+
+@@ -706,7 +738,7 @@ static void native_machine_restart(char *__unused)
+ __machine_emergency_restart(0);
+ }
+
+-static void native_machine_halt(void)
++static void __noreturn native_machine_halt(void)
+ {
+ /* stop other cpus and apics */
+ machine_shutdown();
+@@ -717,7 +749,7 @@ static void native_machine_halt(void)
+ stop_this_cpu(NULL);
+ }
+
+-static void native_machine_power_off(void)
++static void __noreturn native_machine_power_off(void)
+ {
+ if (pm_power_off) {
+ if (!reboot_force)
+@@ -726,9 +758,10 @@ static void native_machine_power_off(void)
+ }
+ /* a fallback in case there is no PM info available */
+ tboot_shutdown(TB_SHUTDOWN_HALT);
++ unreachable();
+ }
+
+-struct machine_ops machine_ops = {
++struct machine_ops machine_ops __read_only = {
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .emergency_restart = native_machine_emergency_restart,
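
The reboot.c changes above mostly add __noreturn annotations to the machine_* shutdown paths. What the attribute buys is shown by this standalone sketch, where halt_forever() is a made-up stand-in: the compiler can prune code after the call and warn if the function could actually return.

    #include <stdio.h>
    #include <stdlib.h>

    static _Noreturn void halt_forever(const char *why)
    {
        fprintf(stderr, "halting: %s\n", why);
        exit(1);    /* must never return; exit() satisfies _Noreturn */
    }

    int main(int argc, char **argv)
    {
        if (argc > 1)
            halt_forever("asked to");   /* everything after this edge is dead */
        puts("still running");
        return 0;
    }
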
+diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
+index c8e41e9..64049ef 100644
+--- a/arch/x86/kernel/reboot_fixups_32.c
++++ b/arch/x86/kernel/reboot_fixups_32.c
+@@ -57,7 +57,7 @@ struct device_fixup {
+ unsigned int vendor;
+ unsigned int device;
+ void (*reboot_fixup)(struct pci_dev *);
+-};
++} __do_const;
+
+ /*
+ * PCI ids solely used for fixups_table go here
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index f2bb9c9..bed145d7 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -11,6 +11,7 @@
+ #include <asm/kexec.h>
+ #include <asm/processor-flags.h>
+ #include <asm/pgtable_types.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Must be relocatable PIC code callable as a C function
+@@ -167,6 +168,7 @@ identity_mapped:
+ xorq %r14, %r14
+ xorq %r15, %r15
+
++ pax_force_retaddr 0, 1
+ ret
+
+ 1:
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index b506f41..33e9995 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
+
+ switch (data->type) {
+ case SETUP_E820_EXT:
+- parse_e820_ext(data);
++ parse_e820_ext((struct setup_data __force_kernel *)data);
+ break;
+ case SETUP_DTB:
+ add_dtb(pa_data);
+@@ -727,7 +727,7 @@ static void __init trim_bios_range(void)
+ * area (640->1Mb) as ram even though it is not.
+ * take them out.
+ */
+- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+@@ -852,14 +852,14 @@ void __init setup_arch(char **cmdline_p)
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+- init_mm.start_code = (unsigned long) _text;
+- init_mm.end_code = (unsigned long) _etext;
++ init_mm.start_code = ktla_ktva((unsigned long) _text);
++ init_mm.end_code = ktla_ktva((unsigned long) _etext);
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = _brk_end;
+
+- code_resource.start = virt_to_phys(_text);
+- code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ code_resource.start = virt_to_phys(ktla_ktva(_text));
++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
++ data_resource.start = virt_to_phys(_sdata);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 5a98aa2..5aa4ffc 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -21,19 +21,17 @@
+ #include <asm/cpu.h>
+ #include <asm/stackprotector.h>
+
+-DEFINE_PER_CPU(int, cpu_number);
++#ifdef CONFIG_SMP
++DEFINE_PER_CPU(unsigned int, cpu_number);
+ EXPORT_PER_CPU_SYMBOL(cpu_number);
++#endif
+
+-#ifdef CONFIG_X86_64
+ #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+-#else
+-#define BOOT_PERCPU_OFFSET 0
+-#endif
+
+ DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+ EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
++unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
+ [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+ };
+ EXPORT_SYMBOL(__per_cpu_offset);
+@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ pg_data_t *last = NULL;
+- unsigned int cpu;
++ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ int node = early_cpu_to_node(cpu);
+@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+ struct desc_struct gdt;
++ unsigned long base = per_cpu_offset(cpu);
+
+- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+- 0x2 | DESCTYPE_S, 0x8);
+- gdt.s = 1;
++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
++ 0x83 | DESCTYPE_S, 0xC);
+ write_gdt_entry(get_cpu_gdt_table(cpu),
+ GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ #endif
+@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
+ /* alrighty, percpu areas up and running */
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ unsigned long canary = per_cpu(stack_canary.canary, cpu);
++#endif
++#endif
+ per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
+ */
+ set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+ #endif
++#ifdef CONFIG_CC_STACKPROTECTOR
++#ifdef CONFIG_X86_32
++ if (!cpu)
++ per_cpu(stack_canary.canary, cpu) = canary;
++#endif
++#endif
+ /*
+ * Up to this point, the boot CPU has been using .init.data
+ * area. Reload any changed state for the boot CPU.
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 54ddaeb2..158e022 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
+ * Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0.
+ */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ #else /* !CONFIG_X86_32 */
+ sp = round_down(sp, 16) - 8;
+ #endif
+@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ /* save i387 state */
+ if (used_math() && save_i387_xstate(*fpstate) < 0)
+- return (void __user *)-1L;
++ return (__force void __user *)-1L;
+
+ return (void __user *)sp;
+ }
+@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+ }
+
+ if (current->mm->context.vdso)
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
+
+ if (err)
+ return -EFAULT;
+@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ if (current->mm->context.vdso)
++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
++ else
++ restorer = (void __user *)&frame->retcode;
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ put_user_ex(restorer, &frame->pretcode);
+@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ * reasons and because gdb uses it as a signature to notice
+ * signal handler stack frames.
+ */
+- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -655,19 +658,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ {
+ int usig = signr_convert(sig);
+ sigset_t *set = &current->blocked;
++ sigset_t sigcopy;
+ int ret;
+
+ if (current_thread_info()->status & TS_RESTORE_SIGMASK)
+ set = &current->saved_sigmask;
+
++ sigcopy = *set;
++
+ /* Set up the stack frame */
+ if (is_ia32) {
+ if (ka->sa.sa_flags & SA_SIGINFO)
+- ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
++ ret = ia32_setup_rt_frame(usig, ka, info, &sigcopy, regs);
+ else
+- ret = ia32_setup_frame(usig, ka, set, regs);
++ ret = ia32_setup_frame(usig, ka, &sigcopy, regs);
+ } else
+- ret = __setup_rt_frame(sig, ka, info, set, regs);
++ ret = __setup_rt_frame(sig, ka, info, &sigcopy, regs);
+
+ if (ret) {
+ force_sigsegv(sig, current);
+@@ -769,7 +775,7 @@ static void do_signal(struct pt_regs *regs)
+ * X86_32: vm86 regs switched out by assembly code before reaching
+ * here, so testing against kernel CS suffices.
+ */
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ return;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
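
The align_sigframe() tweak above turns ((sp + 4) & -16ul) - 4 into ((sp - 12) & -16ul) - 4, which is the same result minus 16: both preserve the i386 ABI invariant ((sp + 4) & 15) == 0, but the old form could hand back sp unchanged when it was already aligned, while the new one always lands strictly below the input. A quick numeric check; the sample stack pointers are arbitrary.

    #include <stdio.h>

    int main(void)
    {
        unsigned long samples[] = { 0xbfff1000UL, 0xbfff100cUL, 0xbfff1005UL };

        for (int i = 0; i < 3; i++) {
            unsigned long sp    = samples[i];
            unsigned long oldsp = ((sp + 4)  & -16UL) - 4;
            unsigned long newsp = ((sp - 12) & -16UL) - 4;

            printf("sp=%#lx old=%#lx new=%#lx aligned=%d/%d\n",
                   sp, oldsp, newsp,
                   ((oldsp + 4) & 15) == 0, ((newsp + 4) & 15) == 0);
        }
        return 0;   /* note old == sp for the already-aligned 0x...100c case */
    }
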
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 16204dc..0e7d4b7 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -225,7 +225,7 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
+ irq_exit();
+ }
+
+-struct smp_ops smp_ops = {
++struct smp_ops smp_ops __read_only = {
+ .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = native_smp_prepare_cpus,
+ .smp_cpus_done = native_smp_cpus_done,
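
smp.c above marks the smp_ops function-pointer table __read_only, a PaX/grsecurity annotation that moves it into read-only memory so its pointers cannot be redirected at runtime. The plain-C analogue of the idea is simply const placement into .rodata:

    #include <stdio.h>

    struct ops {
        void (*greet)(void);
    };

    static void hello(void) { puts("hello"); }

    /* const => .rodata; a stray write faults instead of hijacking greet */
    static const struct ops my_ops = { .greet = hello };

    int main(void)
    {
        my_ops.greet();
        /* ((struct ops *)&my_ops)->greet = NULL;  // would SIGSEGV */
        return 0;
    }
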
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9f548cb..caf76f7 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
+ set_idle_for_cpu(cpu, c_idle.idle);
+ do_rest:
+ per_cpu(current_task, cpu) = c_idle.idle;
++ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
+ #ifdef CONFIG_X86_32
+ /* Stack for startup_32 can be just as for start_secondary onwards */
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(c_idle.idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
+ #endif
++
++ pax_open_kernel();
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
++ pax_close_kernel();
++
+ initial_code = (unsigned long)start_secondary;
+ stack_start = c_idle.idle->thread.sp;
+
+@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++#endif
++
+ err = do_boot_cpu(apicid, cpu);
+ if (err) {
+ pr_debug("do_boot_cpu failed %d\n", err);
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index d4f278e..86c58c0 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ struct desc_struct *desc;
+ unsigned long base;
+
+- seg &= ~7UL;
++ seg >>= 3;
+
+ mutex_lock(&child->mm->context.lock);
+- if (unlikely((seg >> 3) >= child->mm->context.size))
++ if (unlikely(seg >= child->mm->context.size))
+ addr = -1L; /* bogus selector, access would fault */
+ else {
+ desc = child->mm->context.ldt + seg;
+@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ addr += base;
+ }
+ mutex_unlock(&child->mm->context.lock);
+- }
++ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
++ addr = ktla_ktva(addr);
+
+ return addr;
+ }
+@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+ unsigned char opcode[15];
+ unsigned long addr = convert_ip_to_linear(child, regs);
+
++ if (addr == -EINVAL)
++ return 0;
++
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ for (i = 0; i < copied; i++) {
+ switch (opcode[i]) {
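
The step.c change above replaces seg &= ~7UL with seg >>= 3 because an x86 segment selector packs three fields: bits 0-1 are the RPL, bit 2 selects GDT vs LDT, and bits 3 and up are the descriptor index, which is what the LDT lookup needs. A small decoder; 0x73 is the usual 32-bit Linux user code selector.

    #include <stdio.h>

    int main(void)
    {
        unsigned short sel = 0x73;

        unsigned rpl   = sel & 3;           /* requested privilege level */
        unsigned table = (sel >> 2) & 1;    /* 0 = GDT, 1 = LDT */
        unsigned index = sel >> 3;          /* what the patch now computes */

        printf("selector %#x: index=%u table=%s rpl=%u\n",
               sel, index, table ? "LDT" : "GDT", rpl);
        return 0;
    }
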
+diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
+index 0b0cb5f..560d0df 100644
+--- a/arch/x86/kernel/sys_i386_32.c
++++ b/arch/x86/kernel/sys_i386_32.c
+@@ -24,17 +24,228 @@
+
+ #include <asm/syscalls.h>
+
+-/*
+- * Do a system call from kernel instead of calling sys_execve so we
+- * end up with proper pt_regs.
+- */
+-int kernel_execve(const char *filename,
+- const char *const argv[],
+- const char *const envp[])
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+ {
+- long __res;
+- asm volatile ("int $0x80"
+- : "=a" (__res)
+- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
+- return __res;
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (flags & MAP_FIXED)
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ return addr;
++ }
++ }
++ if (len > mm->cached_hole_size) {
++ start_addr = addr = mm->free_area_cache;
++ } else {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
++ start_addr = 0x00110000UL;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ start_addr += mm->delta_mmap & 0x03FFF000UL;
++#endif
++
++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
++ start_addr = addr = mm->mmap_base;
++ else
++ addr = start_addr;
++ }
++#endif
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (pax_task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ break;
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++ addr = vma->vm_end;
++ if (mm->start_brk <= addr && addr < mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
++ goto full_search;
++ }
++ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++}
++
++unsigned long
++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
++ const unsigned long len, const unsigned long pgoff,
++ const unsigned long flags)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ /* requested length too big for entire address space */
++ if (len > pax_task_size)
++ return -ENOMEM;
++
++ if (flags & MAP_FIXED)
++ return addr;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
++ goto bottomup;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
++ /* requesting a specific address */
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ return addr;
++ }
++ }
++
++ /* check if free_area_cache is useful for us */
++ if (len <= mm->cached_hole_size) {
++ mm->cached_hole_size = 0;
++ mm->free_area_cache = mm->mmap_base;
++ }
++
++ /* either no address requested or can't fit in requested address hole */
++ addr = mm->free_area_cache;
++
++ /* make sure it can fit in the remaining address space */
++ if (addr > len) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++ }
++
++ if (mm->mmap_base < len)
++ goto bottomup;
++
++ addr = mm->mmap_base-len;
++
++ do {
++ /*
++ * Lookup failure means no vma is above this address,
++ * else if new region fits below vma->vm_start,
++ * return with success:
++ */
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ /* remember the address as a hint for next time */
++ return (mm->free_area_cache = addr);
++
++ /* remember the largest hole we saw so far */
++ if (addr + mm->cached_hole_size < vma->vm_start)
++ mm->cached_hole_size = vma->vm_start - addr;
++
++ /* try just below the current vma->vm_start */
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
++
++bottomup:
++ /*
++ * A failed mmap() very likely causes application failure,
++ * so fall back to the bottom-up function here. This scenario
++ * can happen with large stack limits and large mmap()
++ * allocations.
++ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
++ /*
++ * Restore the topdown base:
++ */
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
++ mm->cached_hole_size = ~0UL;
++
++ return addr;
+ }
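
The rewritten i386 allocator above leans on check_heap_stack_gap(), a grsecurity helper not shown in this hunk. Its core idea can be modelled in a few lines: a candidate range is usable only if it fits below the next mapping with a guard gap to spare. The guard size and addresses below are arbitrary.

    #include <stdbool.h>
    #include <stdio.h>

    static bool gap_ok(unsigned long addr, unsigned long len,
                       unsigned long next_start, unsigned long guard)
    {
        if (next_start == 0)                   /* nothing mapped above */
            return true;
        return addr + len + guard <= next_start;
    }

    int main(void)
    {
        unsigned long guard = 16UL * 4096;     /* 16-page guard, for example */

        printf("%d\n", gap_ok(0x7fff0000UL, 0x10000, 0x80000000UL, guard)); /* 0: too close */
        printf("%d\n", gap_ok(0x7ff00000UL, 0x10000, 0x80000000UL, guard)); /* 1: fits */
        return 0;
    }
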
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index cdb2fc9..a7264e0 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -95,8 +95,8 @@ out:
+ return error;
+ }
+
+-static void find_start_end(unsigned long flags, unsigned long *begin,
+- unsigned long *end)
++static void find_start_end(struct mm_struct *mm, unsigned long flags,
++ unsigned long *begin, unsigned long *end)
+ {
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
+ unsigned long new_begin;
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ *begin = new_begin;
+ }
+ } else {
+- *begin = current->mm->mmap_legacy_base;
++ *begin = mm->mmap_legacy_base;
+ *end = TASK_SIZE;
+ }
+ }
+@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+ unsigned long begin, end;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+- find_start_end(flags, &begin, &end);
++ find_start_end(mm, flags, &begin, &end);
+
+ if (len > end)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -172,7 +176,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
+ goto bottomup;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -232,7 +242,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ ALIGN_TOPDOWN);
+
+ vma = find_vma(mm, tmp_addr);
+- if (!vma || tmp_addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &tmp_addr, len, offset))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = tmp_addr;
+ }
+@@ -251,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -260,8 +270,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -270,13 +280,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
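
The topdown loop above now ends on !IS_ERR_VALUE(addr): skip_heap_stack_gap() reports failure by returning an encoded -errno instead of an address, using the kernel convention that the top 4095 values of the address space are never valid results. A compact rendition of that convention; find_slot() is invented.

    #include <stdio.h>

    #define MAX_ERRNO 4095UL
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static unsigned long find_slot(int fail)
    {
        if (fail)
            return (unsigned long)-12;    /* -ENOMEM folded into the return */
        return 0x7f0000000000UL;          /* a plausible address */
    }

    int main(void)
    {
        unsigned long a = find_slot(0), b = find_slot(1);

        printf("a: error=%d\n", IS_ERR_VALUE(a));                      /* 0 */
        printf("b: error=%d, errno=%ld\n", IS_ERR_VALUE(b), -(long)b); /* 1, 12 */
        return 0;
    }
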
+diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
+index 9a0e312..e6f66f2 100644
+--- a/arch/x86/kernel/syscall_table_32.S
++++ b/arch/x86/kernel/syscall_table_32.S
+@@ -1,3 +1,4 @@
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index e2410e2..b98a4fd 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
+
+ void tboot_shutdown(u32 shutdown_type)
+ {
+- void (*shutdown)(void);
++ void (* __noreturn shutdown)(void);
+
+ if (!tboot_enabled())
+ return;
+@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
+
+ switch_to_tboot_pt();
+
+- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
++ shutdown = (void *)tboot->shutdown_entry;
+ shutdown();
+
+ /* should not reach here */
+@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
+ tboot_shutdown(acpi_shutdown_map[sleep_state]);
+ }
+
+-static atomic_t ap_wfs_count;
++static atomic_unchecked_t ap_wfs_count;
+
+ static int tboot_wait_for_aps(int num_aps)
+ {
+@@ -322,16 +322,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
+ {
+ switch (action) {
+ case CPU_DYING:
+- atomic_inc(&ap_wfs_count);
++ atomic_inc_unchecked(&ap_wfs_count);
+ if (num_online_cpus() == 1)
+- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
++ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
+ return NOTIFY_BAD;
+ break;
+ }
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
++static struct notifier_block tboot_cpu_notifier =
+ {
+ .notifier_call = tboot_cpu_callback,
+ };
+@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
+
+ tboot_create_trampoline();
+
+- atomic_set(&ap_wfs_count, 0);
++ atomic_set_unchecked(&ap_wfs_count, 0);
+ register_hotcpu_notifier(&tboot_cpu_notifier);
+ return 0;
+ }
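
tboot.c above converts ap_wfs_count to atomic_unchecked_t. Under PaX REFCOUNT, plain atomics trap on overflow to catch reference-count bugs; the _unchecked variant opts a simple event counter out of that protection because wrapping is harmless there. The usage itself is an ordinary atomic increment, as in this C11 sketch (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ap_wfs_count;            /* zero-initialized */

    static void *cpu_dying(void *arg)
    {
        (void)arg;
        atomic_fetch_add(&ap_wfs_count, 1);    /* cf. atomic_inc_unchecked() */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, cpu_dying, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);

        printf("%d threads counted\n", atomic_load(&ap_wfs_count));
        return 0;
    }
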
+diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
+index dd5fbf4..b7f2232 100644
+--- a/arch/x86/kernel/time.c
++++ b/arch/x86/kernel/time.c
+@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+
+- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ if (!user_mode(regs) && in_lock_functions(pc)) {
+ #ifdef CONFIG_FRAME_POINTER
+- return *(unsigned long *)(regs->bp + sizeof(long));
++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
+ #else
+ unsigned long *sp =
+ (unsigned long *)kernel_stack_pointer(regs);
+@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
+ * or above a saved flags. Eflags has bits 22-31 zero,
+ * kernel addresses don't.
+ */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return ktla_ktva(sp[0]);
++#else
+ if (sp[0] >> 22)
+ return sp[0];
+ if (sp[1] >> 22)
+ return sp[1];
+ #endif
++
++#endif
+ }
+ return pc;
+ }
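
profile_pc() above keeps the stock heuristic visible in the context lines: a saved EFLAGS image always has bits 22-31 clear, while i386 kernel text sits at or above PAGE_OFFSET, so value >> 22 being nonzero separates a return address from a flags word. Demonstrated on sample values; the kernel address is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    static int looks_like_kernel_address(uint32_t word)
    {
        return (word >> 22) != 0;      /* the same test the kernel applies */
    }

    int main(void)
    {
        uint32_t saved_eflags = 0x00000246;  /* a typical EFLAGS image */
        uint32_t kernel_pc    = 0xc10451b0;  /* hypothetical text address */

        printf("eflags -> %d\n", looks_like_kernel_address(saved_eflags)); /* 0 */
        printf("pc     -> %d\n", looks_like_kernel_address(kernel_pc));    /* 1 */
        return 0;
    }
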
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index bcfec2d..aeb81c2 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
++ return -EINVAL;
++#endif
++
+ set_tls_desc(p, idx, &info, 1);
+
+ return 0;
+@@ -205,7 +210,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+
+ if (kbuf)
+ info = kbuf;
+- else if (__copy_from_user(infobuf, ubuf, count))
++ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
+ return -EFAULT;
+ else
+ info = infobuf;
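
The regset_tls_set() hunk above adds a size check before __copy_from_user() so a caller-supplied count can never overrun the fixed infobuf array. The pattern in isolation, with the struct and constants simplified and -14 standing in for -EFAULT:

    #include <stdio.h>
    #include <string.h>

    #define GDT_ENTRY_TLS_ENTRIES 3

    struct user_desc { unsigned entry_number, base_addr, limit, flags; };

    static int set_tls(const void *ubuf, size_t count)
    {
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];

        if (count > sizeof(infobuf))     /* the check the patch adds */
            return -14;
        memcpy(infobuf, ubuf, count);    /* stands in for __copy_from_user */
        return 0;
    }

    int main(void)
    {
        char src[4096] = { 0 };

        printf("small copy: %d\n", set_tls(src, sizeof(struct user_desc)));
        printf("oversized:  %d\n", set_tls(src, sizeof(src)));
        return 0;
    }
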
+diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
+index 451c0a7..e57f551 100644
+--- a/arch/x86/kernel/trampoline_32.S
++++ b/arch/x86/kernel/trampoline_32.S
+@@ -32,6 +32,12 @@
+ #include <asm/segment.h>
+ #include <asm/page_types.h>
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define ta(X) (X)
++#else
++#define ta(X) ((X) - __PAGE_OFFSET)
++#endif
++
+ #ifdef CONFIG_SMP
+
+ .section ".x86_trampoline","a"
+@@ -62,7 +68,7 @@ r_base = .
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $ta(startup_32_smp)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
+index 09ff517..df19fbff 100644
+--- a/arch/x86/kernel/trampoline_64.S
++++ b/arch/x86/kernel/trampoline_64.S
+@@ -90,7 +90,7 @@ startup_32:
+ movl $__KERNEL_DS, %eax # Initialize the %ds segment register
+ movl %eax, %ds
+
+- movl $X86_CR4_PAE, %eax
++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
+ movl %eax, %cr4 # Enable PAE mode
+
+ # Setup trampoline 4 level pagetables
+@@ -138,7 +138,7 @@ tidt:
+ # so the kernel can live anywhere
+ .balign 4
+ tgdt:
+- .short tgdt_end - tgdt # gdt limit
++ .short tgdt_end - tgdt - 1 # gdt limit
+ .long tgdt - r_base
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
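
The one-byte change to tgdt above reflects how the CPU reads a descriptor-table pseudo-descriptor: the 16-bit limit is the offset of the last valid byte, i.e. table size minus one. Sketched as a C structure; the code-segment descriptor value is copied from the trampoline above, the data descriptor is an example.

    #include <stdint.h>
    #include <stdio.h>

    struct __attribute__((packed)) gdt_ptr {
        uint16_t limit;    /* inclusive: sizeof(table) - 1 */
        uint64_t base;
    };

    int main(void)
    {
        uint64_t gdt[3] = { 0, 0x00cf9b000000ffffULL,   /* __KERNEL32_CS */
                               0x00cf93000000ffffULL }; /* data, for example */
        struct gdt_ptr gp = {
            .limit = sizeof(gdt) - 1,                   /* 23, not 24 */
            .base  = (uint64_t)(uintptr_t)gdt,
        };

        printf("limit=%u for a %zu-byte table\n", gp.limit, sizeof(gdt));
        return 0;
    }
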
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index e6fbb94..75e9d8c 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq;
+-
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.
+- */
+-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+ #endif
+
+ DECLARE_BITMAP(used_vectors, NR_VECTORS);
+@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ }
+
+ static void __kprobes
+-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
+ long error_code, siginfo_t *info)
+ {
+ struct task_struct *tsk = current;
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ /*
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ }
+ #endif
+
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto kernel_trap;
+
+ #ifdef CONFIG_X86_32
+@@ -148,7 +142,7 @@ trap_signal:
+ printk_ratelimit()) {
+ printk(KERN_INFO
+ "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+- tsk->comm, tsk->pid, str,
++ tsk->comm, task_pid_nr(tsk), str,
+ regs->ip, regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
+@@ -165,8 +159,20 @@ kernel_trap:
+ if (!fixup_exception(regs)) {
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
++ str = "PAX: suspicious stack segment fault";
++#endif
++
+ die(str, regs, error_code);
+ }
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (trapnr == 4)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ return;
+
+ #ifdef CONFIG_X86_32
+@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ conditional_sti(regs);
+
+ #ifdef CONFIG_X86_32
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ goto gp_in_vm86;
+ #endif
+
+ tsk = current;
+- if (!user_mode(regs))
++ if (!user_mode_novm(regs))
+ goto gp_in_kernel;
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = tsk->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = X86_TRAP_GP;
+
+@@ -299,6 +321,13 @@ gp_in_kernel:
+ if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+ X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
+ return;
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+
+@@ -419,7 +448,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ /* It's safe to allow irq's after DR6 has been saved */
+ preempt_conditional_sti(regs);
+
+- if (regs->flags & X86_VM_MASK) {
++ if (v8086_mode(regs)) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+ X86_TRAP_DB);
+ preempt_conditional_cli(regs);
+@@ -433,7 +462,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ * We already checked v86 mode above, so we can check for kernel mode
+ * by just checking the CPL of CS.
+ */
+- if ((dr6 & DR_STEP) && !user_mode(regs)) {
++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
+ tsk->thread.debugreg6 &= ~DR_STEP;
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->flags &= ~X86_EFLAGS_TF;
+@@ -463,7 +492,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ return;
+ conditional_sti(regs);
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+@@ -576,8 +605,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+ void __math_state_restore(struct task_struct *tsk)
+ {
+ /* We need a safe address that is cheap to find and that is already
+- in L1. We've just brought in "tsk->thread.has_fpu", so use that */
+-#define safe_address (tsk->thread.has_fpu)
++ in L1. */
++#define safe_address (init_tss[raw_smp_processor_id()].x86_tss.sp0)
+
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index b9242ba..50c5edd 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -20,6 +20,7 @@
+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
+ * arch/x86/kernel/head_32.S: processor startup
++ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
+ *
+ * verify_cpu, returns the status of longmode and SSE in register %eax.
+ * 0: Success 1: Failure
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 04b8726..0c35b29 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -41,6 +41,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/audit.h>
+ #include <linux/stddef.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+ do_exit(SIGSEGV);
+ }
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &current->thread);
+@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
+ struct task_struct *tsk;
+ int tmp, ret = -EPERM;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ if (tsk->thread.saved_sp0)
+ goto out;
+@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
+ int tmp, ret;
+ struct vm86plus_struct __user *v86;
+
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ ret = -EPERM;
++ goto out;
++ }
++#endif
++
+ tsk = current;
+ switch (cmd) {
+ case VM86_REQUEST_IRQ:
+@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
+ tsk->thread.saved_fs = info->regs32->fs;
+ tsk->thread.saved_gs = get_user_gs(info->regs32);
+
+- tss = &per_cpu(init_tss, get_cpu());
++ tss = init_tss + get_cpu();
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+@@ -531,7 +547,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+ goto cannot_handle;
+ if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ goto cannot_handle;
+- intr_ptr = (unsigned long __user *) (i << 2);
++ intr_ptr = (__force unsigned long __user *) (i << 2);
+ if (get_user(segoffs, intr_ptr))
+ goto cannot_handle;
+ if ((segoffs >> 16) == BIOSSEG)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 0f703f1..cd7e91b 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -26,6 +26,13 @@
+ #include <asm/page_types.h>
+ #include <asm/cache.h>
+ #include <asm/boot.h>
++#include <asm/segment.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
++#else
++#define __KERNEL_TEXT_OFFSET 0
++#endif
+
+ #undef i386 /* in case the preprocessor is a 32bit one */
+
+@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
+
+ PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
++#ifdef CONFIG_X86_32
++ module PT_LOAD FLAGS(5); /* R_E */
++#endif
++#ifdef CONFIG_XEN
++ rodata PT_LOAD FLAGS(5); /* R_E */
++#else
++ rodata PT_LOAD FLAGS(4); /* R__ */
++#endif
+ data PT_LOAD FLAGS(6); /* RW_ */
+-#ifdef CONFIG_X86_64
++ init.begin PT_LOAD FLAGS(6); /* RW_ */
+ #ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(6); /* RW_ */
+ #endif
++ text.init PT_LOAD FLAGS(5); /* R_E */
++ text.exit PT_LOAD FLAGS(5); /* R_E */
+ init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ note PT_NOTE FLAGS(0); /* ___ */
+ }
+
+ SECTIONS
+ {
+ #ifdef CONFIG_X86_32
+- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
+- phys_startup_32 = startup_32 - LOAD_OFFSET;
++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
+ #else
+- . = __START_KERNEL;
+- phys_startup_64 = startup_64 - LOAD_OFFSET;
++ . = __START_KERNEL;
+ #endif
+
+ /* Text and read-only data */
+- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+- _text = .;
++ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+ /* bootstrapping code */
++#ifdef CONFIG_X86_32
++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#else
++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++#endif
++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
++ _text = .;
+ HEAD_TEXT
+ #ifdef CONFIG_X86_32
+ . = ALIGN(PAGE_SIZE);
+@@ -108,13 +128,48 @@ SECTIONS
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+- /* End of text section */
+- _etext = .;
+ } :text = 0x9090
+
+- NOTES :text :note
++ . += __KERNEL_TEXT_OFFSET;
+
+- EXCEPTION_TABLE(16) :text = 0x9090
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ MODULES_EXEC_VADDR = .;
++ BYTE(0)
++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
++ . = ALIGN(HPAGE_SIZE) - 1;
++ MODULES_EXEC_END = .;
++#endif
++
++ } :module
++#endif
++
++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
++ /* End of text section */
++ BYTE(0)
++ _etext = . - __KERNEL_TEXT_OFFSET;
++ }
++
++#ifdef CONFIG_X86_32
++ . = ALIGN(PAGE_SIZE);
++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
++ *(.idt)
++ . = ALIGN(PAGE_SIZE);
++ *(.empty_zero_page)
++ *(.initial_pg_fixmap)
++ *(.initial_pg_pmd)
++ *(.initial_page_table)
++ *(.swapper_pg_dir)
++ } :rodata
++#endif
++
++ . = ALIGN(PAGE_SIZE);
++ NOTES :rodata :note
++
++ EXCEPTION_TABLE(16) :rodata
+
+ #if defined(CONFIG_DEBUG_RODATA)
+ /* .text should occupy whole number of pages */
+@@ -126,16 +181,20 @@ SECTIONS
+
+ /* Data */
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ /* Start of data section */
+ _sdata = .;
+
+ /* init_task */
+ INIT_TASK_DATA(THREAD_SIZE)
+
+-#ifdef CONFIG_X86_32
+- /* 32 bit has nosave before _edata */
+ NOSAVE_DATA
+-#endif
+
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+
+@@ -176,12 +235,19 @@ SECTIONS
+ #endif /* CONFIG_X86_64 */
+
+ /* Init code and data - will be freed after init */
+- . = ALIGN(PAGE_SIZE);
+ .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
++ BYTE(0)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
++ . = ALIGN(PAGE_SIZE);
++#endif
++
+ __init_begin = .; /* paired with __init_end */
+- }
++ } :init.begin
+
+-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - .init.text - should
+@@ -190,12 +256,27 @@ SECTIONS
+ PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
+ #endif
+
+- INIT_TEXT_SECTION(PAGE_SIZE)
+-#ifdef CONFIG_X86_64
+- :init
+-#endif
++ . = ALIGN(PAGE_SIZE);
++ init_begin = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
++ VMLINUX_SYMBOL(_sinittext) = .;
++ INIT_TEXT
++ VMLINUX_SYMBOL(_einittext) = .;
++ . = ALIGN(PAGE_SIZE);
++ } :text.init
+
+- INIT_DATA_SECTION(16)
++ /*
++	 * .exit.text is discarded at runtime, not link time, to deal with
++ * references from .altinstructions and .eh_frame
++ */
++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
++ EXIT_TEXT
++ . = ALIGN(16);
++ } :text.exit
++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
++
++ . = ALIGN(PAGE_SIZE);
++ INIT_DATA_SECTION(16) :init
+
+ /*
+ * Code and data for a variety of lowlevel trampolines, to be
+@@ -269,19 +350,12 @@ SECTIONS
+ }
+
+ . = ALIGN(8);
+- /*
+- * .exit.text is discard at runtime, not link time, to deal with
+- * references from .altinstructions and .eh_frame
+- */
+- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+- EXIT_TEXT
+- }
+
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
+
+-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
++#ifndef CONFIG_SMP
+ PERCPU_SECTION(INTERNODE_CACHE_BYTES)
+ #endif
+
+@@ -300,16 +374,10 @@ SECTIONS
+ .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ __smp_locks = .;
+ *(.smp_locks)
+- . = ALIGN(PAGE_SIZE);
+ __smp_locks_end = .;
++ . = ALIGN(PAGE_SIZE);
+ }
+
+-#ifdef CONFIG_X86_64
+- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+- NOSAVE_DATA
+- }
+-#endif
+-
+ /* BSS */
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+@@ -325,6 +393,7 @@ SECTIONS
+ __brk_base = .;
+ . += 64 * 1024; /* 64k alignment slop space */
+ *(.brk_reservation) /* areas brk users have reserved */
++ . = ALIGN(HPAGE_SIZE);
+ __brk_limit = .;
+ }
+
+@@ -351,13 +420,12 @@ SECTIONS
+ * for the boot processor.
+ */
+ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+-INIT_PER_CPU(gdt_page);
+ INIT_PER_CPU(irq_stack_union);
+
+ /*
+ * Build-time check on the image size:
+ */
+-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
+ "kernel image bigger than KERNEL_IMAGE_SIZE");
+
+ #ifdef CONFIG_SMP
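
The vmlinux.lds.S surgery above is all about which output section, and therefore which page permissions, each symbol lands in. On a much smaller scale, compilers expose the same mechanism to C code, and the linker script decides where such a section is placed; .my_rodata below is made up.

    #include <stdio.h>

    /* opt this object into a custom section */
    __attribute__((section(".my_rodata")))
    static const int magic = 42;

    int main(void)
    {
        printf("magic=%d at %p\n", magic, (const void *)&magic);
        /* 'readelf -S ./a.out | grep my_rodata' shows the new section */
        return 0;
    }
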
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index e4d4a22..47ee71f 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
+ .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+ };
+
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+
+ static int __init vsyscall_setup(char *str)
+ {
+ if (str) {
+ if (!strcmp("emulate", str))
+ vsyscall_mode = EMULATE;
+- else if (!strcmp("native", str))
+- vsyscall_mode = NATIVE;
+ else if (!strcmp("none", str))
+ vsyscall_mode = NONE;
+ else
+@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+
+ tsk = current;
+ if (seccomp_mode(&tsk->seccomp))
+- do_exit(SIGKILL);
++ do_group_exit(SIGKILL);
+
+ switch (vsyscall_nr) {
+ case 0:
+@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+ return true;
+
+ sigsegv:
+- force_sig(SIGSEGV, current);
+- return true;
++ do_group_exit(SIGKILL);
+ }
+
+ /*
+@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
+ extern char __vvar_page;
+ unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
+
+- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+- vsyscall_mode == NATIVE
+- ? PAGE_KERNEL_VSYSCALL
+- : PAGE_KERNEL_VVAR);
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
+ (unsigned long)VSYSCALL_START);
+
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index 9796c2f..f686fbf 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
+ EXPORT_SYMBOL(copy_user_generic_string);
+ EXPORT_SYMBOL(copy_user_generic_unrolled);
+ EXPORT_SYMBOL(__copy_user_nocache);
+-EXPORT_SYMBOL(_copy_from_user);
+-EXPORT_SYMBOL(_copy_to_user);
+
+ EXPORT_SYMBOL(copy_page);
+ EXPORT_SYMBOL(clear_page);
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index c1d6cd5..204ac00 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -90,14 +90,14 @@ struct x86_init_ops x86_init __initdata = {
+ },
+ };
+
+-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
++struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
+ .setup_percpu_clockev = setup_secondary_APIC_clock,
+ };
+
+ static void default_nmi_init(void) { };
+ static int default_i8042_detect(void) { return 1; };
+
+-struct x86_platform_ops x86_platform = {
++struct x86_platform_ops x86_platform __read_only = {
+ .calibrate_tsc = native_calibrate_tsc,
+ .wallclock_init = wallclock_init_noop,
+ .get_wallclock = mach_get_cmos_time,
+@@ -110,7 +110,7 @@ struct x86_platform_ops x86_platform = {
+ };
+
+ EXPORT_SYMBOL_GPL(x86_platform);
+-struct x86_msi_ops x86_msi = {
++struct x86_msi_ops x86_msi __read_only = {
+ .setup_msi_irqs = native_setup_msi_irqs,
+ .teardown_msi_irq = native_teardown_msi_irq,
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 7110911..069da9c 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
+ fx_sw_user->xstate_size > fx_sw_user->extended_size)
+ return -EINVAL;
+
+- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
+ fx_sw_user->extended_size -
+ FP_XSTATE_MAGIC2_SIZE));
+ if (err)
+@@ -266,7 +266,7 @@ fx_only:
+ * the other extended state.
+ */
+ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
+- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
++ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
+ }
+
+ /*
+@@ -295,8 +295,7 @@ int restore_i387_xstate(void __user *buf)
+ if (use_xsave())
+ err = restore_user_xstate(buf);
+ else
+- err = fxrstor_checking((__force struct i387_fxsave_struct *)
+- buf);
++ err = fxrstor_checking((struct i387_fxsave_struct __user *)buf);
+ if (unlikely(err)) {
+ /*
+ * Encountered an error while doing the restore from the
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index f5302da..6ee193e 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -249,6 +249,7 @@ struct gprefix {
+
+ #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
+ do { \
++ unsigned long _tmp; \
+ __asm__ __volatile__ ( \
+ _PRE_EFLAGS("0", "4", "2") \
+ _op _suffix " %"_x"3,%1; " \
+@@ -263,8 +264,6 @@ struct gprefix {
+ /* Raw emulation: instruction has two explicit operands. */
+ #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+- \
+ switch ((ctxt)->dst.bytes) { \
+ case 2: \
+ ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
+@@ -280,7 +279,6 @@ struct gprefix {
+
+ #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
+ do { \
+- unsigned long _tmp; \
+ switch ((ctxt)->dst.bytes) { \
+ case 1: \
+ ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
+@@ -383,8 +381,7 @@ struct gprefix {
+ _ASM_EXTABLE(1b, 3b) \
+ : "=m" ((ctxt)->eflags), "=&r" (_tmp), \
+ "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
+- : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val), \
+- "a" (*rax), "d" (*rdx)); \
++ : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \
+ } while (0)
+
+ /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 43e7753..873f4440 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -53,7 +53,7 @@
+ #define APIC_BUS_CYCLE_NS 1
+
+ /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
+-#define apic_debug(fmt, arg...)
++#define apic_debug(fmt, arg...) do {} while (0)
+
+ #define APIC_LVT_NUM 6
+ /* 14 is the version for Xeon and Pentium 8.4.8*/
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index f1b36cf..af8a124 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+
+ pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+
+- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
++ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ /*
+ * Assume that the pte write on a page table of the same type
+@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ }
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
++ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+ gentry = 0;
+ kvm_mmu_free_some_pages(vcpu);
+ ++vcpu->kvm->stat.mmu_pte_write;
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 9299410..ade2f9b 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -197,7 +197,7 @@ retry_walk:
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
+
+- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
++ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
+
+@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+ if (need_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
+- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
++ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
+
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 94a4672..5c6b853 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3405,7 +3405,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++
++ pax_open_kernel();
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -3783,6 +3787,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ #endif
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ reload_tss(vcpu);
+
+ local_irq_disable();
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index aac5ea7..266eda9 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1099,12 +1099,12 @@ static void vmcs_write64(unsigned long field, u64 value)
+ #endif
+ }
+
+-static void vmcs_clear_bits(unsigned long field, u32 mask)
++static void vmcs_clear_bits(unsigned long field, unsigned long mask)
+ {
+ vmcs_writel(field, vmcs_readl(field) & ~mask);
+ }
+
+-static void vmcs_set_bits(unsigned long field, u32 mask)
++static void vmcs_set_bits(unsigned long field, unsigned long mask)
+ {
+ vmcs_writel(field, vmcs_readl(field) | mask);
+ }
+@@ -1305,7 +1305,11 @@ static void reload_tss(void)
+ struct desc_struct *descs;
+
+ descs = (void *)gdt->address;
++
++ pax_open_kernel();
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
++ pax_close_kernel();
++
+ load_TR_desc();
+ }
+
+@@ -1504,6 +1508,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
+ vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
++
+ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ vmx->loaded_vmcs->cpu = cpu;
+@@ -2634,8 +2642,11 @@ static __init int hardware_setup(void)
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+- if (!cpu_has_vmx_tpr_shadow())
+- kvm_x86_ops->update_cr8_intercept = NULL;
++ if (!cpu_has_vmx_tpr_shadow()) {
++ pax_open_kernel();
++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
++ pax_close_kernel();
++ }
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+@@ -3637,7 +3648,10 @@ static void vmx_set_constant_host_state(void)
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
++#endif
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+@@ -3649,7 +3663,7 @@ static void vmx_set_constant_host_state(void)
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
+- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
++ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+@@ -6178,6 +6192,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ ".Lkvm_vmx_return: "
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
++ ".Lkvm_vmx_return2: "
++#endif
++
+ /* Save guest registers, load host registers, keep flags */
+ "mov %0, %c[wordsize](%%"R"sp) \n\t"
+ "pop %0 \n\t"
+@@ -6226,6 +6246,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ ,[cs]"i"(__KERNEL_CS)
++#endif
++
+ : "cc", "memory"
+ , R"ax", R"bx", R"di", R"si"
+ #ifdef CONFIG_X86_64
+@@ -6254,7 +6279,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ }
+ }
+
+- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
++ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ loadsegment(fs, __KERNEL_PERCPU);
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ __set_fs(current_thread_info()->addr_limit);
++#endif
++
+ vmx->loaded_vmcs->launched = 1;
+
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 57867e4..1d5ff81 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1341,8 +1341,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+ {
+ struct kvm *kvm = vcpu->kvm;
+ int lm = is_long_mode(vcpu);
+- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+ u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
+ : kvm->arch.xen_hvm_config.blob_size_32;
+ u32 page_num = data & ~PAGE_MASK;
+@@ -2159,6 +2159,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ if (n < msr_list.nmsrs)
+ goto out;
+ r = -EFAULT;
++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
++ goto out;
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
+ num_msrs_to_save * sizeof(u32)))
+ goto out;
+@@ -2334,15 +2336,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -EFAULT;
+- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < cpuid->nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry;
++ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
++ goto out;
++ vcpu->arch.cpuid_entries[i] = cpuid_entry;
++ }
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+@@ -2357,15 +2364,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- int r;
++ int r, i;
+
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
++ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
++ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
++ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
++ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
++ goto out;
++ }
+ return 0;
+
+ out:
+@@ -2740,7 +2751,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+ {
+- if (irq->irq < 0 || irq->irq >= 256)
++ if (irq->irq >= 256)
+ return -EINVAL;
+ if (irqchip_in_kernel(vcpu->kvm))
+ return -ENXIO;
+@@ -5182,7 +5193,7 @@ static void kvm_set_mmio_spte_mask(void)
+ kvm_mmu_set_mmio_spte_mask(mask);
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int r;
+ struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index 8f4fda4..353d5cc 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+ * Rebooting also tells the Host we're finished, but the RESTART flag tells the
+ * Launcher to reboot us.
+ */
+-static void lguest_restart(char *reason)
++static __noreturn void lguest_restart(char *reason)
+ {
+ hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
++ BUG();
+ }
+
+ /*G:050
+diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
+index 042f682..c92afb6 100644
+--- a/arch/x86/lib/atomic64_32.c
++++ b/arch/x86/lib/atomic64_32.c
+@@ -8,18 +8,30 @@
+
+ long long atomic64_read_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_cx8);
++long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
+ long long atomic64_set_cx8(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_cx8);
++long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
+ long long atomic64_xchg_cx8(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_cx8);
+ long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_cx8);
++long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
+ long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_cx8);
++long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
+ long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_cx8);
++long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
+ long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_cx8);
++long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
+ long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+ int atomic64_inc_not_zero_cx8(atomic64_t *v);
+@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
+ #ifndef CONFIG_X86_CMPXCHG64
+ long long atomic64_read_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_read_386);
++long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_read_unchecked_386);
+ long long atomic64_set_386(long long, const atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_set_386);
++long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_set_unchecked_386);
+ long long atomic64_xchg_386(long long, unsigned high);
+ EXPORT_SYMBOL(atomic64_xchg_386);
+ long long atomic64_add_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_return_386);
++long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
+ long long atomic64_sub_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_return_386);
++long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
+ long long atomic64_inc_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_return_386);
++long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
+ long long atomic64_dec_return_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_return_386);
++long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
+ long long atomic64_add_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_add_386);
++long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_add_unchecked_386);
+ long long atomic64_sub_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_sub_386);
++long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_sub_unchecked_386);
+ long long atomic64_inc_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_inc_386);
++long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_inc_unchecked_386);
+ long long atomic64_dec_386(long long a, atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_386);
++long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
++EXPORT_SYMBOL(atomic64_dec_unchecked_386);
+ long long atomic64_dec_if_positive_386(atomic64_t *v);
+ EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+ int atomic64_inc_not_zero_386(atomic64_t *v);
+diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
+index e8e7e0d..56fd1b0 100644
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -48,6 +48,10 @@ BEGIN(read)
+ movl (v), %eax
+ movl 4(v), %edx
+ RET_ENDP
++BEGIN(read_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -55,6 +59,10 @@ BEGIN(set)
+ movl %ebx, (v)
+ movl %ecx, 4(v)
+ RET_ENDP
++BEGIN(set_unchecked)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_ENDP
+ #undef v
+
+ #define v %esi
+@@ -70,6 +78,20 @@ RET_ENDP
+ BEGIN(add)
+ addl %eax, (v)
+ adcl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(add_unchecked)
++ addl %eax, (v)
++ adcl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -77,6 +99,24 @@ RET_ENDP
+ BEGIN(add_return)
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(add_return_unchecked)
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -86,6 +126,20 @@ RET_ENDP
+ BEGIN(sub)
+ subl %eax, (v)
+ sbbl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl %eax, (v)
++ adcl %edx, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(sub_unchecked)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -96,6 +150,27 @@ BEGIN(sub_return)
+ sbbl $0, %edx
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(sub_return_unchecked)
++ negl %edx
++ negl %eax
++ sbbl $0, %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -105,6 +180,20 @@ RET_ENDP
+ BEGIN(inc)
+ addl $1, (v)
+ adcl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ subl $1, (v)
++ sbbl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(inc_unchecked)
++ addl $1, (v)
++ adcl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -114,6 +203,26 @@ BEGIN(inc_return)
+ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(inc_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ addl $1, %eax
++ adcl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -123,6 +232,20 @@ RET_ENDP
+ BEGIN(dec)
+ subl $1, (v)
+ sbbl $0, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 0f
++ addl $1, (v)
++ adcl $0, 4(v)
++ int $4
++0:
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++RET_ENDP
++BEGIN(dec_unchecked)
++ subl $1, (v)
++ sbbl $0, 4(v)
+ RET_ENDP
+ #undef v
+
+@@ -132,6 +255,26 @@ BEGIN(dec_return)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
++ movl %eax, (v)
++ movl %edx, 4(v)
++
++#ifdef CONFIG_PAX_REFCOUNT
++2:
++#endif
++
++RET_ENDP
++BEGIN(dec_return_unchecked)
++ movl (v), %eax
++ movl 4(v), %edx
++ subl $1, %eax
++ sbbl $0, %edx
+ movl %eax, (v)
+ movl %edx, 4(v)
+ RET_ENDP
+@@ -143,6 +286,13 @@ BEGIN(add_unless)
+ adcl %edx, %edi
+ addl (v), %eax
+ adcl 4(v), %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ cmpl %eax, %esi
+ je 3f
+ 1:
+@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ movl %eax, (v)
+ movl %edx, 4(v)
+ movl $1, %eax
+@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 1f)
++#endif
++
+ js 1f
+ movl %eax, (v)
+ movl %edx, 4(v)
+diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
+index 391a083..3a2cf39 100644
+--- a/arch/x86/lib/atomic64_cx8_32.S
++++ b/arch/x86/lib/atomic64_cx8_32.S
+@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_read_cx8)
+
++ENTRY(atomic64_read_unchecked_cx8)
++ CFI_STARTPROC
++
++ read64 %ecx
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_read_unchecked_cx8)
++
+ ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_set_cx8)
+
++ENTRY(atomic64_set_unchecked_cx8)
++ CFI_STARTPROC
++
++1:
++/* we don't need LOCK_PREFIX since aligned 64-bit writes
++ * are atomic on 586 and newer */
++ cmpxchg8b (%esi)
++ jne 1b
++
++ pax_force_retaddr
++ ret
++ CFI_ENDPROC
++ENDPROC(atomic64_set_unchecked_cx8)
++
+ ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
+ cmpxchg8b (%esi)
+ jne 1b
+
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_xchg_cx8)
+
+-.macro addsub_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro addsub_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+-
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ addsub_return add add adc
+ addsub_return sub sub sbb
++addsub_return add add adc _unchecked
++addsub_return sub sub sbb _unchecked
+
+-.macro incdec_return func ins insc
+-ENTRY(atomic64_\func\()_return_cx8)
++.macro incdec_return func ins insc unchecked=""
++ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++2:
++ _ASM_EXTABLE(2b, 3f)
++#endif
++.endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+-10:
+ movl %ebx, %eax
+ movl %ecx, %edx
++
++.ifb \unchecked
++#ifdef CONFIG_PAX_REFCOUNT
++3:
++#endif
++.endif
++
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_return_cx8)
++ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
+ .endm
+
+ incdec_return inc add adc
+ incdec_return dec sub sbb
++incdec_return inc add adc _unchecked
++incdec_return dec sub sbb _unchecked
+
+ ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 2f)
++#endif
++
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(atomic64_dec_if_positive_cx8)
+@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
++ pax_force_retaddr
+ ret
+ 4:
+ cmpl %edx, 4(%esp)
+@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
++
++#ifdef CONFIG_PAX_REFCOUNT
++ into
++1234:
++ _ASM_EXTABLE(1234b, 3f)
++#endif
++
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
+ movl $1, %eax
+ 3:
+ RESTORE ebx
++ pax_force_retaddr
+ ret
+ 4:
+ testl %edx, %edx
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index 78d16a5..fbcf666 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+-
++#include <asm/segment.h>
++
+ /*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+
+ #define ARGBASE 16
+ #define FP 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ subl $4,%esp
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl_cfi %edi
+@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
+ jmp 4f
+ SRC(1: movw (%esi), %bx )
+ addl $2, %esi
+-DST( movw %bx, (%edi) )
++DST( movw %bx, %es:(%edi) )
+ addl $2, %edi
+ addw %bx, %ax
+ adcl $0, %eax
+@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
+ SRC(1: movl (%esi), %ebx )
+ SRC( movl 4(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 4(%edi) )
++DST( movl %edx, %es:4(%edi) )
+
+ SRC( movl 8(%esi), %ebx )
+ SRC( movl 12(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 8(%edi) )
++DST( movl %ebx, %es:8(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 12(%edi) )
++DST( movl %edx, %es:12(%edi) )
+
+ SRC( movl 16(%esi), %ebx )
+ SRC( movl 20(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 16(%edi) )
++DST( movl %ebx, %es:16(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 20(%edi) )
++DST( movl %edx, %es:20(%edi) )
+
+ SRC( movl 24(%esi), %ebx )
+ SRC( movl 28(%esi), %edx )
+ adcl %ebx, %eax
+-DST( movl %ebx, 24(%edi) )
++DST( movl %ebx, %es:24(%edi) )
+ adcl %edx, %eax
+-DST( movl %edx, 28(%edi) )
++DST( movl %edx, %es:28(%edi) )
+
+ lea 32(%esi), %esi
+ lea 32(%edi), %edi
+@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
+ shrl $2, %edx # This clears CF
+ SRC(3: movl (%esi), %ebx )
+ adcl %ebx, %eax
+-DST( movl %ebx, (%edi) )
++DST( movl %ebx, %es:(%edi) )
+ lea 4(%esi), %esi
+ lea 4(%edi), %edi
+ dec %edx
+@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
+ jb 5f
+ SRC( movw (%esi), %cx )
+ leal 2(%esi), %esi
+-DST( movw %cx, (%edi) )
++DST( movw %cx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%ecx
+ SRC(5: movb (%esi), %cl )
+-DST( movb %cl, (%edi) )
++DST( movb %cl, %es:(%edi) )
+ 6: addl %ecx, %eax
+ adcl $0, %eax
+ 7:
+@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
+
+ 6001:
+ movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+
+ # zero the complete destination - computing the rest
+ # is too much work
+@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
+
+ 6002:
+ movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT,(%ebx)
++ movl $-EFAULT,%ss:(%ebx)
+ jmp 5000b
+
+ .previous
+
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
+ popl_cfi %ebx
+ CFI_RESTORE ebx
+ popl_cfi %esi
+@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
+ popl_cfi %ecx # equivalent to addl $4,%esp
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #else
+
+ /* Version for PentiumII/PPro */
+
+ #define ROUND1(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ addl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ROUND(x) \
++ nop; nop; nop; \
+ SRC(movl x(%esi), %ebx ) ; \
+ adcl %ebx, %eax ; \
+- DST(movl %ebx, x(%edi) ) ;
++ DST(movl %ebx, %es:x(%edi)) ;
+
+ #define ARGBASE 12
+-
+-ENTRY(csum_partial_copy_generic)
++
++ENTRY(csum_partial_copy_generic_to_user)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %es
++ jmp csum_partial_copy_generic
++#endif
++
++ENTRY(csum_partial_copy_generic_from_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %gs
++ popl_cfi %ds
++#endif
++
++ENTRY(csum_partial_copy_generic)
+ pushl_cfi %ebx
+ CFI_REL_OFFSET ebx, 0
+ pushl_cfi %edi
+@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
+ subl %ebx, %edi
+ lea -1(%esi),%edx
+ andl $-32,%edx
+- lea 3f(%ebx,%ebx), %ebx
++ lea 3f(%ebx,%ebx,2), %ebx
+ testl %esi, %esi
+ jmp *%ebx
+ 1: addl $64,%esi
+@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
+ jb 5f
+ SRC( movw (%esi), %dx )
+ leal 2(%esi), %esi
+-DST( movw %dx, (%edi) )
++DST( movw %dx, %es:(%edi) )
+ leal 2(%edi), %edi
+ je 6f
+ shll $16,%edx
+ 5:
+ SRC( movb (%esi), %dl )
+-DST( movb %dl, (%edi) )
++DST( movb %dl, %es:(%edi) )
+ 6: addl %edx, %eax
+ adcl $0, %eax
+ 7:
+ .section .fixup, "ax"
+ 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ # zero the complete destination (computing the rest is too much work)
+ movl ARGBASE+8(%esp),%edi # dst
+ movl ARGBASE+12(%esp),%ecx # len
+@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
+ rep; stosb
+ jmp 7b
+ 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
+- movl $-EFAULT, (%ebx)
++ movl $-EFAULT, %ss:(%ebx)
+ jmp 7b
+ .previous
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl_cfi %ss
++ popl_cfi %ds
++ pushl_cfi %ss
++ popl_cfi %es
++#endif
++
+ popl_cfi %esi
+ CFI_RESTORE esi
+ popl_cfi %edi
+@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
+ CFI_RESTORE ebx
+ ret
+ CFI_ENDPROC
+-ENDPROC(csum_partial_copy_generic)
++ENDPROC(csum_partial_copy_generic_to_user)
+
+ #undef ROUND
+ #undef ROUND1
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index f2145cf..cea889d 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
+ movl $4096/8,%ecx
+ xorl %eax,%eax
+ rep stosq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c)
+@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
+ movl $4096,%ecx
+ xorl %eax,%eax
+ rep stosb
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(clear_page_c_e)
+@@ -43,6 +45,7 @@ ENTRY(clear_page)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop
+ nop
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ .Lclear_page_end:
+@@ -58,7 +61,7 @@ ENDPROC(clear_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
+ 2: .byte 0xeb /* jmp <disp8> */
+diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
+index 1e572c5..2a162cd 100644
+--- a/arch/x86/lib/cmpxchg16b_emu.S
++++ b/arch/x86/lib/cmpxchg16b_emu.S
+@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
+
+ popf
+ mov $1, %al
++ pax_force_retaddr
+ ret
+
+ not_same:
+ popf
+ xor %al,%al
++ pax_force_retaddr
+ ret
+
+ CFI_ENDPROC
+diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
+index 01c805b..16da7cf 100644
+--- a/arch/x86/lib/copy_page_64.S
++++ b/arch/x86/lib/copy_page_64.S
+@@ -9,6 +9,7 @@ copy_page_c:
+ CFI_STARTPROC
+ movl $4096/8,%ecx
+ rep movsq
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(copy_page_c)
+@@ -24,7 +25,7 @@ ENTRY(copy_page)
+ CFI_ADJUST_CFA_OFFSET 3*8
+ movq %rbx,(%rsp)
+ CFI_REL_OFFSET rbx, 0
+- movq %r12,1*8(%rsp)
++ movq %r14,1*8(%rsp)
+-	CFI_REL_OFFSET r12, 1*8
++	CFI_REL_OFFSET r14, 1*8
+ movq %r13,2*8(%rsp)
+ CFI_REL_OFFSET r13, 2*8
+@@ -41,7 +42,7 @@ ENTRY(copy_page)
+ movq 32 (%rsi), %r9
+ movq 40 (%rsi), %r10
+ movq 48 (%rsi), %r11
+- movq 56 (%rsi), %r12
++ movq 56 (%rsi), %r14
+
+ prefetcht0 5*64(%rsi)
+
+@@ -52,7 +53,7 @@ ENTRY(copy_page)
+ movq %r9, 32 (%rdi)
+ movq %r10, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+- movq %r12, 56 (%rdi)
++ movq %r14, 56 (%rdi)
+
+ leaq 64 (%rsi), %rsi
+ leaq 64 (%rdi), %rdi
+@@ -71,7 +72,7 @@ ENTRY(copy_page)
+ movq 32 (%rsi), %r9
+ movq 40 (%rsi), %r10
+ movq 48 (%rsi), %r11
+- movq 56 (%rsi), %r12
++ movq 56 (%rsi), %r14
+
+ movq %rax, (%rdi)
+ movq %rbx, 8 (%rdi)
+@@ -80,7 +81,7 @@ ENTRY(copy_page)
+ movq %r9, 32 (%rdi)
+ movq %r10, 40 (%rdi)
+ movq %r11, 48 (%rdi)
+- movq %r12, 56 (%rdi)
++ movq %r14, 56 (%rdi)
+
+ leaq 64(%rdi),%rdi
+ leaq 64(%rsi),%rsi
+@@ -89,12 +90,13 @@ ENTRY(copy_page)
+
+ movq (%rsp),%rbx
+ CFI_RESTORE rbx
+- movq 1*8(%rsp),%r12
++ movq 1*8(%rsp),%r14
+-	CFI_RESTORE r12
++	CFI_RESTORE r14
+ movq 2*8(%rsp),%r13
+ CFI_RESTORE r13
+ addq $3*8,%rsp
+ CFI_ADJUST_CFA_OFFSET -3*8
++ pax_force_retaddr
+ ret
+ .Lcopy_page_end:
+ CFI_ENDPROC
+@@ -105,7 +107,7 @@ ENDPROC(copy_page)
+
+ #include <asm/cpufeature.h>
+
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 1: .byte 0xeb /* jmp <disp8> */
+ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
+ 2:
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index 0248402..416b737 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -16,6 +16,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/cpufeature.h>
+ #include <asm/alternative-asm.h>
++#include <asm/pgtable.h>
+
+ /*
+ * By placing feature2 after feature1 in altinstructions section, we logically
+@@ -29,7 +30,7 @@
+ .byte 0xe9 /* 32bit jump */
+ .long \orig-1f /* by default jump to orig */
+ 1:
+- .section .altinstr_replacement,"ax"
++ .section .altinstr_replacement,"a"
+ 2: .byte 0xe9 /* near jump with 32bit immediate */
+ .long \alt1-1b /* offset */ /* or alternatively to alt1 */
+ 3: .byte 0xe9 /* near jump with 32bit immediate */
+@@ -71,47 +72,20 @@
+ #endif
+ .endm
+
+-/* Standard copy_to_user with segment limit checking */
+-ENTRY(_copy_to_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rdi,%rcx
+- addq %rdx,%rcx
+- jc bad_to_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_to_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_to_user)
+-
+-/* Standard copy_from_user with segment limit checking */
+-ENTRY(_copy_from_user)
+- CFI_STARTPROC
+- GET_THREAD_INFO(%rax)
+- movq %rsi,%rcx
+- addq %rdx,%rcx
+- jc bad_from_user
+- cmpq TI_addr_limit(%rax),%rcx
+- ja bad_from_user
+- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
+- copy_user_generic_unrolled,copy_user_generic_string, \
+- copy_user_enhanced_fast_string
+- CFI_ENDPROC
+-ENDPROC(_copy_from_user)
+-
+ .section .fixup,"ax"
+ /* must zero dest */
+ ENTRY(bad_from_user)
+ bad_from_user:
+ CFI_STARTPROC
++ testl %edx,%edx
++ js bad_to_user
+ movl %edx,%ecx
+ xorl %eax,%eax
+ rep
+ stosb
+ bad_to_user:
+ movl %edx,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(bad_from_user)
+@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
+ decl %ecx
+ jnz 21b
+ 23: xor %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
+ 3: rep
+ movsb
+ 4: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
+ 1: rep
+ movsb
+ 2: xorl %eax,%eax
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
+index cb0c112..cb2d3c5 100644
+--- a/arch/x86/lib/copy_user_nocache_64.S
++++ b/arch/x86/lib/copy_user_nocache_64.S
+@@ -8,12 +8,14 @@
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ #define FIX_ALIGNMENT 1
+
+ #include <asm/current.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ .macro ALIGN_DESTINATION
+ #ifdef FIX_ALIGNMENT
+@@ -50,6 +52,15 @@
+ */
+ ENTRY(__copy_user_nocache)
+ CFI_STARTPROC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov pax_user_shadow_base,%rcx
++ cmp %rcx,%rsi
++ jae 1f
++ add %rcx,%rsi
++1:
++#endif
++
+ cmpl $8,%edx
+ jb 20f /* less then 8 bytes, go to byte copy loop */
+ ALIGN_DESTINATION
+@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
+ jnz 21b
+ 23: xorl %eax,%eax
+ sfence
++ pax_force_retaddr
+ ret
+
+ .section .fixup,"ax"
+diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
+index fb903b7..83cc6fb 100644
+--- a/arch/x86/lib/csum-copy_64.S
++++ b/arch/x86/lib/csum-copy_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Checksum copy with exception handling.
+@@ -64,8 +65,8 @@ ENTRY(csum_partial_copy_generic)
+ CFI_ADJUST_CFA_OFFSET 7*8
+ movq %rbx, 2*8(%rsp)
+ CFI_REL_OFFSET rbx, 2*8
+- movq %r12, 3*8(%rsp)
+- CFI_REL_OFFSET r12, 3*8
++ movq %r15, 3*8(%rsp)
++ CFI_REL_OFFSET r15, 3*8
+ movq %r14, 4*8(%rsp)
+ CFI_REL_OFFSET r14, 4*8
+ movq %r13, 5*8(%rsp)
+@@ -80,16 +81,16 @@ ENTRY(csum_partial_copy_generic)
+ movl %edx, %ecx
+
+ xorl %r9d, %r9d
+- movq %rcx, %r12
++ movq %rcx, %r15
+
+- shrq $6, %r12
++ shrq $6, %r15
+ jz .Lhandle_tail /* < 64 */
+
+ clc
+
+ /* main loop. clear in 64 byte blocks */
+ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+- /* r11: temp3, rdx: temp4, r12 loopcnt */
++ /* r11: temp3, rdx: temp4, r15 loopcnt */
+ /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ .p2align 4
+ .Lloop:
+@@ -123,7 +124,7 @@ ENTRY(csum_partial_copy_generic)
+ adcq %r14, %rax
+ adcq %r13, %rax
+
+- decl %r12d
++ decl %r15d
+
+ dest
+ movq %rbx, (%rsi)
+@@ -218,8 +219,8 @@ ENTRY(csum_partial_copy_generic)
+ .Lende:
+ movq 2*8(%rsp), %rbx
+ CFI_RESTORE rbx
+- movq 3*8(%rsp), %r12
+- CFI_RESTORE r12
++ movq 3*8(%rsp), %r15
++ CFI_RESTORE r15
+ movq 4*8(%rsp), %r14
+ CFI_RESTORE r14
+ movq 5*8(%rsp), %r13
+@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
+ CFI_RESTORE rbp
+ addq $7*8, %rsp
+ CFI_ADJUST_CFA_OFFSET -7*8
++ pax_force_retaddr
+ ret
+ CFI_RESTORE_STATE
+
+diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
+index 459b58a..d67737f 100644
+--- a/arch/x86/lib/csum-wrappers_64.c
++++ b/arch/x86/lib/csum-wrappers_64.c
+@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
+ len -= 2;
+ }
+ }
+- isum = csum_partial_copy_generic((__force const void *)src,
++ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
+ dst, len, isum, errp, NULL);
+ if (unlikely(*errp))
+ goto out_err;
+@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
+ }
+
+ *errp = 0;
+- return csum_partial_copy_generic(src, (void __force *)dst,
++ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
+ len, isum, NULL, errp);
+ }
+ EXPORT_SYMBOL(csum_partial_copy_to_user);
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index 51f1504..144f6bd 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -33,15 +33,38 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm.h>
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
+
+ .text
+ ENTRY(__get_user_1)
+ CFI_STARTPROC
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-1: movzb (%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movzb (%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_1)
+@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
+ ENTRY(__get_user_2)
+ CFI_STARTPROC
+ add $1,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-2: movzwl -1(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_2)
+@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
+ ENTRY(__get_user_4)
+ CFI_STARTPROC
+ add $3,%_ASM_AX
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+ jc bad_get_user
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
+-3: mov -3(%_ASM_AX),%edx
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg mov -3(%_ASM_AX),%edx
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_4)
+@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov pax_user_shadow_base,%_ASM_DX
++ cmp %_ASM_DX,%_ASM_AX
++ jae 1234f
++ add %_ASM_DX,%_ASM_AX
++1234:
++#endif
++
+ 4: movq -7(%_ASM_AX),%_ASM_DX
+ xor %eax,%eax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__get_user_8)
+@@ -91,6 +152,7 @@ bad_get_user:
+ CFI_STARTPROC
+ xor %edx,%edx
+ mov $(-EFAULT),%_ASM_AX
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(bad_get_user)
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 374562e..a75830b 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -21,6 +21,11 @@
+ #include <linux/string.h>
+ #include <asm/inat.h>
+ #include <asm/insn.h>
++#ifdef __KERNEL__
++#include <asm/pgtable_types.h>
++#else
++#define ktla_ktva(addr) addr
++#endif
+
+ /* Verify next sizeof(t) bytes can be on the same instruction */
+ #define validate_next(t, insn, n) \
+@@ -49,8 +54,8 @@
+ void insn_init(struct insn *insn, const void *kaddr, int x86_64)
+ {
+ memset(insn, 0, sizeof(*insn));
+- insn->kaddr = kaddr;
+- insn->next_byte = kaddr;
++ insn->kaddr = ktla_ktva(kaddr);
++ insn->next_byte = ktla_ktva(kaddr);
+ insn->x86_64 = x86_64 ? 1 : 0;
+ insn->opnd_bytes = 4;
+ if (x86_64)
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index 05a95e7..326f2fa 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -17,6 +17,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * override generic version in lib/iomap_copy.c
+@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
+ CFI_STARTPROC
+ movl %edx,%ecx
+ rep movsd
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(__iowrite32_copy)
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index efbf2a0..8090894 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -34,6 +34,7 @@
+ rep movsq
+ movl %edx, %ecx
+ rep movsb
++ pax_force_retaddr
+ ret
+ .Lmemcpy_e:
+ .previous
+@@ -51,6 +52,7 @@
+
+ movl %edx, %ecx
+ rep movsb
++ pax_force_retaddr
+ ret
+ .Lmemcpy_e_e:
+ .previous
+@@ -141,6 +143,7 @@ ENTRY(memcpy)
+ movq %r9, 1*8(%rdi)
+ movq %r10, -2*8(%rdi, %rdx)
+ movq %r11, -1*8(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_16bytes:
+@@ -153,6 +156,7 @@ ENTRY(memcpy)
+ movq -1*8(%rsi, %rdx), %r9
+ movq %r8, 0*8(%rdi)
+ movq %r9, -1*8(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_8bytes:
+@@ -166,6 +170,7 @@ ENTRY(memcpy)
+ movl -4(%rsi, %rdx), %r8d
+ movl %ecx, (%rdi)
+ movl %r8d, -4(%rdi, %rdx)
++ pax_force_retaddr
+ retq
+ .p2align 4
+ .Lless_3bytes:
+@@ -183,6 +188,7 @@ ENTRY(memcpy)
+ jnz .Lloop_1
+
+ .Lend:
++ pax_force_retaddr
+ retq
+ CFI_ENDPROC
+ ENDPROC(memcpy)
+diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
+index ee16461..c4f4918 100644
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -202,6 +202,7 @@ ENTRY(memmove)
+ movb (%rsi), %r11b
+ movb %r11b, (%rdi)
+ 13:
++ pax_force_retaddr
+ retq
+ CFI_ENDPROC
+
+@@ -210,6 +211,7 @@ ENTRY(memmove)
+ /* Forward moving data. */
+ movq %rdx, %rcx
+ rep movsb
++ pax_force_retaddr
+ retq
+ .Lmemmove_end_forward_efs:
+ .previous
+diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
+index 79bd454..24b3780 100644
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -31,6 +31,7 @@
+ movl %r8d,%ecx
+ rep stosb
+ movq %r9,%rax
++ pax_force_retaddr
+ ret
+ .Lmemset_e:
+ .previous
+@@ -53,6 +54,7 @@
+ movl %edx,%ecx
+ rep stosb
+ movq %r9,%rax
++ pax_force_retaddr
+ ret
+ .Lmemset_e_e:
+ .previous
+@@ -121,6 +123,7 @@ ENTRY(__memset)
+
+ .Lende:
+ movq %r10,%rax
++ pax_force_retaddr
+ ret
+
+ CFI_RESTORE_STATE
+diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
+index c9f2d9b..e7fd2c0 100644
+--- a/arch/x86/lib/mmx_32.c
++++ b/arch/x86/lib/mmx_32.c
+@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ {
+ void *p;
+ int i;
++ unsigned long cr0;
+
+ if (unlikely(in_interrupt()))
+ return __memcpy(to, from, len);
+@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n" /* This set is 28 bytes */
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n" /* This set is 28 bytes */
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from));
++ : "=&r" (cr0) : "r" (from) : "ax");
+
+ for ( ; i > 5; i--) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__(
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < (4096-320)/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movntq %%mm0, (%1)\n"
+- " movq 8(%0), %%mm1\n"
+- " movntq %%mm1, 8(%1)\n"
+- " movq 16(%0), %%mm2\n"
+- " movntq %%mm2, 16(%1)\n"
+- " movq 24(%0), %%mm3\n"
+- " movntq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm4\n"
+- " movntq %%mm4, 32(%1)\n"
+- " movq 40(%0), %%mm5\n"
+- " movntq %%mm5, 40(%1)\n"
+- " movq 48(%0), %%mm6\n"
+- " movntq %%mm6, 48(%1)\n"
+- " movq 56(%0), %%mm7\n"
+- " movntq %%mm7, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movntq %%mm0, (%2)\n"
++ " movq 8(%1), %%mm1\n"
++ " movntq %%mm1, 8(%2)\n"
++ " movq 16(%1), %%mm2\n"
++ " movntq %%mm2, 16(%2)\n"
++ " movq 24(%1), %%mm3\n"
++ " movntq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm4\n"
++ " movntq %%mm4, 32(%2)\n"
++ " movq 40(%1), %%mm5\n"
++ " movntq %%mm5, 40(%2)\n"
++ " movq 48(%1), %%mm6\n"
++ " movntq %%mm6, 48(%2)\n"
++ " movq 56(%1), %%mm7\n"
++ " movntq %%mm7, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
+ static void fast_copy_page(void *to, void *from)
+ {
+ int i;
++ unsigned long cr0;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+- "1: prefetch (%0)\n"
+- " prefetch 64(%0)\n"
+- " prefetch 128(%0)\n"
+- " prefetch 192(%0)\n"
+- " prefetch 256(%0)\n"
++ "1: prefetch (%1)\n"
++ " prefetch 64(%1)\n"
++ " prefetch 128(%1)\n"
++ " prefetch 192(%1)\n"
++ " prefetch 256(%1)\n"
+ "2: \n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++ "3: \n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+- _ASM_EXTABLE(1b, 3b) : : "r" (from));
++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
+
+ for (i = 0; i < 4096/64; i++) {
+ __asm__ __volatile__ (
+- "1: prefetch 320(%0)\n"
+- "2: movq (%0), %%mm0\n"
+- " movq 8(%0), %%mm1\n"
+- " movq 16(%0), %%mm2\n"
+- " movq 24(%0), %%mm3\n"
+- " movq %%mm0, (%1)\n"
+- " movq %%mm1, 8(%1)\n"
+- " movq %%mm2, 16(%1)\n"
+- " movq %%mm3, 24(%1)\n"
+- " movq 32(%0), %%mm0\n"
+- " movq 40(%0), %%mm1\n"
+- " movq 48(%0), %%mm2\n"
+- " movq 56(%0), %%mm3\n"
+- " movq %%mm0, 32(%1)\n"
+- " movq %%mm1, 40(%1)\n"
+- " movq %%mm2, 48(%1)\n"
+- " movq %%mm3, 56(%1)\n"
++ "1: prefetch 320(%1)\n"
++ "2: movq (%1), %%mm0\n"
++ " movq 8(%1), %%mm1\n"
++ " movq 16(%1), %%mm2\n"
++ " movq 24(%1), %%mm3\n"
++ " movq %%mm0, (%2)\n"
++ " movq %%mm1, 8(%2)\n"
++ " movq %%mm2, 16(%2)\n"
++ " movq %%mm3, 24(%2)\n"
++ " movq 32(%1), %%mm0\n"
++ " movq 40(%1), %%mm1\n"
++ " movq 48(%1), %%mm2\n"
++ " movq 56(%1), %%mm3\n"
++ " movq %%mm0, 32(%2)\n"
++ " movq %%mm1, 40(%2)\n"
++ " movq %%mm2, 48(%2)\n"
++ " movq %%mm3, 56(%2)\n"
+ ".section .fixup, \"ax\"\n"
+- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++ "3:\n"
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %%cr0, %0\n"
++ " movl %0, %%eax\n"
++ " andl $0xFFFEFFFF, %%eax\n"
++ " movl %%eax, %%cr0\n"
++#endif
++
++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ " movl %0, %%cr0\n"
++#endif
++
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : : "r" (from), "r" (to) : "memory");
++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
+
+ from += 64;
+ to += 64;
+diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
+index 69fa106..234ac7f 100644
+--- a/arch/x86/lib/msr-reg.S
++++ b/arch/x86/lib/msr-reg.S
+@@ -3,6 +3,7 @@
+ #include <asm/dwarf2.h>
+ #include <asm/asm.h>
+ #include <asm/msr.h>
++#include <asm/alternative-asm.h>
+
+ #ifdef CONFIG_X86_64
+ /*
+@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
+ movl %edi, 28(%r10)
+ popq_cfi %rbp
+ popq_cfi %rbx
++ pax_force_retaddr
+ ret
+ 3:
+ CFI_RESTORE_STATE
+diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
+index 36b0d15..3edf573 100644
+--- a/arch/x86/lib/putuser.S
++++ b/arch/x86/lib/putuser.S
+@@ -15,7 +15,9 @@
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-
++#include <asm/segment.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * __put_user_X
+@@ -29,52 +31,119 @@
+ * as they get called from within inline assembly.
+ */
+
+-#define ENTER CFI_STARTPROC ; \
+- GET_THREAD_INFO(%_ASM_BX)
+-#define EXIT ret ; \
++#define ENTER CFI_STARTPROC
++#define EXIT pax_force_retaddr; ret ; \
+ CFI_ENDPROC
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define _DEST %_ASM_CX,%_ASM_BX
++#else
++#define _DEST %_ASM_CX
++#endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg gs;
++#else
++#define __copyuser_seg
++#endif
++
+ .text
+ ENTRY(__put_user_1)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ jae bad_put_user
+-1: movb %al,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++1: __copyuser_seg movb %al,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_1)
+
+ ENTRY(__put_user_2)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $1,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-2: movw %ax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++2: __copyuser_seg movw %ax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $3,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-3: movl %eax,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++3: __copyuser_seg movl %eax,(_DEST)
+ xor %eax,%eax
+ EXIT
+ ENDPROC(__put_user_4)
+
+ ENTRY(__put_user_8)
+ ENTER
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
++ GET_THREAD_INFO(%_ASM_BX)
+ mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ sub $7,%_ASM_BX
+ cmp %_ASM_BX,%_ASM_CX
+ jae bad_put_user
+-4: mov %_ASM_AX,(%_ASM_CX)
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ mov pax_user_shadow_base,%_ASM_BX
++ cmp %_ASM_BX,%_ASM_CX
++ jb 1234f
++ xor %ebx,%ebx
++1234:
++#endif
++
++#endif
++
++4: __copyuser_seg mov %_ASM_AX,(_DEST)
+ #ifdef CONFIG_X86_32
+-5: movl %edx,4(%_ASM_CX)
++5: __copyuser_seg movl %edx,4(_DEST)
+ #endif
+ xor %eax,%eax
+ EXIT
+diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
+index 1cad221..de671ee 100644
+--- a/arch/x86/lib/rwlock.S
++++ b/arch/x86/lib/rwlock.S
+@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
+ jne 1b
+ LOCK_PREFIX
+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ jnz 0b
+ ENDFRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(__write_lock_failed)
+@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ READ_LOCK_SIZE(inc) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(dec) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
+ js 1b
+ LOCK_PREFIX
+ READ_LOCK_SIZE(dec) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(inc) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ js 0b
+ ENDFRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ END(__read_lock_failed)
+diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
+index 5dff5f0..cadebf4 100644
+--- a/arch/x86/lib/rwsem.S
++++ b/arch/x86/lib/rwsem.S
+@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_read_failed)
+@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
+ movq %rax,%rdi
+ call rwsem_down_write_failed
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_write_failed)
+@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
+ movq %rax,%rdi
+ call rwsem_wake
+ restore_common_regs
+-1: ret
++1: pax_force_retaddr
++ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_wake)
+
+@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
+ CFI_RESTORE __ASM_REG(dx)
+ restore_common_regs
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_downgrade_wake)
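
The rwsem thunks, like the rwlock ones above, also gain a pax_force_retaddr before each ret. Under the amd64 KERNEXEC scheme this rewrites the saved return address so it can only point into kernel space, commonly by forcing the top address bit, which turns an injected userland address non-canonical and faults on ret. The macro body is defined elsewhere in this patch, so the transform below is a sketch of one plausible variant, not the authoritative definition:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t force_retaddr(uint64_t ra)
    {
        /* a userland address (bit 63 clear) becomes non-canonical */
        return ra | (1ULL << 63);
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)force_retaddr(0x00007f0012345678ULL));
        return 0;
    }
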
+diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
+index a63efd6..8149fbe 100644
+--- a/arch/x86/lib/thunk_64.S
++++ b/arch/x86/lib/thunk_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
+ #include <asm/calling.h>
++#include <asm/alternative-asm.h>
+
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -15,11 +16,11 @@
+ \name:
+ CFI_STARTPROC
+
+- /* this one pushes 9 elems, the next one would be %rIP */
+- SAVE_ARGS
++ /* this one pushes 15+1 elems, the next one would be %rIP */
++ SAVE_ARGS 8
+
+ .if \put_ret_addr_in_rdi
+- movq_cfi_restore 9*8, rdi
++ movq_cfi_restore RIP, rdi
+ .endif
+
+ call \func
+@@ -38,8 +39,9 @@
+
+ /* SAVE_ARGS below is used only for the .cfi directives it contains. */
+ CFI_STARTPROC
+- SAVE_ARGS
++ SAVE_ARGS 8
+ restore:
+- RESTORE_ARGS
++ RESTORE_ARGS 1,8
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
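
The thunk_64.S change moves from the old 9-slot SAVE_ARGS frame to a full register save (SAVE_ARGS 8), so the caller's %rip is fetched via the named RIP offset rather than the hard-coded 9*8. With all 15 general-purpose registers plus the orig_ax slot ahead of it, the saved %rip lands at byte 128, which the model below verifies:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pt_regs_model {
        /* the 15 GPRs in pt_regs order */
        uint64_t r15, r14, r13, r12, bp, bx, r11, r10, r9, r8;
        uint64_t ax, cx, dx, si, di;
        uint64_t orig_ax;   /* the extra slot ahead of the saved %rip */
        uint64_t ip;        /* the RIP offset used by the patch       */
    };

    int main(void)
    {
        printf("RIP offset = %zu (old code assumed 9*8 = 72)\n",
               offsetof(struct pt_regs_model, ip));
        return 0;
    }
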
+diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
+index e218d5d..3966c85 100644
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -43,7 +43,7 @@ do { \
+ __asm__ __volatile__( \
+ " testl %1,%1\n" \
+ " jz 2f\n" \
+- "0: lodsb\n" \
++ "0: "__copyuser_seg"lodsb\n" \
+ " stosb\n" \
+ " testb %%al,%%al\n" \
+ " jz 1f\n" \
+@@ -128,10 +128,12 @@ do { \
+ int __d0; \
+ might_fault(); \
+ __asm__ __volatile__( \
++ __COPYUSER_SET_ES \
+ "0: rep; stosl\n" \
+ " movl %2,%0\n" \
+ "1: rep; stosb\n" \
+ "2:\n" \
++ __COPYUSER_RESTORE_ES \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%2,%0,4),%0\n" \
+ " jmp 2b\n" \
+@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
+ might_fault();
+
+ __asm__ __volatile__(
++ __COPYUSER_SET_ES
+ " testl %0, %0\n"
+ " jz 3f\n"
+ " andl %0,%%ecx\n"
+@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
+ " subl %%ecx,%0\n"
+ " addl %0,%%eax\n"
+ "1:\n"
++ __COPYUSER_RESTORE_ES
+ ".section .fixup,\"ax\"\n"
+ "2: xorl %%eax,%%eax\n"
+ " jmp 1b\n"
+@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
+
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ static unsigned long
+-__copy_user_intel(void __user *to, const void *from, unsigned long size)
++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+ {
+ int d0, d1;
+ __asm__ __volatile__(
+@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+ " .align 2,0x90\n"
+ "3: movl 0(%4), %%eax\n"
+ "4: movl 4(%4), %%edx\n"
+- "5: movl %%eax, 0(%3)\n"
+- "6: movl %%edx, 4(%3)\n"
++ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
++ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
+ "7: movl 8(%4), %%eax\n"
+ "8: movl 12(%4),%%edx\n"
+- "9: movl %%eax, 8(%3)\n"
+- "10: movl %%edx, 12(%3)\n"
++ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
++ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
+ "11: movl 16(%4), %%eax\n"
+ "12: movl 20(%4), %%edx\n"
+- "13: movl %%eax, 16(%3)\n"
+- "14: movl %%edx, 20(%3)\n"
++ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
++ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
+ "15: movl 24(%4), %%eax\n"
+ "16: movl 28(%4), %%edx\n"
+- "17: movl %%eax, 24(%3)\n"
+- "18: movl %%edx, 28(%3)\n"
++ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
++ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
+ "19: movl 32(%4), %%eax\n"
+ "20: movl 36(%4), %%edx\n"
+- "21: movl %%eax, 32(%3)\n"
+- "22: movl %%edx, 36(%3)\n"
++ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
++ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
+ "23: movl 40(%4), %%eax\n"
+ "24: movl 44(%4), %%edx\n"
+- "25: movl %%eax, 40(%3)\n"
+- "26: movl %%edx, 44(%3)\n"
++ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
++ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
+ "27: movl 48(%4), %%eax\n"
+ "28: movl 52(%4), %%edx\n"
+- "29: movl %%eax, 48(%3)\n"
+- "30: movl %%edx, 52(%3)\n"
++ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
++ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
+ "31: movl 56(%4), %%eax\n"
+ "32: movl 60(%4), %%edx\n"
+- "33: movl %%eax, 56(%3)\n"
+- "34: movl %%edx, 60(%3)\n"
++ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
++ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
++ __COPYUSER_SET_ES
+ "99: rep; movsl\n"
+ "36: movl %%eax, %0\n"
+ "37: rep; movsb\n"
+ "100:\n"
++ __COPYUSER_RESTORE_ES
++ ".section .fixup,\"ax\"\n"
++ "101: lea 0(%%eax,%0,4),%0\n"
++ " jmp 100b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,100b\n"
++ " .long 2b,100b\n"
++ " .long 3b,100b\n"
++ " .long 4b,100b\n"
++ " .long 5b,100b\n"
++ " .long 6b,100b\n"
++ " .long 7b,100b\n"
++ " .long 8b,100b\n"
++ " .long 9b,100b\n"
++ " .long 10b,100b\n"
++ " .long 11b,100b\n"
++ " .long 12b,100b\n"
++ " .long 13b,100b\n"
++ " .long 14b,100b\n"
++ " .long 15b,100b\n"
++ " .long 16b,100b\n"
++ " .long 17b,100b\n"
++ " .long 18b,100b\n"
++ " .long 19b,100b\n"
++ " .long 20b,100b\n"
++ " .long 21b,100b\n"
++ " .long 22b,100b\n"
++ " .long 23b,100b\n"
++ " .long 24b,100b\n"
++ " .long 25b,100b\n"
++ " .long 26b,100b\n"
++ " .long 27b,100b\n"
++ " .long 28b,100b\n"
++ " .long 29b,100b\n"
++ " .long 30b,100b\n"
++ " .long 31b,100b\n"
++ " .long 32b,100b\n"
++ " .long 33b,100b\n"
++ " .long 34b,100b\n"
++ " .long 35b,100b\n"
++ " .long 36b,100b\n"
++ " .long 37b,100b\n"
++ " .long 99b,101b\n"
++ ".previous"
++ : "=&c"(size), "=&D" (d0), "=&S" (d1)
++ : "1"(to), "2"(from), "0"(size)
++ : "eax", "edx", "memory");
++ return size;
++}
++
++static unsigned long
++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
++{
++ int d0, d1;
++ __asm__ __volatile__(
++ " .align 2,0x90\n"
++ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
++ " cmpl $67, %0\n"
++ " jbe 3f\n"
++ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
++ " .align 2,0x90\n"
++ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
++ "5: movl %%eax, 0(%3)\n"
++ "6: movl %%edx, 4(%3)\n"
++ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
++ "9: movl %%eax, 8(%3)\n"
++ "10: movl %%edx, 12(%3)\n"
++ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
++ "13: movl %%eax, 16(%3)\n"
++ "14: movl %%edx, 20(%3)\n"
++ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
++ "17: movl %%eax, 24(%3)\n"
++ "18: movl %%edx, 28(%3)\n"
++ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
++ "21: movl %%eax, 32(%3)\n"
++ "22: movl %%edx, 36(%3)\n"
++ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
++ "25: movl %%eax, 40(%3)\n"
++ "26: movl %%edx, 44(%3)\n"
++ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
++ "29: movl %%eax, 48(%3)\n"
++ "30: movl %%edx, 52(%3)\n"
++ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
++ "33: movl %%eax, 56(%3)\n"
++ "34: movl %%edx, 60(%3)\n"
++ " addl $-64, %0\n"
++ " addl $64, %4\n"
++ " addl $64, %3\n"
++ " cmpl $63, %0\n"
++ " ja 1b\n"
++ "35: movl %0, %%eax\n"
++ " shrl $2, %0\n"
++ " andl $3, %%eax\n"
++ " cld\n"
++ "99: rep; "__copyuser_seg" movsl\n"
++ "36: movl %%eax, %0\n"
++ "37: rep; "__copyuser_seg" movsb\n"
++ "100:\n"
+ ".section .fixup,\"ax\"\n"
+ "101: lea 0(%%eax,%0,4),%0\n"
+ " jmp 100b\n"
+@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ int d0, d1;
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movl %%eax, 0(%3)\n"
+ " movl %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movl %%eax, 8(%3)\n"
+ " movl %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movl %%eax, 16(%3)\n"
+ " movl %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movl %%eax, 24(%3)\n"
+ " movl %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movl %%eax, 32(%3)\n"
+ " movl %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movl %%eax, 40(%3)\n"
+ " movl %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movl %%eax, 48(%3)\n"
+ " movl %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movl %%eax, 56(%3)\n"
+ " movl %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+- "0: movl 32(%4), %%eax\n"
++ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+- "1: movl 64(%4), %%eax\n"
++ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+- "2: movl 0(%4), %%eax\n"
+- "21: movl 4(%4), %%edx\n"
++ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
++ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+- "3: movl 8(%4), %%eax\n"
+- "31: movl 12(%4),%%edx\n"
++ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
++ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+- "4: movl 16(%4), %%eax\n"
+- "41: movl 20(%4), %%edx\n"
++ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
++ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+- "10: movl 24(%4), %%eax\n"
+- "51: movl 28(%4), %%edx\n"
++ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
++ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+- "11: movl 32(%4), %%eax\n"
+- "61: movl 36(%4), %%edx\n"
++ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
++ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+- "12: movl 40(%4), %%eax\n"
+- "71: movl 44(%4), %%edx\n"
++ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
++ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+- "13: movl 48(%4), %%eax\n"
+- "81: movl 52(%4), %%edx\n"
++ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
++ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+- "14: movl 56(%4), %%eax\n"
+- "91: movl 60(%4), %%edx\n"
++ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
++ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+- "6: rep; movsl\n"
++ "6: rep; "__copyuser_seg" movsl\n"
+ " movl %%eax,%0\n"
+- "7: rep; movsb\n"
++ "7: rep; "__copyuser_seg" movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
+ */
+ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+-unsigned long __copy_user_intel(void __user *to, const void *from,
++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
++ unsigned long size);
++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
+ unsigned long size);
+ unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
+ #endif /* CONFIG_X86_INTEL_USERCOPY */
+
+ /* Generic arbitrary sized copy. */
+-#define __copy_user(to, from, size) \
++#define __copy_user(to, from, size, prefix, set, restore) \
+ do { \
+ int __d0, __d1, __d2; \
+ __asm__ __volatile__( \
++ set \
+ " cmp $7,%0\n" \
+ " jbe 1f\n" \
+ " movl %1,%0\n" \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "prefix"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "prefix"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "prefix"movsb\n" \
+ "2:\n" \
++ restore \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+ " jmp 2b\n" \
+@@ -682,14 +799,14 @@ do { \
+ " negl %0\n" \
+ " andl $7,%0\n" \
+ " subl %0,%3\n" \
+- "4: rep; movsb\n" \
++ "4: rep; "__copyuser_seg"movsb\n" \
+ " movl %3,%0\n" \
+ " shrl $2,%0\n" \
+ " andl $3,%3\n" \
+ " .align 2,0x90\n" \
+- "0: rep; movsl\n" \
++ "0: rep; "__copyuser_seg"movsl\n" \
+ " movl %3,%0\n" \
+- "1: rep; movsb\n" \
++ "1: rep; "__copyuser_seg"movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "5: addl %3,%0\n" \
+@@ -775,9 +892,9 @@ survive:
+ }
+ #endif
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
+ else
+- n = __copy_user_intel(to, from, n);
++ n = __generic_copy_to_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_to_user_ll);
+@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
+ unsigned long n)
+ {
+ if (movsl_is_ok(to, from, n))
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ else
+- n = __copy_user_intel((void __user *)to,
+- (const void *)from, n);
++ n = __generic_copy_from_user_intel(to, from, n);
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
+@@ -827,65 +943,49 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);
+ else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #else
+- __copy_user(to, from, n);
++ __copy_user(to, from, n, __copyuser_seg, "", "");
+ #endif
+ return n;
+ }
+ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
+
+-/**
+- * copy_to_user: - Copy a block of data into user space.
+- * @to: Destination address, in user space.
+- * @from: Source address, in kernel space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from kernel space to user space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- */
+-unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_WRITE, to, n))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-EXPORT_SYMBOL(copy_to_user);
+-
+-/**
+- * copy_from_user: - Copy a block of data from user space.
+- * @to: Destination address, in kernel space.
+- * @from: Source address, in user space.
+- * @n: Number of bytes to copy.
+- *
+- * Context: User context only. This function may sleep.
+- *
+- * Copy data from user space to kernel space.
+- *
+- * Returns number of bytes that could not be copied.
+- * On success, this will be zero.
+- *
+- * If some data could not be copied, this function will pad the copied
+- * data to the requested size using zero bytes.
+- */
+-unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_READ, from, n))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-EXPORT_SYMBOL(_copy_from_user);
+-
+ void copy_from_user_overflow(void)
+ {
+ WARN(1, "Buffer overflow detected!\n");
+ }
+ EXPORT_SYMBOL(copy_from_user_overflow);
++
++void copy_to_user_overflow(void)
++{
++ WARN(1, "Buffer overflow detected!\n");
++}
++EXPORT_SYMBOL(copy_to_user_overflow);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++void __set_fs(mm_segment_t x)
++{
++ switch (x.seg) {
++ case 0:
++ loadsegment(gs, 0);
++ break;
++ case TASK_SIZE_MAX:
++ loadsegment(gs, __USER_DS);
++ break;
++ case -1UL:
++ loadsegment(gs, __KERNEL_DS);
++ break;
++ default:
++ BUG();
++ }
++}
++EXPORT_SYMBOL(__set_fs);
++
++void set_fs(mm_segment_t x)
++{
++ current_thread_info()->addr_limit = x;
++ __set_fs(x);
++}
++EXPORT_SYMBOL(set_fs);
++#endif
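
The usercopy_32.c rework threads a direction-specific segment override through every primitive: __copy_user grows prefix/set/restore parameters, and __copy_user_intel splits into __generic_copy_to_user_intel and __generic_copy_from_user_intel, because the override must decorate only the userland-side operand: the stores when copying to user, the loads when copying from user. A compact C model of that split, where USER() stands in for the "__copyuser_seg" prefix:

    #include <stddef.h>

    #define USER(p) (p)     /* stand-in for the segment-override prefix */

    static void copy_to_user_model(char *to, const char *from, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            *USER(to + i) = from[i];        /* override on the store */
    }

    static void copy_from_user_model(char *to, const char *from, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            to[i] = *USER(from + i);        /* override on the load  */
    }

    int main(void)
    {
        char src[4] = "abc", dst[4] = {0};
        copy_to_user_model(dst, src, sizeof(src));
        copy_from_user_model(src, dst, sizeof(dst));
        return 0;
    }
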
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 554b7b5..4027e2c 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -42,6 +42,12 @@ long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+ long res;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if ((unsigned long)src < pax_user_shadow_base)
++ src += pax_user_shadow_base;
++#endif
++
+ __do_strncpy_from_user(dst, src, count, res);
+ return res;
+ }
+@@ -87,7 +93,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
+ _ASM_EXTABLE(0b,3b)
+ _ASM_EXTABLE(1b,2b)
+ : [size8] "=&c"(size), [dst] "=&D" (__d0)
+- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
++ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
+ [zero] "r" (0UL), [eight] "r" (8UL));
+ return size;
+ }
+@@ -149,12 +155,11 @@ long strlen_user(const char __user *s)
+ }
+ EXPORT_SYMBOL(strlen_user);
+
+-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
+ {
+- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
+- return copy_user_generic((__force void *)to, (__force void *)from, len);
+- }
+- return len;
++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
++ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
++ return len;
+ }
+ EXPORT_SYMBOL(copy_in_user);
+
+@@ -164,7 +169,7 @@ EXPORT_SYMBOL(copy_in_user);
+ * it is not necessary to optimize tail handling.
+ */
+ unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
+ {
+ char c;
+ unsigned zero_len;
+@@ -181,3 +186,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+ break;
+ return len;
+ }
++
++void copy_from_user_overflow(void)
++{
++ WARN(1, "Buffer overflow detected!\n");
++}
++EXPORT_SYMBOL(copy_from_user_overflow);
++
++void copy_to_user_overflow(void)
++{
++ WARN(1, "Buffer overflow detected!\n");
++}
++EXPORT_SYMBOL(copy_to_user_overflow);
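
The copy_{from,to}_user_overflow() hooks added in both usercopy files are the runtime half of a compile-time size check: uaccess wrappers defined elsewhere in this patch compare the copy length against the object size the compiler can prove and route provably oversized calls to these WARN functions. The wrapper shape below is an assumption for illustration, with __builtin_object_size standing in for the kernel helper (build with optimization so the builtin resolves):

    #include <stddef.h>
    #include <stdio.h>

    static void copy_to_user_overflow(void)
    {
        puts("Buffer overflow detected!");
    }

    /* GNU statement expression, as the kernel's uaccess macros use */
    #define copy_to_user_checked(to, from, n) ({                       \
        size_t __sz = __builtin_object_size(from, 0);                  \
        if (__sz != (size_t)-1 && (n) > __sz)                          \
            copy_to_user_overflow();    /* length provably too large */\
        (size_t)0;                                                     \
    })

    int main(void)
    {
        char buf[8];
        copy_to_user_checked((void *)0, buf, sizeof(buf));      /* fine  */
        copy_to_user_checked((void *)0, buf, sizeof(buf) + 1);  /* warns */
        return 0;
    }
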
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index d0474ad..36e9257 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
+ const struct exception_table_entry *fixup;
+
+ #ifdef CONFIG_PNPBIOS
+- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
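
The extable.c fix guards the PNP BIOS path with !v8086_mode(): in vm86 mode %cs holds a real-mode paragraph value rather than a protected-mode selector, so it can numerically collide with the PNP BIOS code selector and a vm86 fault would be misrouted into the PNP recovery path. The corrected predicate, restated with stand-in helpers (selector value and comparison are placeholders, not the real SEGMENT_IS_PNP_CODE definition):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_VM 0x00020000u   /* EFLAGS.VM bit        */
    #define PNP_CS32      0x18u         /* placeholder selector */

    static bool v8086_mode_model(uint32_t eflags)
    {
        return (eflags & X86_EFLAGS_VM) != 0;
    }

    static bool segment_is_pnp_code_model(uint16_t cs)
    {
        return (cs & ~3u) == PNP_CS32;  /* simplified comparison */
    }

    static bool pnp_fixup_applies(uint32_t eflags, uint16_t cs)
    {
        /* a vm86 %cs can alias the selector by value: exclude it first */
        return !v8086_mode_model(eflags) && segment_is_pnp_code_model(cs);
    }

    int main(void)
    {
        printf("%d\n", pnp_fixup_applies(X86_EFLAGS_VM, 0x18)); /* 0 now */
        return 0;
    }
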
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 53a7b69..8cc6fea 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -13,11 +13,18 @@
+ #include <linux/perf_event.h> /* perf_sw_event */
+ #include <linux/hugetlb.h> /* hstate_index_to_shift */
+ #include <linux/prefetch.h> /* prefetchw */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm/traps.h> /* dotraplinkage, ... */
+ #include <asm/pgalloc.h> /* pgd_*(), ... */
+ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
+ #include <asm/fixmap.h> /* VSYSCALL_START */
++#include <asm/tlbflush.h>
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/stacktrace.h>
++#endif
+
+ /*
+ * Page fault error code bits:
+@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
+ int ret = 0;
+
+ /* kprobe_running() needs smp_processor_id() */
+- if (kprobes_built_in() && !user_mode_vm(regs)) {
++ if (kprobes_built_in() && !user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
+ ret = 1;
+@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+ return !instr_lo || (instr_lo>>1) == 1;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++ return 0;
++ } else if (probe_kernel_address(instr, opcode))
+ return 0;
+
+ *prefetch = (instr_lo == 0xF) &&
+@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+ while (instr < max_instr) {
+ unsigned char opcode;
+
+- if (probe_kernel_address(instr, opcode))
++ if (user_mode(regs)) {
++ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
++ break;
++ } else if (probe_kernel_address(instr, opcode))
+ break;
+
+ instr++;
+@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+ force_sig_info(si_signo, &info, tsk);
+ }
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd_present(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ return pmd;
++}
++#endif
++
+ DEFINE_SPINLOCK(pgd_lock);
+ LIST_HEAD(pgd_list);
+
+@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
+ for (address = VMALLOC_START & PMD_MASK;
+ address >= TASK_SIZE && address < FIXADDR_TOP;
+ address += PMD_SIZE) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++ pmd_t *ret;
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
++ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+ pmd_t *ret;
+
+@@ -242,8 +295,14 @@ void vmalloc_sync_all(void)
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++ pgd = page_address(page);
++#endif
++
++ ret = vmalloc_sync_one(pgd, address);
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
+
+ if (!ret)
+ break;
+@@ -277,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3();
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
++#endif
++
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+@@ -372,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+ * happen within a race in page table update. In the later
+ * case just flush:
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
++ pgd = pgd_offset_cpu(smp_processor_id(), address);
++#else
+ pgd = pgd_offset(current->active_mm, address);
++#endif
++
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+@@ -542,7 +613,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+ static int is_errata100(struct pt_regs *regs, unsigned long address)
+ {
+ #ifdef CONFIG_X86_64
+- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
+ return 1;
+ #endif
+ return 0;
+@@ -569,7 +640,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+ }
+
+ static const char nx_warning[] = KERN_CRIT
+-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
+
+ static void
+ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -578,15 +649,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+ if (!oops_may_print())
+ return;
+
+- if (error_code & PF_INSTR) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
+ unsigned int level;
+
+ pte_t *pte = lookup_address(address, &level);
+
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
+- printk(nx_warning, current_uid());
++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (init_mm.start_code <= address && address < init_mm.end_code) {
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++ }
++#endif
++
+ printk(KERN_ALERT "BUG: unable to handle kernel ");
+ if (address < PAGE_SIZE)
+ printk(KERN_CONT "NULL pointer dereference");
+@@ -740,6 +822,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ return;
+ }
+ #endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (pax_is_fetch_fault(regs, error_code, address)) {
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Kernel addresses are always protection faults: */
+ if (address >= TASK_SIZE)
+ error_code |= PF_PROT;
+@@ -839,7 +937,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ printk(KERN_ERR
+ "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+- tsk->comm, tsk->pid, address);
++ tsk->comm, task_pid_nr(tsk), address);
+ code = BUS_MCEERR_AR;
+ }
+ #endif
+@@ -894,6 +992,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+ return 1;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
++{
++ pte_t *pte;
++ pmd_t *pmd;
++ spinlock_t *ptl;
++ unsigned char pte_mask;
++
++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
++ !(mm->pax_flags & MF_PAX_PAGEEXEC))
++ return 0;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ up_read(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ switch (pax_handle_fetch_fault(regs)) {
++ case 2:
++ return 1;
++ }
++#endif
++
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++
++ pmd = pax_get_pmd(mm, address);
++ if (unlikely(!pmd))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap_unlock(pte, ptl);
++ return 0;
++ }
++
++#ifdef CONFIG_SMP
++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
++#else
++ if (likely(address > get_limit(regs->cs)))
++#endif
++ {
++ set_pte(pte, pte_mkread(*pte));
++ __flush_tlb_one(address);
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,(%1)\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg (%0)\n"
++#endif
++ __copyuser_seg"testb $0,(%0)\n"
++ "xorb %3,(%1)\n"
++ :
++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
++ : "memory", "cc");
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return 1;
++}
++#endif
++
+ /*
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+@@ -966,6 +1157,9 @@ int show_unhandled_signals = 1;
+ static inline int
+ access_error(unsigned long error_code, struct vm_area_struct *vma)
+ {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
++ return 1;
++
+ if (error_code & PF_WRITE) {
+ /* write, present and write, not present: */
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+@@ -999,18 +1193,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ {
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+- unsigned long address;
+ struct mm_struct *mm;
+ int fault;
+ int write = error_code & PF_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+- tsk = current;
+- mm = tsk->mm;
+-
+ /* Get the faulting address: */
+- address = read_cr2();
++ unsigned long address = read_cr2();
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
++ if (!search_exception_tables(regs->ip)) {
++ bad_area_nosemaphore(regs, error_code, address);
++ return;
++ }
++ if (address < pax_user_shadow_base) {
++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
++ } else
++ address -= pax_user_shadow_base;
++ }
++#endif
++
++ tsk = current;
++ mm = tsk->mm;
+
+ /*
+ * Detect and handle instructions that would cause a page fault for
+@@ -1071,7 +1279,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ * User-mode registers count as a user access even for any
+ * potential system fault or CPU buglet:
+ */
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ local_irq_enable();
+ error_code |= PF_USER;
+ } else {
+@@ -1126,6 +1334,11 @@ retry:
+ might_sleep();
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
++ return;
++#endif
++
+ vma = find_vma(mm, address);
+ if (unlikely(!vma)) {
+ bad_area(regs, error_code, address);
+@@ -1137,18 +1350,24 @@ retry:
+ bad_area(regs, error_code, address);
+ return;
+ }
+- if (error_code & PF_USER) {
+- /*
+- * Accessing the stack below %sp is always a bug.
+- * The large cushion allows instructions like enter
+- * and pusha to work. ("enter $65535, $31" pushes
+- * 32 pointers and then decrements %sp by 65535.)
+- */
+- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+- bad_area(regs, error_code, address);
+- return;
+- }
++ /*
++ * Accessing the stack below %sp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535, $31" pushes
++ * 32 pointers and then decrements %sp by 65535.)
++ */
++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
++ bad_area(regs, error_code, address);
++ return;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
++ bad_area(regs, error_code, address);
++ return;
++ }
++#endif
++
+ if (unlikely(expand_stack(vma, address))) {
+ bad_area(regs, error_code, address);
+ return;
+@@ -1203,3 +1422,292 @@ good_area:
+
+ up_read(&mm->mmap_sem);
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long ip = regs->ip;
++
++ if (v8086_mode(regs))
++ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
++ return true;
++ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
++ return true;
++ return false;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
++ return true;
++ return false;
++ }
++#endif
++
++ return false;
++}
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++static int pax_handle_fetch_fault_32(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: libffi trampoline emulation */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB8 && jmp == 0xE9) {
++ regs->ax = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 11) >> 32)
++ break;
++#endif
++
++ err = get_user(mov1, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
++ regs->cx = addr1;
++ regs->ax = addr2;
++ regs->ip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned int addr1, addr2;
++
++#ifdef CONFIG_X86_64
++ if ((regs->ip + 9) >> 32)
++ break;
++#endif
++
++ err = get_user(mov, (unsigned char __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
++
++ if (err)
++ break;
++
++ if (mov == 0xB9 && jmp == 0xE9) {
++ regs->cx = addr1;
++ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++
++#ifdef CONFIG_X86_64
++static int pax_handle_fetch_fault_64(struct pt_regs *regs)
++{
++ int err;
++
++ do { /* PaX: libffi trampoline emulation */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char stcclc, jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ if (stcclc == 0xF8)
++ regs->flags &= ~X86_EFLAGS_CF;
++ else
++ regs->flags |= X86_EFLAGS_CF;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned int addr1;
++ unsigned long addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned short mov1, mov2, jmp1;
++ unsigned char jmp2;
++ unsigned long addr1, addr2;
++
++ err = get_user(mov1, (unsigned short __user *)regs->ip);
++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
++
++ if (err)
++ break;
++
++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
++ regs->r11 = addr1;
++ regs->r10 = addr2;
++ regs->ip = addr1;
++ return 2;
++ }
++ } while (0);
++
++ return 1; /* PaX in action */
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->ip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ if (v8086_mode(regs))
++ return 1;
++
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++
++#ifdef CONFIG_X86_32
++ return pax_handle_fetch_fault_32(regs);
++#else
++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
++ return pax_handle_fetch_fault_32(regs);
++ else
++ return pax_handle_fetch_fault_64(regs);
++#endif
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char __force_user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
++ for (i = -1; i < 80 / (long)sizeof(long); i++) {
++ unsigned long c;
++ if (get_user(c, (unsigned long __force_user *)sp+i)) {
++#ifdef CONFIG_X86_32
++ printk(KERN_CONT "???????? ");
++#else
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
++ printk(KERN_CONT "???????? ???????? ");
++ else
++ printk(KERN_CONT "???????????????? ");
++#endif
++ } else {
++#ifdef CONFIG_X86_64
++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
++ printk(KERN_CONT "%08x ", (unsigned int)c);
++ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
++ } else
++#endif
++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
++ }
++ }
++ printk("\n");
++}
++#endif
++
++/**
++ * probe_kernel_write(): safely attempt to write to a location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++long notrace probe_kernel_write(void *dst, const void *src, size_t size)
++{
++ long ret;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(KERNEL_DS);
++ pagefault_disable();
++ pax_open_kernel();
++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
++ pax_close_kernel();
++ pagefault_enable();
++ set_fs(old_fs);
++
++ return ret ? -EFAULT : 0;
++}
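
pax_handle_fetch_fault_32() above recognises trampolines purely by byte pattern; the libffi case is B8 imm32 (mov $imm,%eax) followed by E9 rel32 (jmp rel32), emulated by loading %eax and advancing %eip by the displacement plus the 10-byte instruction length, exactly as regs->ip + addr2 + 10 does. A standalone little-endian decoder for that shape:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cpu_model { uint32_t eax, eip; };

    static int emulate_libffi_tramp(struct cpu_model *c, const uint8_t insn[10])
    {
        uint32_t imm, rel;

        if (insn[0] != 0xB8 || insn[5] != 0xE9)  /* mov eax / jmp rel32 */
            return 0;
        memcpy(&imm, insn + 1, 4);               /* unaligned-safe reads */
        memcpy(&rel, insn + 6, 4);
        c->eax = imm;
        c->eip = c->eip + rel + 10;              /* regs->ip + addr2 + 10 */
        return 1;
    }

    int main(void)
    {
        const uint8_t t[10] = { 0xB8, 0x78, 0x56, 0x34, 0x12,
                                0xE9, 0x00, 0x01, 0x00, 0x00 };
        struct cpu_model c = { .eax = 0, .eip = 0x1000 };

        if (emulate_libffi_tramp(&c, t))
            printf("eax=%#x eip=%#x\n", c.eax, c.eip); /* eax=0x12345678 eip=0x110a */
        return 0;
    }
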
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index dd74e46..0970b01 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+@@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ goto slow_irqon;
+ #endif
+
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ (void __user *)start, len)))
++ return 0;
++
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
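
The gup.c change adds the previously missing range validation to get_user_pages_fast() and uses __access_ok() in both paths, so the range can neither wrap around nor reach past the userland limit before the lockless walk begins. The shape of that check as overflow-safe C (the limit value is a placeholder):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TASK_SIZE_MODEL 0x0000800000000000ULL   /* placeholder limit */

    static bool range_ok(uintptr_t start, size_t len)
    {
        /* phrased to avoid overflow in start + len */
        return len <= TASK_SIZE_MODEL && start <= TASK_SIZE_MODEL - len;
    }

    int main(void)
    {
        printf("%d %d\n",
               range_ok(0x400000, 4096),           /* 1: ordinary range */
               range_ok(UINTPTR_MAX - 10, 4096));  /* 0: would wrap     */
        return 0;
    }
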
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index f4f29b1..5cac4fb 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
++
++ pax_open_kernel();
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
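
The kmap_atomic_prot() hunk brackets its set_pte() with pax_open_kernel()/pax_close_kernel(). Under KERNEXEC the kernel page tables are kept non-writable, and this pair briefly lifts the protection for the single update (on i386 typically by toggling CR0.WP; the macro definitions live elsewhere in this patch, so that detail is an inference). A userspace analogy of the same bracketing, using mprotect():

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "initial");
        mprotect(p, pg, PROT_READ);                /* steady state: RO     */

        mprotect(p, pg, PROT_READ | PROT_WRITE);   /* pax_open_kernel()    */
        strcpy(p, "updated");                      /* the set_pte() moment */
        mprotect(p, pg, PROT_READ);                /* pax_close_kernel()   */

        puts(p);
        return 0;
    }
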
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index df7d12c..93fae8e 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -277,13 +277,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, pax_task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
+
+ if (len > mm->cached_hole_size) {
+- start_addr = mm->free_area_cache;
++ start_addr = mm->free_area_cache;
+ } else {
+- start_addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -291,26 +299,27 @@ full_search:
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+- if (TASK_SIZE - len < addr) {
++ if (pax_task_size - len < addr) {
+ /*
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -319,10 +328,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ {
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma, *prev_vma;
+- unsigned long base = mm->mmap_base, addr = addr0;
++ struct vm_area_struct *vma;
++ unsigned long base = mm->mmap_base, addr;
+ unsigned long largest_hole = mm->cached_hole_size;
+- int first_time = 1;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ /* don't allow allocations above current base */
+ if (mm->free_area_cache > base)
+@@ -332,64 +341,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
+-try_again:
++
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in requested address hole */
+- addr = (mm->free_area_cache - len) & huge_page_mask(h);
++ addr = (mm->free_area_cache - len);
+ do {
++ addr &= huge_page_mask(h);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+ */
+- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
++ vma = find_vma(mm, addr);
++ if (!vma)
+ return addr;
+
+ /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+- if (addr + len <= vma->vm_start &&
+- (!prev_vma || (addr >= prev_vma->vm_end))) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
+- } else {
+- /* pull free_area_cache down to the first hole */
+- if (mm->free_area_cache == vma->vm_end) {
+- mm->free_area_cache = vma->vm_start;
+- mm->cached_hole_size = largest_hole;
+- }
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
++ }
++ /* pull free_area_cache down to the first hole */
++ if (mm->free_area_cache == vma->vm_end) {
++ mm->free_area_cache = vma->vm_start;
++ mm->cached_hole_size = largest_hole;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start - len) & huge_page_mask(h);
+- } while (len <= vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ fail:
+ /*
+- * if hint left us with no space for the requested
+- * mapping then try again:
+- */
+- if (first_time) {
+- mm->free_area_cache = base;
+- largest_hole = 0;
+- first_time = 0;
+- goto try_again;
+- }
+- /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
+ len, pgoff, flags);
+@@ -397,6 +410,7 @@ fail:
+ /*
+ * Restore the topdown base:
+ */
++ mm->mmap_base = base;
+ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+@@ -410,10 +424,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long pax_task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > TASK_SIZE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (len > pax_task_size)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+@@ -422,11 +446,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
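
Throughout this hugetlb rework the plain "addr + len <= vma->vm_start" overlap test becomes check_heap_stack_gap(), which additionally enforces a guard gap, randomised per thread via gr_rand_threadstack_offset(), between the candidate mapping and a neighbouring vma. A simplified model of the gap-aware fit test (the real helper also special-cases grows-down stacks and can adjust addr):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vma_model { uintptr_t vm_start, vm_end; };

    static bool fits_with_gap(const struct vma_model *next,
                              uintptr_t addr, size_t len, size_t gap)
    {
        if (!next)                        /* nothing above: always fits */
            return true;
        return addr + len + gap <= next->vm_start;
    }

    int main(void)
    {
        struct vma_model stack = { 0x7f0000000000UL, 0x7f0000100000UL };
        printf("%d\n", fits_with_gap(&stack, 0x7effffe00000UL,
                                     0x100000, 0x100000));  /* 1 */
        return 0;
    }
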
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index a4cca06..9e00106 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -3,6 +3,7 @@
+ #include <linux/ioport.h>
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
++#include <linux/tboot.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/e820.h>
+@@ -15,6 +16,8 @@
+ #include <asm/tlbflush.h>
+ #include <asm/tlb.h>
+ #include <asm/proto.h>
++#include <asm/desc.h>
++#include <asm/bios_ebda.h>
+
+ unsigned long __initdata pgt_buf_start;
+ unsigned long __meminitdata pgt_buf_end;
+@@ -43,7 +46,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ {
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+- unsigned long start = 0, good_end;
++ unsigned long start = 0x100000, good_end;
+ unsigned long pgd_extra = 0;
+ phys_addr_t base;
+
+@@ -282,7 +285,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+
+ #ifdef CONFIG_X86_32
+ early_ioremap_page_table_range_init();
++#endif
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
++ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ KERNEL_PGD_PTRS);
++ load_cr3(get_cpu_pgd(0));
++#elif defined(CONFIG_X86_32)
+ load_cr3(swapper_pg_dir);
+ #endif
+
+@@ -324,10 +334,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
+ int devmem_is_allowed(unsigned long pagenr)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ /* allow BDA */
++ if (!pagenr)
++ return 1;
++ /* allow EBDA */
++ if (pagenr >= ebda_start && pagenr < ebda_end)
++ return 1;
++ /* if tboot is in use, allow access to its hardcoded serial log range */
++ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
++ return 1;
++#else
++ if (!pagenr)
++ return 1;
++#ifdef CONFIG_VM86
++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#endif
++#endif
++
++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
++ return 1;
++#ifdef CONFIG_GRKERNSEC_KMEM
++ /* throw out everything else below 1MB */
+ if (pagenr <= 256)
+- return 1;
++ return 0;
++#endif
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pagenr))
+@@ -384,8 +424,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ #endif
+ }
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++ unsigned int ebda_addr;
++ unsigned int ebda_size = 0;
++
++ ebda_addr = get_bios_ebda();
++ if (ebda_addr) {
++ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++ ebda_size <<= 10;
++ }
++ if (ebda_addr && ebda_size) {
++ ebda_start = ebda_addr >> PAGE_SHIFT;
++ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
++ } else {
++ ebda_start = 0x9f000 >> PAGE_SHIFT;
++ ebda_end = 0xa0000 >> PAGE_SHIFT;
++ }
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
+ void free_initmem(void)
+ {
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long addr, limit;
++ struct desc_struct d;
++ int cpu;
++#else
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ unsigned long addr, end;
++#endif
++#endif
++
++ gr_init_ebda();
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
++ if (!paravirt_enabled())
++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
++/*
++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++*/
++#ifdef CONFIG_X86_PAE
++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
++/*
++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++*/
++#endif
++
++#ifdef CONFIG_MODULES
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++#endif
++
++#else
++ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#endif
++
++ flush_tlb_all();
++#endif
++
+ free_init_pages("unused kernel memory",
+ (unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end));
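
The devmem_is_allowed() rework above inverts the stock low-memory policy for /dev/mem: instead of whitelisting the whole first megabyte (pagenr <= 256), a GRKERNSEC_KMEM kernel whitelists only the BIOS Data Area, the EBDA window computed by gr_init_ebda(), the tboot serial-log range when tboot is active, and the legacy ISA hole, rejecting everything else below 1MB. A minimal sketch of that low-memory decision, outside the kernel tree; the constants mirror x86 and the EBDA bounds stand in for what gr_init_ebda() derives from the BIOS data area:

    #include <stdbool.h>

    #define PAGE_SHIFT        12
    #define ISA_START_ADDRESS 0xa0000UL
    #define ISA_END_ADDRESS   0x100000UL

    /* fallback EBDA window, as in gr_init_ebda() when the BDA gives nothing */
    static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;
    static unsigned long ebda_end   = 0xa0000UL >> PAGE_SHIFT;

    static bool devmem_lowmem_allowed(unsigned long pagenr)
    {
        if (pagenr == 0)                                /* BDA */
            return true;
        if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
            return true;
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))  /* ISA/VGA hole */
            return true;
        return false;  /* everything else under 1MB is denied */
    }
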
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 29f7c6d..7500c2f 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
+ }
+
+ /*
+- * Creates a middle page table and puts a pointer to it in the
+- * given global directory entry. This only returns the gd entry
+- * in non-PAE compilation mode, since the middle layer is folded.
+- */
+-static pmd_t * __init one_md_table_init(pgd_t *pgd)
+-{
+- pud_t *pud;
+- pmd_t *pmd_table;
+-
+-#ifdef CONFIG_X86_PAE
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
+- if (after_bootmem)
+- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
+- else
+- pmd_table = (pmd_t *)alloc_low_page();
+- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
+- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+- pud = pud_offset(pgd, 0);
+- BUG_ON(pmd_table != pmd_offset(pud, 0));
+-
+- return pmd_table;
+- }
+-#endif
+- pud = pud_offset(pgd, 0);
+- pmd_table = pmd_offset(pud, 0);
+-
+- return pmd_table;
+-}
+-
+-/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry:
+ */
+@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
+ page_table = (pte_t *)alloc_low_page();
+
+ paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
++#else
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++#endif
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+ }
+
+ return pte_offset_kernel(pmd, 0);
+ }
+
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++
++ return pmd_table;
++}
++
+ pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+ {
+ int pgd_idx = pgd_index(vaddr);
+@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
+- pmd = pmd + pmd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
++
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+ pmd++, pmd_idx++) {
+ pte = page_table_kmap_check(one_page_table_init(pmd),
+@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+ }
+ }
+
+-static inline int is_kernel_text(unsigned long addr)
++static inline int is_kernel_text(unsigned long start, unsigned long end)
+ {
+- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
+- return 1;
+- return 0;
++ if ((start > ktla_ktva((unsigned long)_etext) ||
++ end <= ktla_ktva((unsigned long)_stext)) &&
++ (start > ktla_ktva((unsigned long)_einittext) ||
++ end <= ktla_ktva((unsigned long)_sinittext)) &&
++
++#ifdef CONFIG_ACPI_SLEEP
++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
++#endif
++
++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
++ return 0;
++ return 1;
+ }
+
+ /*
+@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
+ unsigned long last_map_addr = end;
+ unsigned long start_pfn, end_pfn;
+ pgd_t *pgd_base = swapper_pg_dir;
+- int pgd_idx, pmd_idx, pte_ofs;
++ unsigned int pgd_idx, pmd_idx, pte_ofs;
+ unsigned long pfn;
+ pgd_t *pgd;
++ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned pages_2m, pages_4k;
+@@ -281,8 +282,13 @@ repeat:
+ pfn = start_pfn;
+ pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+- pmd = one_md_table_init(pgd);
++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
++ pud = pud_offset(pgd, 0);
++ pmd = pmd_offset(pud, 0);
++
++#ifdef CONFIG_X86_PAE
++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
++#endif
+
+ if (pfn >= end_pfn)
+ continue;
+@@ -294,14 +300,13 @@ repeat:
+ #endif
+ for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
+ pmd++, pmd_idx++) {
+- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /*
+ * Map with big pages if possible, otherwise
+ * create normal page tables:
+ */
+ if (use_pse) {
+- unsigned int addr2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+ /*
+ * first pass will use the same initial
+@@ -311,11 +316,7 @@ repeat:
+ __pgprot(PTE_IDENT_ATTR |
+ _PAGE_PSE);
+
+- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
+- PAGE_OFFSET + PAGE_SIZE-1;
+-
+- if (is_kernel_text(addr) ||
+- is_kernel_text(addr2))
++ if (is_kernel_text(address, address + PMD_SIZE))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ pages_2m++;
+@@ -332,7 +333,7 @@ repeat:
+ pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+ pte += pte_ofs;
+ for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
+- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+ /*
+ * first pass will use the same initial
+@@ -340,7 +341,7 @@ repeat:
+ */
+ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
+
+- if (is_kernel_text(addr))
++ if (is_kernel_text(address, address + PAGE_SIZE))
+ prot = PAGE_KERNEL_EXEC;
+
+ pages_4k++;
+@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
+
+ pud = pud_offset(pgd, va);
+ pmd = pmd_offset(pud, va);
+- if (!pmd_present(*pmd))
++ if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ break;
+
+ pte = pte_offset_kernel(pmd, va);
+@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
+
+ static void __init pagetable_init(void)
+ {
+- pgd_t *pgd_base = swapper_pg_dir;
+-
+- permanent_kmaps_init(pgd_base);
++ permanent_kmaps_init(swapper_pg_dir);
+ }
+
+-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ /* user-defined highmem size */
+@@ -774,7 +773,7 @@ void __init mem_init(void)
+ set_highmem_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
+@@ -815,10 +814,10 @@ void __init mem_init(void)
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10,
+
+- (unsigned long)&_etext, (unsigned long)&_edata,
+- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++ (unsigned long)&_sdata, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
+
+- (unsigned long)&_text, (unsigned long)&_etext,
++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
+ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+ /*
+@@ -896,6 +895,7 @@ void set_kernel_text_rw(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read write\n",
+ start, start+size);
+
+@@ -910,6 +910,7 @@ void set_kernel_text_ro(void)
+ if (!kernel_set_to_readonly)
+ return;
+
++ start = ktla_ktva(start);
+ pr_debug("Set kernel text: %lx - %lx for read only\n",
+ start, start+size);
+
+@@ -938,6 +939,7 @@ void mark_rodata_ro(void)
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long size = PFN_ALIGN(_etext) - start;
+
++ start = ktla_ktva(start);
+ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+ printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ size >> 10);
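
is_kernel_text() above changes from a point query against [_text, __init_end] to a range-overlap test: a mapping counts as text if [start, end) intersects any executable region (.text, .init.text, the ACPI wakeup trampoline, the low BIOS range), so a 2MB large page is made executable exactly when some byte of it needs to be. The core is the usual half-open interval intersection, sketched here:

    #include <stdbool.h>

    /* Does [a0, a1) intersect [b0, b1)?  The patched is_kernel_text()
     * applies the negation per region: "entirely outside all of them". */
    static bool ranges_overlap(unsigned long a0, unsigned long a1,
                               unsigned long b0, unsigned long b1)
    {
        return a0 < b1 && b0 < a1;
    }
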
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 44b93da..5a0b3ee 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
+ * around without checking the pgd every time.
+ */
+
+-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
+ EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
+ int force_personality32;
+@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+
+ for (address = start; address <= end; address += PGDIR_SIZE) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock(&pgd_lock);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = pgd_offset_cpu(cpu, address);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
++#endif
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
+ BUG_ON(pgd_page_vaddr(*pgd)
+ != pgd_page_vaddr(*pgd_ref));
+
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ spin_unlock(pgt_lock);
++#endif
++
+ }
+ spin_unlock(&pgd_lock);
+ }
+@@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
+ {
+ if (pgd_none(*pgd)) {
+ pud_t *pud = (pud_t *)spp_getpage();
+- pgd_populate(&init_mm, pgd, pud);
++ pgd_populate_kernel(&init_mm, pgd, pud);
+ if (pud != pud_offset(pgd, 0))
+ printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+ pud, pud_offset(pgd, 0));
+@@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+ {
+ if (pud_none(*pud)) {
+ pmd_t *pmd = (pmd_t *) spp_getpage();
+- pud_populate(&init_mm, pud, pmd);
++ pud_populate_kernel(&init_mm, pud, pmd);
+ if (pmd != pmd_offset(pud, 0))
+ printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+ pmd, pmd_offset(pud, 0));
+@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+ pmd = fill_pmd(pud, vaddr);
+ pte = fill_pte(pmd, vaddr);
+
++ pax_open_kernel();
+ set_pte(pte, new_pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+ pgd = pgd_offset_k((unsigned long)__va(phys));
+ if (pgd_none(*pgd)) {
+ pud = (pud_t *) spp_getpage();
+- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
+ }
+ pud = pud_offset(pgd, (unsigned long)__va(phys));
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *) spp_getpage();
+- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+- _PAGE_USER));
++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
+ }
+ pmd = pmd_offset(pud, phys);
+ BUG_ON(!pmd_none(*pmd));
+@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
+ if (pfn >= pgt_buf_top)
+ panic("alloc_low_page: ran out of memory");
+
+- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
++ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
+ clear_page(adr);
+ *phys = pfn * PAGE_SIZE;
+ return adr;
+@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
+
+ phys = __pa(virt);
+ left = phys & (PAGE_SIZE - 1);
+- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
++ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
+ adr = (void *)(((unsigned long)adr) | left);
+
+ return adr;
+@@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+ unmap_low_page(pmd);
+
+ spin_lock(&init_mm.page_table_lock);
+- pud_populate(&init_mm, pud, __va(pmd_phys));
++ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ __flush_tlb_all();
+@@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
+ unmap_low_page(pud);
+
+ spin_lock(&init_mm.page_table_lock);
+- pgd_populate(&init_mm, pgd, __va(pud_phys));
++ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
+ spin_unlock(&init_mm.page_table_lock);
+ pgd_changed = true;
+ }
+@@ -856,8 +870,8 @@ int kern_addr_valid(unsigned long addr)
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_START,
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ
+ };
+
+ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+@@ -891,7 +905,7 @@ int in_gate_area_no_mm(unsigned long addr)
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
+ if (vma == &gate_vma)
+ return "[vsyscall]";
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 7b179b4..6bd17777 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++
++ pax_open_kernel();
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ pax_close_kernel();
++
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index be1ef57..406f1c2 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
+ int is_ram = page_is_ram(pfn);
+
+- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
+ return NULL;
+ WARN_ON_ONCE(is_ram);
+ }
+@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
+-void iounmap(volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *addr)
+ {
+ struct vm_struct *p, *o;
+
+@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
+
+ /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+ if (page_is_ram(start >> PAGE_SHIFT))
++#ifdef CONFIG_HIGHMEM
++ if ((start >> PAGE_SHIFT) < max_low_pfn)
++#endif
+ return __va(phys);
+
+ addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
+@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
+ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+ {
+ if (page_is_ram(phys >> PAGE_SHIFT))
++#ifdef CONFIG_HIGHMEM
++ if ((phys >> PAGE_SHIFT) < max_low_pfn)
++#endif
+ return;
+
+ iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
+ early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+ static __initdata int after_paging_init;
+-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
+
+ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+ {
+@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+- memset(bm_pte, 0, sizeof(bm_pte));
+- pmd_populate_kernel(&init_mm, pmd, bm_pte);
++ pmd_populate_user(&init_mm, pmd, bm_pte);
+
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
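
The xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() hunks above guard the __va() fast path: with CONFIG_HIGHMEM, page_is_ram() can be true for frames above max_low_pfn that have no permanent kernel mapping, so only lowmem RAM takes the direct map and everything else falls back to ioremap_cache(). Sketched as a predicate, with an illustrative lowmem boundary:

    #include <stdbool.h>

    #define PAGE_SHIFT 12

    /* ~896MB, a typical 32-bit lowmem boundary; illustrative value */
    static unsigned long max_low_pfn = 0x38000UL;

    static bool can_use_direct_map(unsigned long phys, bool is_ram)
    {
        /* only lowmem RAM has a permanent kernel virtual address;
         * highmem RAM needs a temporary mapping instead */
        return is_ram && (phys >> PAGE_SHIFT) < max_low_pfn;
    }
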
+diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
+index d87dd6d..bf3fa66 100644
+--- a/arch/x86/mm/kmemcheck/kmemcheck.c
++++ b/arch/x86/mm/kmemcheck/kmemcheck.c
+@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+ * memory (e.g. tracked pages)? For now, we need this to avoid
+ * invoking kmemcheck for PnP BIOS calls.
+ */
+- if (regs->flags & X86_VM_MASK)
++ if (v8086_mode(regs))
+ return false;
+- if (regs->cs != __KERNEL_CS)
++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
+ return false;
+
+ pte = kmemcheck_pte_lookup(address);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 5c1ae28..45f4ac9 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
+ * Leave an at least ~128 MB hole with possible stack randomization.
+ */
+ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
+-#define MAX_GAP (TASK_SIZE/6*5)
++#define MAX_GAP (pax_task_size/6*5)
+
+ static int mmap_is_legacy(void)
+ {
+@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
+ return rnd << PAGE_SHIFT;
+ }
+
+-static unsigned long mmap_base(void)
++static unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = rlimit(RLIMIT_STACK);
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
+ }
+
+ /*
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-static unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(struct mm_struct *mm)
+ {
+- if (mmap_is_ia32())
++ if (mmap_is_ia32()) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ return SEGMEXEC_TASK_UNMAPPED_BASE;
++ else
++#endif
++
+ return TASK_UNMAPPED_BASE;
+- else
++ } else
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+ }
+
+@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
+ */
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- mm->mmap_legacy_base = mmap_legacy_base();
+- mm->mmap_base = mmap_base();
++ mm->mmap_legacy_base = mmap_legacy_base(mm);
++ mm->mmap_base = mmap_base(mm);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP) {
++ mm->mmap_legacy_base += mm->delta_mmap;
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ }
++#endif
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
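
mmap_base() and mmap_legacy_base() above gain the mm argument so the layout can honour a SEGMEXEC-halved task size, and arch_pick_mmap_layout() then shifts both bases by the per-mm RANDMMAP deltas. The top-down base computation under a variable task size, sketched with illustrative constants (rnd stands in for mmap_rnd(), stack_gap for the clamped RLIMIT_STACK value; the kernel's MIN_GAP also adds stack randomization slack):

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define MIN_GAP       (128UL * 1024 * 1024)

    static unsigned long mmap_base_sketch(unsigned long task_size,
                                          unsigned long stack_gap,
                                          unsigned long rnd)
    {
        unsigned long max_gap = task_size / 6 * 5;

        if (stack_gap < MIN_GAP)
            stack_gap = MIN_GAP;
        else if (stack_gap > max_gap)
            stack_gap = max_gap;

        return PAGE_ALIGN(task_size - stack_gap - rnd);
    }
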
+diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
+index de54b9b..935281f 100644
+--- a/arch/x86/mm/mmio-mod.c
++++ b/arch/x86/mm/mmio-mod.c
+@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
+ break;
+ default:
+ {
+- unsigned char *ip = (unsigned char *)instptr;
++ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
+ my_trace->opcode = MMIO_UNKNOWN_OP;
+ my_trace->width = 0;
+ my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
+ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+ void __iomem *addr)
+ {
+- static atomic_t next_id;
++ static atomic_unchecked_t next_id;
+ struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+ /* These are page-unaligned. */
+ struct mmiotrace_map map = {
+@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+ .private = trace
+ },
+ .phys = offset,
+- .id = atomic_inc_return(&next_id)
++ .id = atomic_inc_return_unchecked(&next_id)
+ };
+ map.map_id = trace->id;
+
+@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ ioremap_trace_core(offset, size, addr);
+ }
+
+-static void iounmap_trace_core(volatile void __iomem *addr)
++static void iounmap_trace_core(const volatile void __iomem *addr)
+ {
+ struct mmiotrace_map map = {
+ .phys = 0,
+@@ -328,7 +328,7 @@ not_enabled:
+ }
+ }
+
+-void mmiotrace_iounmap(volatile void __iomem *addr)
++void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ might_sleep();
+ if (is_enabled()) /* recheck and proper locking in *_core() */
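
next_id above becomes atomic_unchecked_t because PaX's refcount hardening makes ordinary atomic_t arithmetic trap on overflow; a pure id generator is one of the few places where silent wraparound is harmless, so it opts out via the *_unchecked variants. The same idea in portable C11, where unsigned fetch-add wraps by definition:

    #include <stdatomic.h>

    static atomic_uint next_id;

    static unsigned int alloc_trace_id(void)
    {
        /* wraps modulo 2^32 with no overflow policing, by design */
        return atomic_fetch_add_explicit(&next_id, 1,
                                         memory_order_relaxed) + 1;
    }
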
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index fbeaaf4..559063f 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -494,7 +494,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+ return true;
+ }
+
+-static int __init numa_register_memblks(struct numa_meminfo *mi)
++static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
+ {
+ unsigned long uninitialized_var(pfn_align);
+ int i, nid;
+diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
+index b008656..773eac2 100644
+--- a/arch/x86/mm/pageattr-test.c
++++ b/arch/x86/mm/pageattr-test.c
+@@ -36,7 +36,7 @@ enum {
+
+ static int pte_testbit(pte_t pte)
+ {
+- return pte_flags(pte) & _PAGE_UNUSED1;
++ return pte_flags(pte) & _PAGE_CPA_TEST;
+ }
+
+ struct split_state {
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index f9e5267..5c194c9 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ */
+ #ifdef CONFIG_PCI_BIOS
+ if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+ #endif
+
+ /*
+@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ * Does not cover __inittext since that is gone later on. On
+ * 64bit we do not enforce !NX on the low mapping
+ */
+- if (within(address, (unsigned long)_text, (unsigned long)_etext))
+- pgprot_val(forbidden) |= _PAGE_NX;
++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+
++#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * The .rodata section needs to be read-only. Using the pfn
+ * catches all aliases.
+@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+ __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+ pgprot_val(forbidden) |= _PAGE_RW;
++#endif
+
+ #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+ /*
+@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
++ pgprot_val(forbidden) |= _PAGE_RW;
++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
++ }
++#endif
++
+ prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+
+ return prot;
+@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
+ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+ {
+ /* change init_mm */
++ pax_open_kernel();
+ set_pte_atomic(kpte, pte);
++
+ #ifdef CONFIG_X86_32
+ if (!SHARED_KERNEL_PMD) {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ unsigned long cpu;
++#else
+ struct page *page;
++#endif
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
++ pgd_t *pgd = get_cpu_pgd(cpu);
++#else
+ list_for_each_entry(page, &pgd_list, lru) {
+- pgd_t *pgd;
++ pgd_t *pgd = (pgd_t *)page_address(page);
++#endif
++
+ pud_t *pud;
+ pmd_t *pmd;
+
+- pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ pgd += pgd_index(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ set_pte_atomic((pte_t *)pmd, pte);
+ }
+ }
+ #endif
++ pax_close_kernel();
+ }
+
+ static int
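
static_protections() builds a "forbidden" mask that is stripped from whatever protection the caller asked for; the KERNEXEC addition above forbids both RW (text stays read-only) and NX (text stays executable) for the physical range backing the kernel image, with NX filtered through __supported_pte_mask so the logic is a no-op on hardware without NX. The pattern in miniature, with illustrative bit values rather than the real x86 PTE layout:

    #define P_RW 0x002UL
    #define P_NX 0x8000000000000000UL

    static unsigned long supported_pte_mask = ~0UL; /* clear P_NX on !NX CPUs */

    static unsigned long apply_protections(unsigned long prot,
                                           int pfn_in_kernel_image)
    {
        unsigned long forbidden = 0;

        if (pfn_in_kernel_image) {
            forbidden |= P_RW;                       /* never writable */
            forbidden |= P_NX & supported_pte_mask;  /* never non-exec */
        }
        return prot & ~forbidden;
    }
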
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index f6ff57b..481690f 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
+
+ if (!entry) {
+ printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
+- current->comm, current->pid, start, end);
++ current->comm, task_pid_nr(current), start, end);
+ return -EINVAL;
+ }
+
+@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
+ printk(KERN_INFO
+- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+- current->comm, from, to);
++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
++ current->comm, from, to, cursor);
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+ printk(KERN_INFO
+ "%s:%d ioremap_change_attr failed %s "
+ "for %Lx-%Lx\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(flags),
+ base, (unsigned long long)(base + size));
+ return -EINVAL;
+@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+ if (want_flags != flags) {
+ printk(KERN_WARNING
+ "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
+index 8acaddd..4eaa657 100644
+--- a/arch/x86/mm/pat_rbtree.c
++++ b/arch/x86/mm/pat_rbtree.c
+@@ -165,7 +165,7 @@ success:
+
+ failure:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
++ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
+ end, cattr_name(found_type), cattr_name(match->type));
+ return -EBUSY;
+ }
+diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
+index 9f0614d..92ae64a 100644
+--- a/arch/x86/mm/pf_in.c
++++ b/arch/x86/mm/pf_in.c
+@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
+ int i;
+ enum reason_type rv = OTHERS;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+
+@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
+ struct prefix_bits prf;
+ int i;
+
+- p = (unsigned char *)ins_addr;
++ p = (unsigned char *)ktla_ktva(ins_addr);
+ p += skip_prefix(p, &prf);
+ p += get_opcode(p, &opcode);
+ for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 8573b83..4f3ed7e 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
+ list_del(&page->lru);
+ }
+
+-#define UNSHARED_PTRS_PER_PGD \
+- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
+
++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++ unsigned int count = USER_PGD_PTRS;
+
++ while (count--)
++ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
++}
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
++{
++ unsigned int count = USER_PGD_PTRS;
++
++ while (count--) {
++ pgd_t pgd;
++
++#ifdef CONFIG_X86_64
++ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
++#else
++ pgd = *src++;
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
++#endif
++
++ *dst++ = pgd;
++ }
++
++}
++#endif
++
++#ifdef CONFIG_X86_64
++#define pxd_t pud_t
++#define pyd_t pgd_t
++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
++#define pxd_free(mm, pud) pud_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address) pgd_offset((mm), (address))
++#define PYD_SIZE PGDIR_SIZE
++#else
++#define pxd_t pmd_t
++#define pyd_t pud_t
++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
++#define pxd_free(mm, pud) pmd_free((mm), (pud))
++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
++#define pyd_offset(mm, address) pud_offset((mm), (address))
++#define PYD_SIZE PUD_SIZE
++#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
++static inline void pgd_dtor(pgd_t *pgd) {}
++#else
+ static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+ {
+ BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
+ pgd_list_del(pgd);
+ spin_unlock(&pgd_lock);
+ }
++#endif
+
+ /*
+ * List of all pgd's needed for non-PAE so it can invalidate entries
+@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * -- wli
+ */
+
+-#ifdef CONFIG_X86_PAE
++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+ /*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
+ * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
+ * and initialize the kernel pmds here.
+ */
+-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+
+ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ {
+@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+ */
+ flush_tlb_mm(mm);
+ }
++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
++#define PREALLOCATED_PXDS USER_PGD_PTRS
+ #else /* !CONFIG_X86_PAE */
+
+ /* No need to prepopulate any pagetable entries in non-PAE modes. */
+-#define PREALLOCATED_PMDS 0
++#define PREALLOCATED_PXDS 0
+
+ #endif /* CONFIG_X86_PAE */
+
+-static void free_pmds(pmd_t *pmds[])
++static void free_pxds(pxd_t *pxds[])
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++)
+- if (pmds[i])
+- free_page((unsigned long)pmds[i]);
++ for(i = 0; i < PREALLOCATED_PXDS; i++)
++ if (pxds[i])
++ free_page((unsigned long)pxds[i]);
+ }
+
+-static int preallocate_pmds(pmd_t *pmds[])
++static int preallocate_pxds(pxd_t *pxds[])
+ {
+ int i;
+ bool failed = false;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
+- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+- if (pmd == NULL)
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
++ if (pxd == NULL)
+ failed = true;
+- pmds[i] = pmd;
++ pxds[i] = pxd;
+ }
+
+ if (failed) {
+- free_pmds(pmds);
++ free_pxds(pxds);
+ return -ENOMEM;
+ }
+
+@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
+ {
+ int i;
+
+- for(i = 0; i < PREALLOCATED_PMDS; i++) {
++ for(i = 0; i < PREALLOCATED_PXDS; i++) {
+ pgd_t pgd = pgdp[i];
+
+ if (pgd_val(pgd) != 0) {
+- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
+
+- pgdp[i] = native_make_pgd(0);
++ set_pgd(pgdp + i, native_make_pgd(0));
+
+- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
+- pmd_free(mm, pmd);
++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
++ pxd_free(mm, pxd);
+ }
+ }
+ }
+
+-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
+ {
+- pud_t *pud;
++ pyd_t *pyd;
+ unsigned long addr;
+ int i;
+
+- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
+ return;
+
+- pud = pud_offset(pgd, 0);
++#ifdef CONFIG_X86_64
++ pyd = pyd_offset(mm, 0L);
++#else
++ pyd = pyd_offset(pgd, 0L);
++#endif
+
+- for (addr = i = 0; i < PREALLOCATED_PMDS;
+- i++, pud++, addr += PUD_SIZE) {
+- pmd_t *pmd = pmds[i];
++ for (addr = i = 0; i < PREALLOCATED_PXDS;
++ i++, pyd++, addr += PYD_SIZE) {
++ pxd_t *pxd = pxds[i];
+
+ if (i >= KERNEL_PGD_BOUNDARY)
+- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+- sizeof(pmd_t) * PTRS_PER_PMD);
++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
++ sizeof(pxd_t) * PTRS_PER_PMD);
+
+- pud_populate(mm, pud, pmd);
++ pyd_populate(mm, pyd, pxd);
+ }
+ }
+
+ pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ pgd_t *pgd;
+- pmd_t *pmds[PREALLOCATED_PMDS];
++ pxd_t *pxds[PREALLOCATED_PXDS];
+
+ pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+
+ mm->pgd = pgd;
+
+- if (preallocate_pmds(pmds) != 0)
++ if (preallocate_pxds(pxds) != 0)
+ goto out_free_pgd;
+
+ if (paravirt_pgd_alloc(mm) != 0)
+- goto out_free_pmds;
++ goto out_free_pxds;
+
+ /*
+ * Make sure that pre-populating the pmds is atomic with
+@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ spin_lock(&pgd_lock);
+
+ pgd_ctor(mm, pgd);
+- pgd_prepopulate_pmd(mm, pgd, pmds);
++ pgd_prepopulate_pxd(mm, pgd, pxds);
+
+ spin_unlock(&pgd_lock);
+
+ return pgd;
+
+-out_free_pmds:
+- free_pmds(pmds);
++out_free_pxds:
++ free_pxds(pxds);
+ out_free_pgd:
+ free_page((unsigned long)pgd);
+ out:
+@@ -295,7 +356,7 @@ out:
+
+ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ {
+- pgd_mop_up_pmds(mm, pgd);
++ pgd_mop_up_pxds(mm, pgd);
+ pgd_dtor(pgd);
+ paravirt_pgd_free(mm, pgd);
+ free_page((unsigned long)pgd);
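
The pgtable.c rewrite above folds the two preallocation cases into one body: on 32-bit PAE the preallocated level is the PMD populated under a PUD, while on 64-bit with PER_CPU_PGD it is the PUD populated under the PGD, and the pxd_t/pyd_t macro pair maps the generic code onto the right pair of levels per configuration. The aliasing trick in miniature, with a hypothetical config switch standing in for CONFIG_X86_64:

    #ifdef SIXTY_FOUR_BIT           /* hypothetical config switch */
    typedef struct { unsigned long v; } pud_entry;
    #define pxd_entry pud_entry
    #define PXD_LEVEL "pud"
    #else
    typedef struct { unsigned long v; } pmd_entry;
    #define pxd_entry pmd_entry
    #define PXD_LEVEL "pmd"
    #endif

    #include <stdio.h>
    #include <stdlib.h>

    static pxd_entry *alloc_pxd(void)
    {
        /* stands in for __get_free_page(PGALLOC_GFP) */
        return calloc(512, sizeof(pxd_entry));
    }

    int main(void)
    {
        pxd_entry *p = alloc_pxd();

        printf("preallocated one %s page at %p\n", PXD_LEVEL, (void *)p);
        free(p);
        return 0;
    }
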
+diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
+index cac7184..09a39fa 100644
+--- a/arch/x86/mm/pgtable_32.c
++++ b/arch/x86/mm/pgtable_32.c
+@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
++
++ pax_open_kernel();
+ if (pte_val(pteval))
+ set_pte_at(&init_mm, vaddr, pte, pteval);
+ else
+ pte_clear(&init_mm, vaddr, pte);
++ pax_close_kernel();
+
+ /*
+ * It's enough to flush this one mapping.
+diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
+index d2e2735..5c6586f 100644
+--- a/arch/x86/mm/physaddr.c
++++ b/arch/x86/mm/physaddr.c
+@@ -8,7 +8,7 @@
+
+ #ifdef CONFIG_X86_64
+
+-unsigned long __phys_addr(unsigned long x)
++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
+ {
+ if (x >= __START_KERNEL_map) {
+ x -= __START_KERNEL_map;
+@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
+ #else
+
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-unsigned long __phys_addr(unsigned long x)
++unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
+ {
+ /* VMALLOC_* aren't constants */
+ VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
+index 410531d..0f16030 100644
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -5,8 +5,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ static int disable_nx __cpuinitdata;
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ /*
+ * noexec = on|off
+ *
+@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
+ return 0;
+ }
+ early_param("noexec", noexec_setup);
++#endif
++
++#endif
+
+ void __cpuinit x86_configure_nx(void)
+ {
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if (cpu_has_nx && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
++#endif
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+
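
setup_nx.c now compiles the noexec= plumbing only where hardware NX can exist (64-bit or PAE) and drops the user-visible toggle entirely under PAX_PAGEEXEC; x86_configure_nx() then simply sets or clears _PAGE_NX in __supported_pte_mask, the filter every later NX use passes through. Sketch:

    #include <stdbool.h>

    #define PAGE_NX (1UL << 63)

    static unsigned long supported_pte_mask = ~0UL;

    static void configure_nx(bool cpu_has_nx)
    {
        /* every later "| PAGE_NX" is masked through this, so clearing
         * the bit here makes NX a no-op on CPUs without the feature */
        if (cpu_has_nx)
            supported_pte_mask |= PAGE_NX;
        else
            supported_pte_mask &= ~PAGE_NX;
    }
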
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index d6c0418..06a0ad5 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -65,7 +65,11 @@ void leave_mm(int cpu)
+ BUG();
+ cpumask_clear_cpu(cpu,
+ mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(swapper_pg_dir);
++#endif
++
+ }
+ EXPORT_SYMBOL_GPL(leave_mm);
+
+diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
+index 6687022..ceabcfa 100644
+--- a/arch/x86/net/bpf_jit.S
++++ b/arch/x86/net/bpf_jit.S
+@@ -9,6 +9,7 @@
+ */
+ #include <linux/linkage.h>
+ #include <asm/dwarf2.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * Calling convention :
+@@ -35,6 +36,7 @@ sk_load_word:
+ jle bpf_slow_path_word
+ mov (SKBDATA,%rsi),%eax
+ bswap %eax /* ntohl() */
++ pax_force_retaddr
+ ret
+
+
+@@ -53,6 +55,7 @@ sk_load_half:
+ jle bpf_slow_path_half
+ movzwl (SKBDATA,%rsi),%eax
+ rol $8,%ax # ntohs()
++ pax_force_retaddr
+ ret
+
+ sk_load_byte_ind:
+@@ -66,6 +69,7 @@ sk_load_byte:
+ cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
+ jle bpf_slow_path_byte
+ movzbl (SKBDATA,%rsi),%eax
++ pax_force_retaddr
+ ret
+
+ /**
+@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
+ movzbl (SKBDATA,%rsi),%ebx
+ and $15,%bl
+ shl $2,%bl
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+ ENDPROC(sk_load_byte_msh)
+@@ -91,6 +96,7 @@ bpf_error:
+ xor %eax,%eax
+ mov -8(%rbp),%rbx
+ leaveq
++ pax_force_retaddr
+ ret
+
+ /* rsi contains offset and can be scratched */
+@@ -113,6 +119,7 @@ bpf_slow_path_word:
+ js bpf_error
+ mov -12(%rbp),%eax
+ bswap %eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_half:
+@@ -121,12 +128,14 @@ bpf_slow_path_half:
+ mov -12(%rbp),%ax
+ rol $8,%ax
+ movzwl %ax,%eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ js bpf_error
+ movzbl -12(%rbp),%eax
++ pax_force_retaddr
+ ret
+
+ bpf_slow_path_byte_msh:
+@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
+ and $15,%al
+ shl $2,%al
+ xchg %eax,%ebx
++ pax_force_retaddr
+ ret
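
Every ret in the JIT helpers above gains pax_force_retaddr, a PaX macro that rewrites the saved return address so a corrupted stack cannot redirect the return outside kernel text; its exact expansion depends on the chosen KERNEXEC plugin method. One plausible expansion (the "bts" method) is sketched below purely for illustration:

    # illustrative only: pin bit 63 of the saved return address so the
    # return target stays in the kernel half of the address space
    btsq $63, (%rsp)
    ret
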
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 5a5b6e4..37ccbe3 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -11,6 +11,7 @@
+ #include <asm/cacheflush.h>
+ #include <linux/netdevice.h>
+ #include <linux/filter.h>
++#include <linux/random.h>
+
+ /*
+ * Conventions :
+@@ -45,13 +46,84 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+ return ptr + len;
+ }
+
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++#define MAX_INSTR_CODE_SIZE 96
++#else
++#define MAX_INSTR_CODE_SIZE 64
++#endif
++
+ #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
+
+ #define EMIT1(b1) EMIT(b1, 1)
+ #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
+ #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
+ #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
++
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++/* original constant will appear in ecx */
++#define DILUTE_CONST_SEQUENCE(_off, _key) \
++do { \
++ /* mov ecx, randkey */ \
++ EMIT1(0xb9); \
++ EMIT(_key, 4); \
++ /* xor ecx, randkey ^ off */ \
++ EMIT2(0x81, 0xf1); \
++ EMIT((_key) ^ (_off), 4); \
++} while (0)
++
++#define EMIT1_off32(b1, _off) \
++do { \
++ switch (b1) { \
++ case 0x05: /* add eax, imm32 */ \
++ case 0x2d: /* sub eax, imm32 */ \
++ case 0x25: /* and eax, imm32 */ \
++ case 0x0d: /* or eax, imm32 */ \
++ case 0xb8: /* mov eax, imm32 */ \
++ case 0x3d: /* cmp eax, imm32 */ \
++ case 0xa9: /* test eax, imm32 */ \
++ DILUTE_CONST_SEQUENCE(_off, randkey); \
++ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
++ break; \
++ case 0xbb: /* mov ebx, imm32 */ \
++ DILUTE_CONST_SEQUENCE(_off, randkey); \
++ /* mov ebx, ecx */ \
++ EMIT2(0x89, 0xcb); \
++ break; \
++ case 0xbe: /* mov esi, imm32 */ \
++ DILUTE_CONST_SEQUENCE(_off, randkey); \
++ /* mov esi, ecx */ \
++ EMIT2(0x89, 0xce); \
++ break; \
++ case 0xe8: /* call rel imm32, always to known funcs */ \
++ EMIT1(b1); \
++ EMIT(_off, 4); \
++ break; \
++ case 0xe9: /* jmp rel imm32 */ \
++ EMIT1(b1); \
++ EMIT(_off, 4); \
++ /* prevent fall-through, we're not called if off = 0 */ \
++ EMIT(0xcccccccc, 4); \
++ EMIT(0xcccccccc, 4); \
++ break; \
++ default: \
++ BUILD_BUG_ON(1); \
++ } \
++} while (0)
++
++#define EMIT2_off32(b1, b2, _off) \
++do { \
++ if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */ \
++ DILUTE_CONST_SEQUENCE(_off, randkey); \
++ /* imul eax, ecx */ \
++ EMIT3(0x0f, 0xaf, 0xc1); \
++ } else { \
++ BUILD_BUG_ON(1); \
++ } \
++} while (0)
++#else
+ #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
++#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
++#endif
+
+ #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
+ #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+@@ -86,6 +158,24 @@ do { \
+ #define X86_JBE 0x76
+ #define X86_JA 0x77
+
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++#define APPEND_FLOW_VERIFY() \
++do { \
++ /* mov ecx, randkey */ \
++ EMIT1(0xb9); \
++ EMIT(randkey, 4); \
++ /* cmp ecx, randkey */ \
++ EMIT2(0x81, 0xf9); \
++ EMIT(randkey, 4); \
++ /* jz after 8 int 3s */ \
++ EMIT2(0x74, 0x08); \
++ EMIT(0xcccccccc, 4); \
++ EMIT(0xcccccccc, 4); \
++} while (0)
++#else
++#define APPEND_FLOW_VERIFY() do { } while (0)
++#endif
++
+ #define EMIT_COND_JMP(op, offset) \
+ do { \
+ if (is_near(offset)) \
+@@ -93,6 +183,7 @@ do { \
+ else { \
+ EMIT2(0x0f, op + 0x10); \
+ EMIT(offset, 4); /* jxx .+off32 */ \
++ APPEND_FLOW_VERIFY(); \
+ } \
+ } while (0)
+
+@@ -117,10 +208,14 @@ static inline void bpf_flush_icache(void *start, void *end)
+ set_fs(old_fs);
+ }
+
++struct bpf_jit_work {
++ struct work_struct work;
++ void *image;
++};
+
+ void bpf_jit_compile(struct sk_filter *fp)
+ {
+- u8 temp[64];
++ u8 temp[MAX_INSTR_CODE_SIZE];
+ u8 *prog;
+ unsigned int proglen, oldproglen = 0;
+ int ilen, i;
+@@ -133,6 +228,9 @@ void bpf_jit_compile(struct sk_filter *fp)
+ unsigned int *addrs;
+ const struct sock_filter *filter = fp->insns;
+ int flen = fp->len;
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++ unsigned int randkey;
++#endif
+
+ if (!bpf_jit_enable)
+ return;
+@@ -141,11 +239,19 @@ void bpf_jit_compile(struct sk_filter *fp)
+ if (addrs == NULL)
+ return;
+
++ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
++ if (!fp->work)
++ goto out;
++
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++ randkey = get_random_int();
++#endif
++
+ /* Before first pass, make a rough estimation of addrs[]
+- * each bpf instruction is translated to less than 64 bytes
++ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
+ */
+ for (proglen = 0, i = 0; i < flen; i++) {
+- proglen += 64;
++ proglen += MAX_INSTR_CODE_SIZE;
+ addrs[i] = proglen;
+ }
+ cleanup_addr = proglen; /* epilogue address */
+@@ -253,10 +359,8 @@ void bpf_jit_compile(struct sk_filter *fp)
+ case BPF_S_ALU_MUL_K: /* A *= K */
+ if (is_imm8(K))
+ EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
+- else {
+- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
+- EMIT(K, 4);
+- }
++ else
++ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
+ break;
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ seen |= SEEN_XREG;
+@@ -276,8 +380,14 @@ void bpf_jit_compile(struct sk_filter *fp)
+ EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
+ break;
+ case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
++#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
++ DILUTE_CONST_SEQUENCE(K, randkey);
++ // imul rax, rcx
++ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
++#else
+ EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
+ EMIT(K, 4);
++#endif
+ EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+ break;
+ case BPF_S_ALU_AND_X:
+@@ -477,7 +587,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ common_load: seen |= SEEN_DATAREF;
+ if ((int)K < 0) {
+ /* Abort the JIT because __load_pointer() is needed. */
+- goto out;
++ goto error;
+ }
+ t_offset = func - (image + addrs[i]);
+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
+@@ -492,7 +602,7 @@ common_load: seen |= SEEN_DATAREF;
+ case BPF_S_LDX_B_MSH:
+ if ((int)K < 0) {
+ /* Abort the JIT because __load_pointer() is needed. */
+- goto out;
++ goto error;
+ }
+ seen |= SEEN_DATAREF | SEEN_XREG;
+ t_offset = sk_load_byte_msh - (image + addrs[i]);
+@@ -582,17 +692,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
+ break;
+ default:
+ /* hmm, too complex filter, give up with jit compiler */
+- goto out;
++ goto error;
+ }
+ ilen = prog - temp;
+ if (image) {
+ if (unlikely(proglen + ilen > oldproglen)) {
+ pr_err("bpb_jit_compile fatal error\n");
+- kfree(addrs);
+- module_free(NULL, image);
+- return;
++ module_free_exec(NULL, image);
++ goto error;
+ }
++ pax_open_kernel();
+ memcpy(image + proglen, temp, ilen);
++ pax_close_kernel();
+ }
+ proglen += ilen;
+ addrs[i] = proglen;
+@@ -613,11 +724,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
+ break;
+ }
+ if (proglen == oldproglen) {
+- image = module_alloc(max_t(unsigned int,
+- proglen,
+- sizeof(struct work_struct)));
++ image = module_alloc_exec(proglen);
+ if (!image)
+- goto out;
++ goto error;
+ }
+ oldproglen = proglen;
+ }
+@@ -633,7 +742,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
+ bpf_flush_icache(image, image + proglen);
+
+ fp->bpf_func = (void *)image;
+- }
++ } else
++error:
++ kfree(fp->work);
++
+ out:
+ kfree(addrs);
+ return;
+@@ -641,18 +753,20 @@ out:
+
+ static void jit_free_defer(struct work_struct *arg)
+ {
+- module_free(NULL, arg);
++ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
++ kfree(arg);
+ }
+
+ /* run from softirq, we must use a work_struct to call
+- * module_free() from process context
++ * module_free_exec() from process context
+ */
+ void bpf_jit_free(struct sk_filter *fp)
+ {
+ if (fp->bpf_func != sk_run_filter) {
+- struct work_struct *work = (struct work_struct *)fp->bpf_func;
++ struct work_struct *work = &fp->work->work;
+
+ INIT_WORK(work, jit_free_defer);
++ fp->work->image = fp->bpf_func;
+ schedule_work(work);
+ }
+ }
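
GRKERNSEC_JIT_HARDEN's DILUTE_CONST_SEQUENCE is constant blinding: an attacker-chosen 32-bit immediate never appears verbatim in the emitted code. The JIT instead emits mov ecx, key followed by xor ecx, key ^ imm with a per-filter random key, so ecx ends up holding imm while neither emitted immediate equals it, defeating attempts to smuggle shellcode bytes in through BPF constants. The arithmetic, checked in plain C:

    #include <assert.h>
    #include <stdint.h>

    static void emit_blinded(uint32_t imm, uint32_t key,
                             uint32_t *emit1, uint32_t *emit2)
    {
        *emit1 = key;        /* mov ecx, key     */
        *emit2 = key ^ imm;  /* xor ecx, key^imm */
    }

    int main(void)
    {
        uint32_t a, b;

        emit_blinded(0x0badc0deu, 0x5f3759dfu, &a, &b);
        assert((a ^ b) == 0x0badc0deu);  /* ecx == imm after the xor */
        return 0;
    }
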
+diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
+index bff89df..377758a 100644
+--- a/arch/x86/oprofile/backtrace.c
++++ b/arch/x86/oprofile/backtrace.c
+@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
+ struct stack_frame_ia32 *fp;
+ unsigned long bytes;
+
+- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
+ return NULL;
+
+- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
++ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
+
+ oprofile_add_trace(bufhead[0].return_address);
+
+@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+ struct stack_frame bufhead[2];
+ unsigned long bytes;
+
+- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
++ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
+ if (bytes != sizeof(bufhead))
+ return NULL;
+
+@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
+ {
+ struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
+
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned long stack = kernel_stack_pointer(regs);
+ if (depth)
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 6bc0899..13d2579 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -23,6 +23,7 @@
+ #include <asm/nmi.h>
+ #include <asm/msr.h>
+ #include <asm/apic.h>
++#include <asm/pgtable.h>
+
+ #include "op_counter.h"
+ #include "op_x86_model.h"
+@@ -759,8 +760,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+ if (ret)
+ return ret;
+
+- if (!model->num_virt_counters)
+- model->num_virt_counters = model->num_counters;
++ if (!model->num_virt_counters) {
++ pax_open_kernel();
++ *(unsigned int *)&model->num_virt_counters = model->num_counters;
++ pax_close_kernel();
++ }
+
+ mux_init(ops);
+
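
Because the op_x86_model_spec instances become const (__do_const below), the few legitimate late writes here and in op_model_amd.c/op_model_ppro.c are bracketed with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection around the store (on x86, conceptually toggling CR0.WP). A userspace analogue of the bracket using mprotect; page is assumed to be a page-aligned mapping:

    #include <string.h>
    #include <sys/mman.h>

    /* data lives read-only; make it writable only for one update */
    static void write_protected(void *page, size_t len, size_t off,
                                const void *val, size_t n)
    {
        mprotect(page, len, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        memcpy((char *)page + off, val, n);
        mprotect(page, len, PROT_READ);               /* pax_close_kernel() */
    }
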
+diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
+index 303f086..d020916 100644
+--- a/arch/x86/oprofile/op_model_amd.c
++++ b/arch/x86/oprofile/op_model_amd.c
+@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
+ num_counters = AMD64_NUM_COUNTERS;
+ }
+
+- op_amd_spec.num_counters = num_counters;
+- op_amd_spec.num_controls = num_counters;
+- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
++ pax_open_kernel();
++ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
++ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
++ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
++ pax_close_kernel();
+
+ return 0;
+ }
+diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
+index d90528e..0127e2b 100644
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -19,6 +19,7 @@
+ #include <asm/msr.h>
+ #include <asm/apic.h>
+ #include <asm/nmi.h>
++#include <asm/pgtable.h>
+
+ #include "op_x86_model.h"
+ #include "op_counter.h"
+@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
+
+ num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
+
+- op_arch_perfmon_spec.num_counters = num_counters;
+- op_arch_perfmon_spec.num_controls = num_counters;
++ pax_open_kernel();
++ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
++ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
++ pax_close_kernel();
+ }
+
+ static int arch_perfmon_init(struct oprofile_operations *ignore)
+diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
+index 71e8a67..6a313bb 100644
+--- a/arch/x86/oprofile/op_x86_model.h
++++ b/arch/x86/oprofile/op_x86_model.h
+@@ -52,7 +52,7 @@ struct op_x86_model_spec {
+ void (*switch_ctrl)(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs);
+ #endif
+-};
++} __do_const;
+
+ struct op_counter_config;
+
+diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
+index 385a940..b11662d 100644
+--- a/arch/x86/pci/amd_bus.c
++++ b/arch/x86/pci/amd_bus.c
+@@ -355,7 +355,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
++static struct notifier_block amd_cpu_notifier = {
+ .notifier_call = amd_cpu_notify,
+ };
+
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 372e9b8..e775a6c 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -50,7 +50,7 @@ struct irq_router {
+ struct irq_router_handler {
+ u16 vendor;
+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+-};
++} __do_const;
+
+ int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
+ void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
+@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
+ return 0;
+ }
+
+-static __initdata struct irq_router_handler pirq_routers[] = {
++static __initconst const struct irq_router_handler pirq_routers[] = {
+ { PCI_VENDOR_ID_INTEL, intel_router_probe },
+ { PCI_VENDOR_ID_AL, ali_router_probe },
+ { PCI_VENDOR_ID_ITE, ite_router_probe },
+@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
+ static void __init pirq_find_router(struct irq_router *r)
+ {
+ struct irq_routing_table *rt = pirq_table;
+- struct irq_router_handler *h;
++ const struct irq_router_handler *h;
+
+ #ifdef CONFIG_PCI_BIOS
+ if (!rt->signature) {
+@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
+ return 0;
+ }
+
+-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
+ {
+ .callback = fix_broken_hp_bios_irq9,
+ .ident = "HP Pavilion N5400 Series Laptop",
+diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
+index cb29191..036766d 100644
+--- a/arch/x86/pci/mrst.c
++++ b/arch/x86/pci/mrst.c
+@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
+ printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
+ pci_mmcfg_late_init();
+ pcibios_enable_irq = mrst_pci_irq_enable;
+- pci_root_ops = pci_mrst_ops;
++ pax_open_kernel();
++ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
++ pax_close_kernel();
+ /* Continue with standard init */
+ return 1;
+ }
+diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
+index db0e9a5..0372c14 100644
+--- a/arch/x86/pci/pcbios.c
++++ b/arch/x86/pci/pcbios.c
+@@ -79,50 +79,93 @@ union bios32 {
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+ */
+
+-static unsigned long bios32_service(unsigned long service)
++static unsigned long __devinit bios32_service(unsigned long service)
+ {
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
++ struct desc_struct d, *gdt;
+
+ local_irq_save(flags);
+- __asm__("lcall *(%%edi); cld"
++
++ gdt = get_cpu_gdt_table(smp_processor_id());
++
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++
++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+- "D" (&bios32_indirect));
++ "D" (&bios32_indirect),
++ "r"(__PCIBIOS_DS)
++ : "memory");
++
++ pax_open_kernel();
++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+
+ switch (return_code) {
+- case 0:
+- return address + entry;
+- case 0x80: /* Not present */
+- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+- return 0;
+- default: /* Shouldn't happen */
+- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+- service, return_code);
++ case 0: {
++ int cpu;
++ unsigned char flags;
++
++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
++ printk(KERN_WARNING "bios32_service: not valid\n");
+ return 0;
++ }
++ address = address + PAGE_OFFSET;
++ length += 16UL; /* some BIOSes underreport this... */
++ flags = 4;
++ if (length >= 64*1024*1024) {
++ length >>= PAGE_SHIFT;
++ flags |= 8;
++ }
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ pack_descriptor(&d, address, length, 0x9b, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, address, length, 0x93, flags);
++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
++ }
++ return entry;
++ }
++ case 0x80: /* Not present */
++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
++ return 0;
++ default: /* Shouldn't happen */
++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
++ service, return_code);
++ return 0;
+ }
+ }
+
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect __read_only = { 0, __PCIBIOS_CS };
+
+-static int pci_bios_present;
++static int pci_bios_present __read_only;
+
+ static int __devinit check_pcibios(void)
+ {
+@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
+ unsigned long flags, pcibios_entry;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
++ pci_indirect.address = pcibios_entry;
+
+ local_irq_save(flags);
+- __asm__(
+- "lcall *(%%edi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%edi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
+ "=b" (ebx),
+ "=c" (ecx)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+- "D" (&pci_indirect)
++ "D" (&pci_indirect),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ local_irq_restore(flags);
+
+@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 8 bits, do not trust the
+ * BIOS having done it:
+@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+ *value &= 0xff;
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ /*
+ * Zero-extend the result beyond 16 bits, do not trust the
+ * BIOS having done it:
+@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+ *value &= 0xffff;
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+
+ switch (len) {
+ case 1:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 2:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ case 4:
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w6, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
+ "c" (value),
+ "b" (bx),
+ "D" ((long)reg),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ break;
+ }
+
+@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+
+ DBG("PCI: Fetching IRQ routing table... ");
+ __asm__("push %%es\n\t"
++ "movw %w8, %%ds\n\t"
+ "push %%ds\n\t"
+ "pop %%es\n\t"
+- "lcall *(%%esi); cld\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
+ "pop %%es\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
+ "1" (0),
+ "D" ((long) &opt),
+ "S" (&pci_indirect),
+- "m" (opt)
++ "m" (opt),
++ "r" (__PCIBIOS_DS)
+ : "memory");
+ DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
+ if (ret & 0xff00)
+@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+ {
+ int ret;
+
+- __asm__("lcall *(%%esi); cld\n\t"
++ __asm__("movw %w5, %%ds\n\t"
++ "lcall *%%ss:(%%esi); cld\n\t"
++ "push %%ss\n\t"
++ "pop %%ds\n"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
+ : "0" (PCIBIOS_SET_PCI_HW_INT),
+ "b" ((dev->bus->number << 8) | dev->devfn),
+ "c" ((irq << 8) | (pin + 10)),
+- "S" (&pci_indirect));
++ "S" (&pci_indirect),
++ "r" (__PCIBIOS_DS));
+ return !(ret & 0xff00);
+ }
+ EXPORT_SYMBOL(pcibios_set_irq_routing);
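The pcbios.c rework stops calling the PCI BIOS through the flat kernel segments: bios32_service() now validates the advertised service area and installs dedicated __PCIBIOS_CS/__PCIBIOS_DS GDT entries whose base and limit cover only that area, and every lcall swaps %ds to __PCIBIOS_DS around the call, so a misbehaving BIOS can no longer reach arbitrary kernel memory. The efi_32.c hunk below applies the same trick to EFI runtime calls under KERNEXEC. For reference, a stand-alone sketch of the descriptor layout that pack_descriptor() produces from base/limit/type/flags (bit positions per the 32-bit GDT format; this toy version just returns the packed 8-byte value):

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
                                uint8_t type, uint8_t flags)
{
        uint64_t d;

        d  = (uint64_t)(limit & 0x0ffff);               /* limit 15:0    */
        d |= (uint64_t)(base & 0x00ffffff) << 16;       /* base 23:0     */
        d |= (uint64_t)type << 40;                      /* type/S/DPL/P  */
        d |= (uint64_t)((limit >> 16) & 0xf) << 48;     /* limit 19:16   */
        d |= (uint64_t)(flags & 0xf) << 52;             /* AVL/L/D/G     */
        d |= (uint64_t)((base >> 24) & 0xff) << 56;     /* base 31:24    */
        return d;
}

int main(void)
{
        /* 0x9B/0xC is the flat 4 GiB code descriptor built in bios32_service() */
        printf("%016llx\n",
               (unsigned long long)pack_descriptor(0, 0xFFFFF, 0x9B, 0xC));
        return 0;
}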
+diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
+index 40e4469..0592924 100644
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
+ {
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++#endif
++
+ local_irq_save(efi_rt_eflags);
+
+ load_cr3(initial_page_table);
+ __flush_tlb_all();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = __pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
+ {
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++
++ memset(&d, 0, sizeof d);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ load_cr3(get_cpu_pgd(smp_processor_id()));
++#else
+ load_cr3(swapper_pg_dir);
++#endif
++
+ __flush_tlb_all();
+
+ local_irq_restore(efi_rt_eflags);
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 0fba86d..3642981 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -75,6 +75,11 @@ void __init efi_call_phys_prelog(void)
+ vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+ }
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ load_cr3(swapper_pg_dir);
++#endif
++
+ __flush_tlb_all();
+ }
+
+@@ -88,6 +93,11 @@ void __init efi_call_phys_epilog(void)
+ for (pgd = 0; pgd < n_pgds; pgd++)
+ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+ kfree(save_pgd);
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ load_cr3(get_cpu_pgd(smp_processor_id()));
++#endif
++
+ __flush_tlb_all();
+ local_irq_restore(efi_flags);
+ early_code_mapping_set_exec(0);
+diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
+index fbe66e6..eae5e38 100644
+--- a/arch/x86/platform/efi/efi_stub_32.S
++++ b/arch/x86/platform/efi/efi_stub_32.S
+@@ -6,7 +6,9 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+
+ /*
+ * efi_call_phys(void *, ...) is a function with variable parameters.
+@@ -20,7 +22,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $(__KERNEXEC_EFI_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
++ addl $2f,(1f)
++ ljmp *(1f)
++
++__INITDATA
++1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
++.previous
++
++2:
++ subl $2b,(1b)
++#else
++ jmp 1f-__PAGE_OFFSET
+ 1:
++#endif
+
+ /*
+ * 2. Now on the top of stack is the return
+@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $(__KERNEL_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
++ ljmp $(__KERNEL_CS),$1f
++#else
++ jmp 1f+__PAGE_OFFSET
++#endif
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index 4c07cca..2c8427d 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define SAVE_XMM \
+ mov %rsp, %rax; \
+@@ -40,6 +41,7 @@ ENTRY(efi_call0)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call0)
+
+@@ -50,6 +52,7 @@ ENTRY(efi_call1)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call1)
+
+@@ -60,6 +63,7 @@ ENTRY(efi_call2)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call2)
+
+@@ -71,6 +75,7 @@ ENTRY(efi_call3)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call3)
+
+@@ -83,6 +88,7 @@ ENTRY(efi_call4)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call4)
+
+@@ -96,6 +102,7 @@ ENTRY(efi_call5)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call5)
+
+@@ -112,5 +119,6 @@ ENTRY(efi_call6)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call6)
+diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
+index ad4ec1c..686479e 100644
+--- a/arch/x86/platform/mrst/mrst.c
++++ b/arch/x86/platform/mrst/mrst.c
+@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
+ EXPORT_SYMBOL_GPL(sfi_mrtc_array);
+ int sfi_mrtc_num;
+
+-static void mrst_power_off(void)
++static __noreturn void mrst_power_off(void)
+ {
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
++ BUG();
+ }
+
+-static void mrst_reboot(void)
++static __noreturn void mrst_reboot(void)
+ {
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+ else
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
++ BUG();
+ }
+
+ /* parse all the mtimer info to a static mtimer array */
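Marking mrst_power_off() and mrst_reboot() __noreturn and ending them with BUG() makes the contract explicit: control must never come back from a power-off or reset request, and if the SCU IPC command fails to take effect the kernel traps rather than running on in an undefined state. A user-space sketch of the same contract, assuming only GCC's noreturn attribute:

#include <stdio.h>
#include <stdlib.h>

static void power_off(void) __attribute__((noreturn));

static void power_off(void)
{
        puts("requesting power off");   /* stands in for the SCU IPC call */
        abort();                        /* stands in for BUG(): no way back */
}

int main(void)
{
        power_off();
        /* the compiler knows this point is unreachable */
}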
+diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
+index d6ee929..3637cb5 100644
+--- a/arch/x86/platform/olpc/olpc_dt.c
++++ b/arch/x86/platform/olpc/olpc_dt.c
+@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
+ return res;
+ }
+
+-static struct of_pdt_ops prom_olpc_ops __initdata = {
++static struct of_pdt_ops prom_olpc_ops __initconst = {
+ .nextprop = olpc_dt_nextprop,
+ .getproplen = olpc_dt_getproplen,
+ .getproperty = olpc_dt_getproperty,
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 43c9f6a..2b63a0b 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -132,7 +132,7 @@ static void do_fpu_end(void)
+ static void fix_processor_context(void)
+ {
+ int cpu = smp_processor_id();
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+
+ set_tss_desc(cpu, t); /*
+ * This just modifies memory; should not be
+@@ -142,8 +142,6 @@ static void fix_processor_context(void)
+ */
+
+ #ifdef CONFIG_X86_64
+- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+-
+ syscall_init(); /* This sets MSR_*STAR and related */
+ #endif
+ load_TR_desc(); /* This does ltr */
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index e529730..8d08690 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -11,10 +11,13 @@
+ #include <endian.h>
+ #include <regex.h>
+
++#include "../../../include/generated/autoconf.h"
++
+ static void die(char *fmt, ...);
+
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+ static unsigned long reloc16_count, reloc16_idx;
+@@ -322,9 +325,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -359,7 +392,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -384,7 +417,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -417,7 +450,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -437,9 +472,22 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++
++#ifdef CONFIG_X86_32
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
++#endif
++
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -448,13 +496,13 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -481,14 +529,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -550,13 +598,13 @@ static void print_absolute_relocs(void)
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
+ int use_real_mode)
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -580,6 +628,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
+ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+ r_type = ELF32_R_TYPE(rel->r_info);
+
++ /* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
++
+ shn_abs = sym->st_shndx == SHN_ABS;
+
+ switch (r_type) {
+@@ -676,7 +740,7 @@ static int write32(unsigned int v, FILE *f)
+
+ static void emit_relocs(int as_text, int use_real_mode)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc, use_real_mode);
+@@ -803,6 +867,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
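read_phdrs() is added so that read_relocs() can, under KERNEXEC on i386, rebase each relocation by the PT_LOAD segment containing it (base = CONFIG_PAGE_OFFSET + p_paddr - p_vaddr), and so the per-cpu and KERNEXEC section filters in walk_relocs() see consistent addresses. A stand-alone sketch of the same program-header walk, assuming a native-endian 32-bit ELF input (the patch itself converts every field via elf32_to_cpu()):

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        Elf32_Ehdr ehdr;
        Elf32_Phdr *phdr;
        FILE *fp;
        unsigned int i;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <32-bit ELF file>\n", argv[0]);
                return 1;
        }
        fp = fopen(argv[1], "rb");
        if (!fp || fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
                fprintf(stderr, "cannot read ELF header\n");
                return 1;
        }
        phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
        if (!phdr || fseek(fp, ehdr.e_phoff, SEEK_SET) < 0 ||
            fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
                fprintf(stderr, "cannot read program headers\n");
                return 1;
        }
        for (i = 0; i < ehdr.e_phnum; i++)
                printf("phdr %u: type=%u offset=0x%x vaddr=0x%x\n", i,
                       (unsigned)phdr[i].p_type, (unsigned)phdr[i].p_offset,
                       (unsigned)phdr[i].p_vaddr);
        free(phdr);
        fclose(fp);
        return 0;
}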
+diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
+index c6c7131..2851e03 100644
+--- a/arch/x86/um/tls_32.c
++++ b/arch/x86/um/tls_32.c
+@@ -259,7 +259,7 @@ out:
+ if (unlikely(task == current &&
+ !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+ printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+- "without flushed TLS.", current->pid);
++ "without flushed TLS.", task_pid_nr(current));
+ }
+
+ return 0;
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index 5d17950..2253fc9 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ GCOV_PROFILE := n
+
+ #
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index 468d591..8e80a0a 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -25,6 +25,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
++#include <asm/mman.h>
+
+ enum {
+ VDSO_DISABLED = 0,
+@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
+ void enable_sep_cpu(void)
+ {
+ int cpu = get_cpu();
+- struct tss_struct *tss = &per_cpu(init_tss, cpu);
++ struct tss_struct *tss = init_tss + cpu;
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+ put_cpu();
+@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ if (compat)
+ addr = VDSO_HIGH_BASE;
+ else {
+- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ current->mm->context.vdso = addr;
+
+ if (compat_uses_vma || !compat) {
+ /*
+@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ }
+
+ current_thread_info()->sysenter_return =
+- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+
+@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
+ return "[vdso]";
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
++ return "[vdso]";
++#endif
++
+ return NULL;
+ }
+
+@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+ * Check to see if the corresponding task was created in compat vdso
+ * mode.
+ */
+- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
++ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
+ return &gate_vma;
+ return NULL;
+ }
+diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
+index 153407c..611cba9 100644
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -16,8 +16,6 @@
+ #include <asm/vdso.h>
+ #include <asm/page.h>
+
+-unsigned int __read_mostly vdso_enabled = 1;
+-
+ extern char vdso_start[], vdso_end[];
+ extern unsigned short vdso_sync_cpuid;
+
+@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ * unaligned here as a result of stack start randomization.
+ */
+ addr = PAGE_ALIGN(addr);
+- addr = align_addr(addr, NULL, ALIGN_VDSO);
+
+ return addr;
+ }
+@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- unsigned long addr;
++ unsigned long addr = 0;
+ int ret;
+
+- if (!vdso_enabled)
+- return 0;
+-
+ down_write(&mm->mmap_sem);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ addr = vdso_addr(mm->start_stack, vdso_size);
++ addr = align_addr(addr, NULL, ALIGN_VDSO);
+ addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+- current->mm->context.vdso = (void *)addr;
++ mm->context.vdso = addr;
+
+ ret = install_special_mapping(mm, addr, vdso_size,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_ALWAYSDUMP,
+ vdso_pages);
+- if (ret) {
+- current->mm->context.vdso = NULL;
+- goto up_fail;
+- }
++
++ if (ret)
++ mm->context.vdso = 0;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+-
+-static __init int vdso_setup(char *s)
+-{
+- vdso_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 5189fe8..d937469 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+
+ struct shared_info xen_dummy_shared_info;
+
+-void *xen_initial_gdt;
+-
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+ __read_mostly int xen_have_vector_callback;
+ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -382,8 +380,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+ unsigned long va = dtr->address;
+ unsigned int size = dtr->size + 1;
+- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+- unsigned long frames[pages];
++ unsigned long frames[65536 / PAGE_SIZE];
+ int f;
+
+ /*
+@@ -431,8 +428,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ {
+ unsigned long va = dtr->address;
+ unsigned int size = dtr->size + 1;
+- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+- unsigned long frames[pages];
++ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
+ int f;
+
+ /*
+@@ -440,7 +436,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ * 8-byte entries, or 16 4k pages..
+ */
+
+- BUG_ON(size > 65536);
++ BUG_ON(size > GDT_SIZE);
+ BUG_ON(va & ~PAGE_MASK);
+
+ for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+@@ -1072,30 +1068,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+ #endif
+ };
+
+-static void xen_reboot(int reason)
++static __noreturn void xen_reboot(int reason)
+ {
+ struct sched_shutdown r = { .reason = reason };
+
+- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
+- BUG();
++ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
++ BUG();
+ }
+
+-static void xen_restart(char *msg)
++static __noreturn void xen_restart(char *msg)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_emergency_restart(void)
++static __noreturn void xen_emergency_restart(void)
+ {
+ xen_reboot(SHUTDOWN_reboot);
+ }
+
+-static void xen_machine_halt(void)
++static __noreturn void xen_machine_halt(void)
+ {
+ xen_reboot(SHUTDOWN_poweroff);
+ }
+
+-static void xen_machine_power_off(void)
++static void __noreturn xen_machine_power_off(void)
+ {
+ if (pm_power_off)
+ pm_power_off();
+@@ -1196,7 +1192,17 @@ asmlinkage void __init xen_start_kernel(void)
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
+ /* Work out if we support NX */
+- x86_configure_nx();
++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
++ unsigned l, h;
++
++ __supported_pte_mask |= _PAGE_NX;
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ }
++#endif
+
+ xen_setup_features();
+
+@@ -1227,13 +1233,6 @@ asmlinkage void __init xen_start_kernel(void)
+
+ machine_ops = xen_machine_ops;
+
+- /*
+- * The only reliable way to retain the initial address of the
+- * percpu gdt_page is to remember it here, so we can go and
+- * mark it RW later, when the initial percpu area is freed.
+- */
+- xen_initial_gdt = &per_cpu(gdt_page, 0);
+-
+ xen_smp_init();
+
+ #ifdef CONFIG_ACPI_NUMA
+@@ -1418,7 +1417,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
++static struct notifier_block xen_hvm_cpu_notifier = {
+ .notifier_call = xen_hvm_cpu_notify,
+ };
+
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index fe00be69..c51170f 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -365,7 +365,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
+ return val;
+ }
+
+-static pteval_t pte_pfn_to_mfn(pteval_t val)
++static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
+ {
+ if (val & _PAGE_PRESENT) {
+ unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+@@ -1757,6 +1757,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
+ convert_pfn_mfn(init_level4_pgt);
+ convert_pfn_mfn(level3_ident_pgt);
+ convert_pfn_mfn(level3_kernel_pgt);
++ convert_pfn_mfn(level3_vmalloc_start_pgt);
++ convert_pfn_mfn(level3_vmalloc_end_pgt);
++ convert_pfn_mfn(level3_vmemmap_pgt);
+
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+@@ -1775,7 +1778,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+@@ -1986,6 +1993,7 @@ static void __init xen_post_allocator_init(void)
+ pv_mmu_ops.set_pud = xen_set_pud;
+ #if PAGETABLE_LEVELS == 4
+ pv_mmu_ops.set_pgd = xen_set_pgd;
++ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
+ #endif
+
+ /* This will work as long as patching hasn't happened yet
+@@ -2067,6 +2075,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+ .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+ .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+ .set_pgd = xen_set_pgd_hyper,
++ .set_pgd_batched = xen_set_pgd_hyper,
+
+ .alloc_pud = xen_alloc_pmd_init,
+ .release_pud = xen_release_pmd_init,
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 6e4d5dc..78c131b 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
+ {
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+-
+- /* We've switched to the "real" per-cpu gdt, so make sure the
+- old memory can be recycled */
+- make_lowmem_page_readwrite(xen_initial_gdt);
+-
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+ }
+@@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+ gdt = get_cpu_gdt_table(cpu);
+
+ ctxt->flags = VGCF_IN_KERNEL;
+- ctxt->user_regs.ds = __USER_DS;
+- ctxt->user_regs.es = __USER_DS;
++ ctxt->user_regs.ds = __KERNEL_DS;
++ ctxt->user_regs.es = __KERNEL_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ #ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, ctxt->user_regs.gs);
+ #else
+ ctxt->gs_base_kernel = per_cpu_offset(cpu);
+ #endif
+@@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
+ int rc;
+
+ per_cpu(current_task, cpu) = idle;
++ per_cpu(current_tinfo, cpu) = &idle->tinfo;
+ #ifdef CONFIG_X86_32
+ irq_ctx_init(cpu);
+ #else
+ clear_tsk_thread_flag(idle, TIF_FORK);
+- per_cpu(kernel_stack, cpu) =
+- (unsigned long)task_stack_page(idle) -
+- KERNEL_STACK_OFFSET + THREAD_SIZE;
++ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+ #endif
+ xen_setup_runstate_info(cpu);
+ xen_setup_timer(cpu);
+@@ -536,7 +530,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
+
+ void __init xen_smp_init(void)
+ {
+- smp_ops = xen_smp_ops;
++ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
+ xen_fill_possible_map();
+ xen_init_spinlocks();
+ }
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index 7328f71..c457aa7 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -83,14 +83,14 @@ ENTRY(xen_iret)
+ ESP_OFFSET=4 # bytes pushed onto stack
+
+ /*
+- * Store vcpu_info pointer for easy access. Do it this way to
+- * avoid having to reload %fs
++ * Store vcpu_info pointer for easy access.
+ */
+ #ifdef CONFIG_SMP
+- GET_THREAD_INFO(%eax)
+- movl %ss:TI_cpu(%eax), %eax
+- movl %ss:__per_cpu_offset(,%eax,4), %eax
+- mov %ss:xen_vcpu(%eax), %eax
++ push %fs
++ mov $(__KERNEL_PERCPU), %eax
++ mov %eax, %fs
++ mov PER_CPU_VAR(xen_vcpu), %eax
++ pop %fs
+ #else
+ movl %ss:xen_vcpu, %eax
+ #endif
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index aaa7291..3f77960 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -19,6 +19,17 @@ ENTRY(startup_xen)
+ #ifdef CONFIG_X86_32
+ mov %esi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%esp
++#ifdef CONFIG_SMP
++ movl $cpu_gdt_table,%edi
++ movl $__per_cpu_load,%eax
++ movw %ax,__KERNEL_PERCPU + 2(%edi)
++ rorl $16,%eax
++ movb %al,__KERNEL_PERCPU + 4(%edi)
++ movb %ah,__KERNEL_PERCPU + 7(%edi)
++ movl $__per_cpu_end - 1,%eax
++ subl $__per_cpu_start,%eax
++ movw %ax,__KERNEL_PERCPU + 0(%edi)
++#endif
+ #else
+ mov %rsi,xen_start_info
+ mov $init_thread_union+THREAD_SIZE,%rsp
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index b095739..8c17bcd 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -10,8 +10,6 @@
+ extern const char xen_hypervisor_callback[];
+ extern const char xen_failsafe_callback[];
+
+-extern void *xen_initial_gdt;
+-
+ struct trap_info;
+ void xen_copy_trap_info(struct trap_info *traps);
+
+diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
+index 525bd3d..ef888b1 100644
+--- a/arch/xtensa/variants/dc232b/include/variant/core.h
++++ b/arch/xtensa/variants/dc232b/include/variant/core.h
+@@ -119,9 +119,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
+index 2f33760..835e50a 100644
+--- a/arch/xtensa/variants/fsf/include/variant/core.h
++++ b/arch/xtensa/variants/fsf/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_H
+ #define _XTENSA_CORE_H
+
++#include <linux/const.h>
+
+ /****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -112,9 +113,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
+index af00795..2bb8105 100644
+--- a/arch/xtensa/variants/s6000/include/variant/core.h
++++ b/arch/xtensa/variants/s6000/include/variant/core.h
+@@ -11,6 +11,7 @@
+ #ifndef _XTENSA_CORE_CONFIGURATION_H
+ #define _XTENSA_CORE_CONFIGURATION_H
+
++#include <linux/const.h>
+
+ /****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+@@ -118,9 +119,9 @@
+ ----------------------------------------------------------------------*/
+
+ #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+ #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+ #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
++#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
+
+ #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
+ #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
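All three xtensa hunks replace a hard-coded XCHAL_DCACHE_LINESIZE with a value derived from XCHAL_DCACHE_LINEWIDTH, so the size and its log2 can never drift apart, and they use _AC() from <linux/const.h> so the literal is typed 1UL in C yet stays a bare 1 when the header is included from assembly. A minimal re-creation of that helper (the #ifdef mirrors the kernel's own definition):

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif

#define XCHAL_DCACHE_LINEWIDTH  5
#define XCHAL_DCACHE_LINESIZE   (_AC(1, UL) << XCHAL_DCACHE_LINEWIDTH)

int main(void)
{
        printf("%lu\n", XCHAL_DCACHE_LINESIZE);         /* 32 */
        return 0;
}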
+diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
+index 58916af..9b538a6 100644
+--- a/block/blk-iopoll.c
++++ b/block/blk-iopoll.c
+@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
+ }
+ EXPORT_SYMBOL(blk_iopoll_complete);
+
+-static void blk_iopoll_softirq(struct softirq_action *h)
++static __latent_entropy void blk_iopoll_softirq(void)
+ {
+ struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
+ int rearm = 0, budget = blk_iopoll_budget;
+@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
++static struct notifier_block blk_iopoll_cpu_notifier = {
+ .notifier_call = blk_iopoll_cpu_notify,
+ };
+
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 623e1cd..ca1e109 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+ if (!len || !kbuf)
+ return -EINVAL;
+
+- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
++ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
+ if (do_copy)
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ else
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 1366a89..88178fe 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+-static void blk_done_softirq(struct softirq_action *h)
++static __latent_entropy void blk_done_softirq(void)
+ {
+ struct list_head *cpu_list, local_list;
+
+@@ -97,7 +97,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
++static struct notifier_block blk_cpu_notifier = {
+ .notifier_call = blk_cpu_notify,
+ };
+
+diff --git a/block/bsg.c b/block/bsg.c
+index c0ab25c..9d49f8f 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_v4 *hdr, struct bsg_device *bd,
+ fmode_t has_write_perm)
+ {
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
+ if (hdr->request_len > BLK_MAX_CDB) {
+ rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+ if (!rq->cmd)
+ return -ENOMEM;
+- }
++ cmdptr = rq->cmd;
++ } else
++ cmdptr = tmpcmd;
+
+- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
++ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
+ hdr->request_len))
+ return -EFAULT;
+
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->request_len);
++
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index 7b72502..646105c 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
+ err |= __get_user(f->spec1, &uf->spec1);
+ err |= __get_user(f->fmt_gap, &uf->fmt_gap);
+ err |= __get_user(name, &uf->name);
+- f->name = compat_ptr(name);
++ f->name = (void __force_kernel *)compat_ptr(name);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+diff --git a/block/genhd.c b/block/genhd.c
+index 8bd4ef2..078f68b9 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -474,21 +474,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+
+ /*
+ * Register device numbers dev..(dev+range-1)
+- * range must be nonzero
++ * Noop if @range is zero.
+ * The hash chain is sorted on range, so that subranges can override.
+ */
+ void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+ struct kobject *(*probe)(dev_t, int *, void *),
+ int (*lock)(dev_t, void *), void *data)
+ {
+- kobj_map(bdev_map, devt, range, module, probe, lock, data);
++ if (range)
++ kobj_map(bdev_map, devt, range, module, probe, lock, data);
+ }
+
+ EXPORT_SYMBOL(blk_register_region);
+
++/* undo blk_register_region(), noop if @range is zero */
+ void blk_unregister_region(dev_t devt, unsigned long range)
+ {
+- kobj_unmap(bdev_map, devt, range);
++ if (range)
++ kobj_unmap(bdev_map, devt, range);
+ }
+
+ EXPORT_SYMBOL(blk_unregister_region);
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 9e76a32..a220c64 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -66,7 +66,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
+ return put_user(0, p);
+ }
+
+-static int sg_get_timeout(struct request_queue *q)
++static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
+ {
+ return jiffies_to_clock_t(q->sg_timeout);
+ }
+@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
+ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+ {
+- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
++
++ if (cmdptr != rq->cmd)
++ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
++
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+ return -EPERM;
+
+@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
++ unsigned char tmpcmd[sizeof(rq->__cmd)];
++ unsigned char *cmdptr;
+
+ if (!sic)
+ return -EINVAL;
+@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ */
+ err = -EFAULT;
+ rq->cmd_len = cmdlen;
+- if (copy_from_user(rq->cmd, sic->data, cmdlen))
++
++ if (rq->cmd != rq->__cmd)
++ cmdptr = rq->cmd;
++ else
++ cmdptr = tmpcmd;
++
++ if (copy_from_user(cmdptr, sic->data, cmdlen))
+ goto error;
+
++ if (rq->cmd != cmdptr)
++ memcpy(rq->cmd, cmdptr, cmdlen);
++
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+diff --git a/crypto/api.c b/crypto/api.c
+index cea3cf6..86a0f6f 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -42,6 +42,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+ return alg;
+ }
+
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
++
+ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
+ {
+ return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 7bdd61b..afec999 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
+
+ struct cryptd_blkcipher_request_ctx {
+ crypto_completion_t complete;
+-};
++} __no_const;
+
+ struct cryptd_hash_ctx {
+ struct crypto_shash *child;
+@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
+
+ struct cryptd_aead_request_ctx {
+ crypto_completion_t complete;
+-};
++} __no_const;
+
+ static void cryptd_queue_worker(struct work_struct *work);
+
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 5b63b8d..6f46ba0 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -26,6 +26,8 @@
+ #include <net/net_namespace.h>
+ #include "internal.h"
+
++#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
++
+ DEFINE_MUTEX(crypto_cfg_mutex);
+
+ /* The crypto netlink socket */
+@@ -192,7 +194,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct crypto_dump_info info;
+ int err;
+
+- if (!p->cru_driver_name)
++ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
++ return -EINVAL;
++
++ if (!p->cru_driver_name[0])
+ return -EINVAL;
+
+ alg = crypto_alg_match(p, 1);
+@@ -256,6 +261,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
+ LIST_HEAD(list);
+
++ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
++ return -EINVAL;
++
+ if (priority && !strlen(p->cru_driver_name))
+ return -EINVAL;
+
+@@ -283,6 +291,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct crypto_alg *alg;
+ struct crypto_user_alg *p = nlmsg_data(nlh);
+
++ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
++ return -EINVAL;
++
+ alg = crypto_alg_match(p, 1);
+ if (!alg)
+ return -ENOENT;
+@@ -310,6 +321,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct crypto_user_alg *p = nlmsg_data(nlh);
+ struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
+
++ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
++ return -EINVAL;
++
+ if (strlen(p->cru_driver_name))
+ exact = 1;
+
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index 29a89da..7e23990 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+ int ret;
+
+ pinst->kobj.kset = pcrypt_kset;
+- ret = kobject_add(&pinst->kobj, NULL, name);
++ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
+ if (!ret)
+ kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
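The one-line pcrypt.c change is a classic format-string fix: kobject_add() takes a printf-style format, so passing the instance name directly would let a name containing '%' conversions walk the variadic arguments. The rule in miniature:

#include <stdio.h>

int main(void)
{
        const char *name = "100%s pure";        /* attacker-influenced data */

        /* printf(name); would interpret the %s and chase a stray pointer */
        printf("%s\n", name);                   /* safe: data, never format */
        return 0;
}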
+diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
+index f57050e..7ccfc74 100644
+--- a/drivers/acpi/apei/apei-internal.h
++++ b/drivers/acpi/apei/apei-internal.h
+@@ -18,7 +18,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+-};
++} __do_const;
+
+ struct apei_exec_context {
+ u32 ip;
+diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
+index 5d41894..22021e4 100644
+--- a/drivers/acpi/apei/cper.c
++++ b/drivers/acpi/apei/cper.c
+@@ -38,12 +38,12 @@
+ */
+ u64 cper_next_record_id(void)
+ {
+- static atomic64_t seq;
++ static atomic64_unchecked_t seq;
+
+- if (!atomic64_read(&seq))
+- atomic64_set(&seq, ((u64)get_seconds()) << 32);
++ if (!atomic64_read_unchecked(&seq))
++ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
+
+- return atomic64_inc_return(&seq);
++ return atomic64_inc_return_unchecked(&seq);
+ }
+ EXPORT_SYMBOL_GPL(cper_next_record_id);
+
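cper_next_record_id() moves to the *_unchecked atomic ops because, under PaX REFCOUNT, ordinary atomic64_t arithmetic traps on overflow to defeat reference-count-overflow exploits; a record ID that is meant to grow without such policing has to opt out explicitly. A user-space sketch of the distinction using C11 atomics (the type and helper here are made up for illustration):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for atomic64_unchecked_t: a counter that is allowed to wrap,
 * as opposed to a refcount whose overflow REFCOUNT would trap. */
typedef struct { atomic_long counter; } atomic_long_unchecked_t;

static long atomic_inc_return_unchecked(atomic_long_unchecked_t *v)
{
        return atomic_fetch_add(&v->counter, 1) + 1;    /* no overflow check */
}

int main(void)
{
        atomic_long_unchecked_t seq = { 0 };

        printf("id = %ld\n", atomic_inc_return_unchecked(&seq));
        return 0;
}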
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..b81293b 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
+ u32 is_critical_error;
+ };
+
+-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
+
+ /*
+ * POLICY: If *anything* doesn't work, put it on the blacklist.
+@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+ return 0;
+ }
+
+-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
++static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Fujitsu Siemens",
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index 5d42c24..4964b94 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ struct acpi_table_header table;
+ acpi_status status;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
+index 6c47ae9..abfdd63 100644
+--- a/drivers/acpi/ec_sys.c
++++ b/drivers/acpi/ec_sys.c
+@@ -12,6 +12,7 @@
+ #include <linux/acpi.h>
+ #include <linux/debugfs.h>
+ #include <linux/module.h>
++#include <linux/uaccess.h>
+ #include "internal.h"
+
+ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
+ * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
+ */
+ unsigned int size = EC_SPACE_SIZE;
+- u8 *data = (u8 *) buf;
++ u8 data;
+ loff_t init_off = *off;
+ int err = 0;
+
+@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
+ size = count;
+
+ while (size) {
+- err = ec_read(*off, &data[*off - init_off]);
++ err = ec_read(*off, &data);
+ if (err)
+ return err;
++ if (put_user(data, &buf[*off - init_off]))
++ return -EFAULT;
+ *off += 1;
+ size--;
+ }
+@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
+
+ unsigned int size = count;
+ loff_t init_off = *off;
+- u8 *data = (u8 *) buf;
+ int err = 0;
+
+ if (*off >= EC_SPACE_SIZE)
+@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
+ }
+
+ while (size) {
+- u8 byte_write = data[*off - init_off];
++ u8 byte_write;
++ if (get_user(byte_write, &buf[*off - init_off]))
++ return -EFAULT;
+ err = ec_write(*off, byte_write);
+ if (err)
+ return err;
+diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
+index 251c7b62..feab1d6 100644
+--- a/drivers/acpi/proc.c
++++ b/drivers/acpi/proc.c
+@@ -345,16 +345,13 @@ acpi_system_write_wakeup_device(struct file *file,
+ struct list_head *node, *next;
+ char strbuf[5];
+ char str[5] = "";
+- unsigned int len = count;
+
+- if (len > 4)
+- len = 4;
+- if (len < 0)
+- return -EFAULT;
++ if (count > 4)
++ count = 4;
+
+- if (copy_from_user(strbuf, buffer, len))
++ if (copy_from_user(strbuf, buffer, count))
+ return -EFAULT;
+- strbuf[len] = '\0';
++ strbuf[count] = '\0';
+ sscanf(strbuf, "%s", str);
+
+ mutex_lock(&acpi_device_lock);
+diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
+index ac28db3..0848b37 100644
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -474,7 +474,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
+ return 0;
+ #endif
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(pr->id >= nr_cpu_ids);
+
+ /*
+ * Buggy BIOS check
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index de0791c..d6d4ea3 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1036,7 +1036,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+ {
+ int i, count = CPUIDLE_DRIVER_STATE_START;
+ struct acpi_processor_cx *cx;
+- struct cpuidle_state *state;
++ cpuidle_state_no_const *state;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.power_setup_done)
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 240a244..bc6239e 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -420,11 +420,11 @@ static u32 num_counters;
+ static struct attribute **all_attrs;
+ static u32 acpi_gpe_count;
+
+-static struct attribute_group interrupt_stats_attr_group = {
++static attribute_group_no_const interrupt_stats_attr_group = {
+ .name = "interrupts",
+ };
+
+-static struct kobj_attribute *counter_attrs;
++static kobj_attribute_no_const *counter_attrs;
+
+ static void delete_gpe_attr_array(void)
+ {
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index de2802c..2260da9 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1211,7 +1211,7 @@ int ahci_kick_engine(struct ata_port *ap)
+ }
+ EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
++static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+ {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a0a3987..d029614 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4746,7 +4746,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+ struct ata_port *ap;
+ unsigned int tag;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ ap = qc->ap;
+
+ qc->flags = 0;
+@@ -4762,7 +4762,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+ struct ata_port *ap;
+ struct ata_link *link;
+
+- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
++ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ ap = qc->ap;
+ link = qc->dev->link;
+@@ -5767,6 +5767,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+ return;
+
+ spin_lock(&lock);
++ pax_open_kernel();
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+@@ -5780,8 +5781,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+- ops->inherits = NULL;
++ *(struct ata_port_operations **)&ops->inherits = NULL;
+
++ pax_close_kernel();
+ spin_unlock(&lock);
+ }
+
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index e8574bb..f9f6a72 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
+ /* Handle platform specific quirks */
+ if (pdata->quirk) {
+ if (pdata->quirk & CF_BROKEN_PIO) {
+- ap->ops->set_piomode = NULL;
++ pax_open_kernel();
++ *(void **)&ap->ops->set_piomode = NULL;
++ pax_close_kernel();
+ ap->pio_mask = 0;
+ }
+ if (pdata->quirk & CF_BROKEN_MWDMA)
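
Both libata hunks above share one pattern: with grsecurity's constification, `struct ata_port_operations` instances live in read-only memory, so the few places that legitimately mutate them must lift the protection first. A hedged sketch of the idiom, assuming `pax_open_kernel()`/`pax_close_kernel()` temporarily permit writes to read-only kernel data as in grsecurity, and that the cast through `void **` strips the plugin-enforced const:

        static void quirk_disable_piomode(struct ata_port_operations *ops)
        {
                pax_open_kernel();                   /* allow the write */
                *(void **)&ops->set_piomode = NULL;  /* bypass constified field */
                pax_close_kernel();                  /* re-protect */
        }
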
+diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
+index f9b983a..887b9d8 100644
+--- a/drivers/atm/adummy.c
++++ b/drivers/atm/adummy.c
+@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
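
This `atomic_inc` → `atomic_inc_unchecked` substitution repeats through every ATM driver below, and later in drbd, for the same reason: PaX's REFCOUNT feature instruments ordinary `atomic_t` operations with overflow detection to catch reference-count wraps, so a pure statistics counter that is allowed to wrap must be moved to the unchecked variant or it would eventually trip the detector. A rough sketch of the distinction, assuming the grsecurity type names:

        typedef struct { int counter; } atomic_unchecked_t;

        static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
        {
                /* same increment as atomic_inc(), minus the overflow trap
                 * PaX compiles into checked atomic_t updates; the builtin
                 * stands in for the arch-specific asm */
                __sync_fetch_and_add(&v->counter, 1);
        }
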
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index f8f41e0..1f987dd 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
+ PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the descriptor
+ kfree (tx_descr);
+@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+ dump_skb ("<<<", vc, skb);
+
+ // VC layer stats
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ atm_vcc->push (atm_vcc, skb);
+@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
+ } else {
+ PRINTK (KERN_INFO, "dropped over-size frame");
+ // should we count this?
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ }
+
+ } else {
+@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
+ }
+
+ if (check_area (skb->data, skb->len)) {
+- atomic_inc(&atm_vcc->stats->tx_err);
++ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
+ return -ENOMEM; // ?
+ }
+
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index b22d71c..d6e1049 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ if (dev_data) return 0;
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOLINK;
+ }
+ size = skb->len+sizeof(struct atmtcp_hdr);
+@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ if (!new_skb) {
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOBUFS;
+ }
+ hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
+@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ return 0;
+ }
+
+@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
+ read_unlock(&vcc_sklist_lock);
+ if (!out_vcc) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ goto done;
+ }
+ skb_pull(skb,sizeof(struct atmtcp_hdr));
+@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ __net_timestamp(new_skb);
+ skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
+ out_vcc->push(out_vcc,new_skb);
+- atomic_inc(&vcc->stats->tx);
+- atomic_inc(&out_vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->tx);
++ atomic_inc_unchecked(&out_vcc->stats->rx);
+ done:
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb(skb);
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 956e9ac..133516d 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
+ DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
+ vcc->dev->number);
+ length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ length = ATM_CELL_SIZE-1; /* no HEC */
+@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ size);
+ }
+ eff = length = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ else {
+ size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
+@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
+ "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
+ vcc->dev->number,vcc->vci,length,size << 2,descr);
+ length = eff = 0;
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+ skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
+@@ -771,7 +771,7 @@ rx_dequeued++;
+ vcc->push(vcc,skb);
+ pushed++;
+ }
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ wake_up(&eni_dev->rx_wait);
+ }
+@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
+ PCI_DMA_TODEVICE);
+ if (vcc->pop) vcc->pop(vcc,skb);
+ else dev_kfree_skb_irq(skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&eni_dev->tx_wait);
+ dma_complete++;
+ }
+@@ -1569,7 +1569,7 @@ tx_complete++;
+ /*--------------------------------- entries ---------------------------------*/
+
+
+-static const char *media_name[] __devinitdata = {
++static const char *media_name[] __devinitconst = {
+ "MMF", "SMF", "MMF", "03?", /* 0- 3 */
+ "UTP", "05?", "06?", "07?", /* 4- 7 */
+ "TAXI","09?", "10?", "11?", /* 8-11 */
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index 5072f8a..fa52520d 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
+ }
+ }
+
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ fs_dprintk (FS_DEBUG_TXMEM, "i");
+ fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
+@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+ #endif
+ skb_put (skb, qe->p1 & 0xffff);
+ ATM_SKB(skb)->vcc = atm_vcc;
+- atomic_inc(&atm_vcc->stats->rx);
++ atomic_inc_unchecked(&atm_vcc->stats->rx);
+ __net_timestamp(skb);
+ fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
+ atm_vcc->push (atm_vcc, skb);
+@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
+ kfree (pe);
+ }
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ case 0x1f: /* Reassembly abort: no buffers. */
+ /* Silently increment error counter. */
+ if (atm_vcc)
+- atomic_inc(&atm_vcc->stats->rx_drop);
++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
+ break;
+ default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
+ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
+diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
+index 361f5ae..7fc552d 100644
+--- a/drivers/atm/fore200e.c
++++ b/drivers/atm/fore200e.c
+@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
+ #endif
+ /* check error condition */
+ if (*entry->status & STATUS_ERROR)
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ else
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+ }
+
+@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+ if (skb == NULL) {
+ DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
+
+ dev_kfree_skb_any(skb);
+
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return -ENOMEM;
+ }
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
+
+@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
+ DPRINTK(2, "damaged PDU on %d.%d.%d\n",
+ fore200e->atm_dev->number,
+ entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ }
+ }
+
+@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ goto retry_here;
+ }
+
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+
+ fore200e->tx_sat++;
+ DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
+diff --git a/drivers/atm/he.c b/drivers/atm/he.c
+index 9a51df4..f3bb5f8 100644
+--- a/drivers/atm/he.c
++++ b/drivers/atm/he.c
+@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+
+ if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto return_host_buffers;
+ }
+
+@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+ RBRQ_LEN_ERR(he_dev->rbrq_head)
+ ? "LEN_ERR" : "",
+ vcc->vpi, vcc->vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto return_host_buffers;
+ }
+
+@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+ vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return_host_buffers:
+ ++pdus_assembled;
+@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
+ tpd->vcc->pop(tpd->vcc, tpd->skb);
+ else
+ dev_kfree_skb_any(tpd->skb);
+- atomic_inc(&tpd->vcc->stats->tx_err);
++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
+ }
+ pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
+ return;
+@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+
+@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -EINVAL;
+ }
+ #endif
+@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_any(skb);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+ return -ENOMEM;
+ }
+@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ __enqueue_tpd(he_dev, tpd, cid);
+ spin_unlock_irqrestore(&he_dev->global_lock, flags);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
+index b812103..e391a49 100644
+--- a/drivers/atm/horizon.c
++++ b/drivers/atm/horizon.c
+@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
+ {
+ struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
+ // VC layer stats
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ __net_timestamp(skb);
+ // end of our responsibility
+ vcc->push (vcc, skb);
+@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
+ dev->tx_iovec = NULL;
+
+ // VC layer stats
+- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
+
+ // free the skb
+ hrz_kfree_skb (skb);
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index b0e75ce..035bf7e 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
+ else
+ dev_kfree_skb(skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ }
+
+ atomic_dec(&scq->used);
+@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for aal0.\n",
+ card->name);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
+ card->name);
+- atomic_add(i - 1, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
+ dev_kfree_skb(sb);
+ break;
+ }
+@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ cell += ATM_CELL_PAYLOAD;
+ }
+@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ "(CDC: %08x)\n",
+ card->name, len, rpp->len, readl(SAR_REG_CDC));
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (stat & SAR_RSQE_CRC) {
+ RXPRINTK("%s: AAL5 CRC error.\n", card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (skb_queue_len(&rpp->queue) > 1) {
+@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ RXPRINTK("%s: Can't alloc RX skb.\n",
+ card->name);
+ recycle_rx_pool_skb(card, rpp);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+ if (!atm_charge(vcc, skb->truesize)) {
+@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ return;
+ }
+@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ __net_timestamp(skb);
+
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ if (skb->truesize > SAR_FB_SIZE_3)
+ add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
+ if (vcc->qos.aal != ATM_AAL0) {
+ RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
+ card->name, vpi, vci);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto drop;
+ }
+
+ if ((sb = dev_alloc_skb(64)) == NULL) {
+ printk("%s: Can't allocate buffers for AAL0.\n",
+ card->name);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto drop;
+ }
+
+@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+
+ drop:
+ skb_pull(queue, 64);
+@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+
+ if (vc == NULL) {
+ printk("%s: NULL connection in send().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (!test_bit(VCF_TX, &vc->flags)) {
+ printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+ break;
+ default:
+ printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("%s: No scatter-gather yet.\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
+
+ err = queue_skb(card, vc, skb, oam);
+ if (err) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb(skb);
+ return err;
+ }
+@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
+ skb = dev_alloc_skb(64);
+ if (!skb) {
+ printk("%s: Out of memory in send_oam().\n", card->name);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ return -ENOMEM;
+ }
+ atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 3d0c2b0..45441fa 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
+ status = (u_short) (buf_desc_ptr->desc_mode);
+ if (status & (RX_CER | RX_PTE | RX_OFL))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("IA: bad packet, dropping it");)
+ if (status & RX_CER) {
+ IF_ERR(printk(" cause: packet CRC error\n");)
+@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
+ len = dma_addr - buf_addr;
+ if (len > iadev->rx_buf_sz) {
+ printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out_free_desc;
+ }
+
+@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+ ia_vcc = INPH_IA_VCC(vcc);
+ if (ia_vcc == NULL)
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ dev_kfree_skb_any(skb);
+ atm_return(vcc, atm_guess_pdu2truesize(len));
+ goto INCR_DLE;
+@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+ if ((length > iadev->rx_buf_sz) || (length >
+ (skb->len - sizeof(struct cpcs_trailer))))
+ {
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
+ length, skb->len);)
+ dev_kfree_skb_any(skb);
+@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
+
+ IF_RX(printk("rx_dle_intr: skb push");)
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ iadev->rx_pkt_cnt++;
+ }
+ INCR_DLE:
+@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+ {
+ struct k_sonet_stats *stats;
+ stats = &PRIV(_ia_dev[board])->sonet_stats;
+- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
+- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
+- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
+- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
+- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
+- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
+- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
+- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
+- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
++ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
+ }
+ ia_cmds.status = 0;
+ break;
+@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+ if ((desc == 0) || (desc > iadev->num_tx_desc))
+ {
+ IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
+ ATM_DESC(skb) = vcc->vci;
+ skb_queue_tail(&iadev->tx_dma_q, skb);
+
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ iadev->tx_pkt_cnt++;
+ /* Increment transaction counter */
+ writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
+
+ #if 0
+ /* add flow control logic */
+- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
+ if (iavcc->vc_desc_cnt > 10) {
+ vcc->tx_quota = vcc->tx_quota * 3 / 4;
+ printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
+diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
+index f5569699..0da15eb 100644
+--- a/drivers/atm/lanai.c
++++ b/drivers/atm/lanai.c
+@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
+ vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
+ lanai_endtx(lanai, lvcc);
+ lanai_free_skb(lvcc->tx.atmvcc, skb);
+- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
+ }
+
+ /* Try to fill the buffer - don't call unless there is backlog */
+@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
+ ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
+ __net_timestamp(skb);
+ lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
+ out:
+ lvcc->rx.buf.ptr = end;
+ cardvcc_write(lvcc, endptr, vcc_rxreadptr);
+@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+ DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
+ "vcc %d\n", lanai->number, (unsigned int) s, vci);
+ lanai->stats.service_rxnotaal5++;
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ return 0;
+ }
+ if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
+@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+ int bytes;
+ read_unlock(&vcc_sklist_lock);
+ DPRINTK("got trashed rx pdu on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_trash++;
+ bytes = (SERVICE_GET_END(s) * 16) -
+ (((unsigned long) lvcc->rx.buf.ptr) -
+@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+ }
+ if (s & SERVICE_STREAM) {
+ read_unlock(&vcc_sklist_lock);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_stream++;
+ printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
+ "PDU on VCI %d!\n", lanai->number, vci);
+@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
+ return 0;
+ }
+ DPRINTK("got rx crc error on vci %d\n", vci);
+- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
+ lvcc->stats.x.aal5.service_rxcrc++;
+ lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
+ cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 1c70c45..300718d 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ if (!vc->tx) {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+ card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ }
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+- atomic_inc(&vcc->stats->tx_err);
++ atomic_inc_unchecked(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ return 0;
+ }
+@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ printk
+ ("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+- atomic_add(i, &vcc->stats->rx_drop);
++ atomic_add_unchecked(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK
+ ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ if (iovb == NULL) {
+ printk("nicstar%d: Out of iovec buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ small or large buffer itself. */
+ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_MAX_IOVECS);
+ NS_PRV_IOVCNT(iovb) = 0;
+@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ /* skb points to a small buffer */
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(skb, len);
+ dequeue_sm_buf(card, skb);
+@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
+ struct sk_buff *sb;
+@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ if (len <= NS_SMBUFSIZE) {
+ if (!atm_charge(vcc, sb->truesize)) {
+ push_rxbufs(card, sb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ skb_put(sb, len);
+ dequeue_sm_buf(card, sb);
+@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ dequeue_lg_buf(card, skb);
+ #ifdef NS_USE_DESTRUCTORS
+@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ printk
+ ("nicstar%d: Out of huge buffers.\n",
+ card->index);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card,
+ (struct iovec *)
+ iovb->data,
+@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ card->hbpool.count++;
+ } else
+ dev_kfree_skb_any(hb);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ } else {
+ /* Copy the small buffer to the huge buffer */
+ sb = (struct sk_buff *)iov->iov_base;
+@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+ #endif /* NS_USE_DESTRUCTORS */
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ }
+
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index adfce9f..35501e1 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
+ }
+ atm_charge(vcc, skb->truesize);
+ vcc->push(vcc, skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ break;
+
+ case PKT_STATUS:
+@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+ vcc = SKB_CB(oldskb)->vcc;
+
+ if (vcc) {
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ solos_pop(vcc, oldskb);
+ } else
+ dev_kfree_skb_irq(oldskb);
+diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
+index 90f1ccc..04c4a1e 100644
+--- a/drivers/atm/suni.c
++++ b/drivers/atm/suni.c
+@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
+
+
+ #define ADD_LIMITED(s,v) \
+- atomic_add((v),&stats->s); \
+- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
++ atomic_add_unchecked((v),&stats->s); \
++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
+
+
+ static void suni_hz(unsigned long from_timer)
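
`ADD_LIMITED` is a saturating accumulate: it deliberately lets the counter run past INT_MAX and then clamps, so the overflowed (negative) intermediate value is expected. That is exactly the situation REFCOUNT would flag, hence the unchecked accessors. Expanded for one counter, the macro is roughly:

        atomic_add_unchecked(delta, &stats->line_bip);
        if (atomic_read_unchecked(&stats->line_bip) < 0)   /* wrapped */
                atomic_set_unchecked(&stats->line_bip, INT_MAX);
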
+diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
+index 5120a96..e2572bd 100644
+--- a/drivers/atm/uPD98402.c
++++ b/drivers/atm/uPD98402.c
+@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
+ struct sonet_stats tmp;
+ int error = 0;
+
+- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+ sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
+ if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
+ if (zero && !error) {
+@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+
+
+ #define ADD_LIMITED(s,v) \
+- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
+- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
+- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+
+
+ static void stat_event(struct atm_dev *dev)
+@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
+ if (reason & uPD98402_INT_PFM) stat_event(dev);
+ if (reason & uPD98402_INT_PCO) {
+ (void) GET(PCOCR); /* clear interrupt cause */
+- atomic_add(GET(HECCT),
++ atomic_add_unchecked(GET(HECCT),
+ &PRIV(dev)->sonet_stats.uncorr_hcs);
+ }
+ if ((reason & uPD98402_INT_RFO) &&
+@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
+ PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
+ uPD98402_INT_LOS),PIMR); /* enable them */
+ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
+- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
+- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
+ return 0;
+ }
+
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index d889f56..17eb71e 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+ }
+ if (!size) {
+ dev_kfree_skb_irq(skb);
+- if (vcc) atomic_inc(&vcc->stats->rx_err);
++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
+ continue;
+ }
+ if (!atm_charge(vcc,skb->truesize)) {
+@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
+ skb->len = size;
+ ATM_SKB(skb)->vcc = vcc;
+ vcc->push(vcc,skb);
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ }
+ zout(pos & 0xffff,MTA(mbx));
+ #if 0 /* probably a stupid idea */
+@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
+ skb_queue_head(&zatm_vcc->backlog,skb);
+ break;
+ }
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+ wake_up(&zatm_vcc->tx_wait);
+ }
+
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index 8fc200b..32763bb 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
+ ic->classdev.parent = get_device(dev);
+ ic->classdev.class = cont->class;
+ cont->class->dev_release = attribute_container_release;
+- dev_set_name(&ic->classdev, dev_name(dev));
++ dev_set_name(&ic->classdev, "%s", dev_name(dev));
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index a4760e0..ea524a0 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
+ if (!thread)
+ return 0;
+
+- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
++ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
+ if (err)
+ printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ else
+@@ -393,11 +393,11 @@ static int devtmpfsd(void *p)
+ *err = sys_unshare(CLONE_NEWNS);
+ if (*err)
+ goto out;
+- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
++ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
+ if (*err)
+ goto out;
+- sys_chdir("/.."); /* will traverse into overmounted root */
+- sys_chroot(".");
++ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
++ sys_chroot((char __force_user *)".");
+ complete(&setup_done);
+ while (1) {
+ spin_lock(&req_lock);
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 5693ece..e39a621 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -587,18 +587,16 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
+ {
+ int n;
+
+- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
+- if (n > 0 && PAGE_SIZE > n + 1) {
+- *(buf + n++) = '\n';
+- *(buf + n++) = '\0';
+- }
++ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
++ buf[n++] = '\n';
++ buf[n] = '\0';
+ return n;
+ }
+
+ struct node_attr {
+ struct sysdev_class_attribute attr;
+ enum node_states state;
+-};
++} __do_const;
+
+ static ssize_t show_node_state(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
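
`__do_const` is an annotation consumed by grsecurity's constify GCC plugin: structures tagged with it are treated as const, so their static instances land in read-only memory even though the declaration carries no `const` keyword. The companion change works because scnprintf with size S writes at most S-1 characters, as the worked bounds show:

        n = nodelist_scnprintf(buf, PAGE_SIZE - 2, node_states[state]);
        /* n <= PAGE_SIZE - 3, so both appends stay inside the page */
        buf[n++] = '\n';        /* index <= PAGE_SIZE - 3 */
        buf[n]   = '\0';        /* index <= PAGE_SIZE - 2 */
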
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index adf41be0..b044daf 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -184,7 +184,7 @@ static ssize_t rtpm_status_show(struct device *dev,
+ return -EIO;
+ }
+ }
+- return sprintf(buf, p);
++ return sprintf(buf, "%s", p);
+ }
+
+ static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
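
This hunk and the attribute_container one above are the classic format-string fix: a string that originates at runtime (`p`, or a device name) must never be passed as the format argument, because any `%` it happens to contain would be interpreted as a conversion specifier and read garbage off the stack. The safe idiom costs nothing:

        sprintf(buf, p);         /* unsafe: p is parsed for %-conversions */
        sprintf(buf, "%s", p);   /* safe: p is copied as plain data */
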
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index caf995f..6f76697 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -30,14 +30,14 @@ bool events_check_enabled;
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+-static atomic_t combined_event_count = ATOMIC_INIT(0);
++static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
+
+ #define IN_PROGRESS_BITS (sizeof(int) * 4)
+ #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+ static void split_counters(unsigned int *cnt, unsigned int *inpr)
+ {
+- unsigned int comb = atomic_read(&combined_event_count);
++ unsigned int comb = atomic_read_unchecked(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
+ ws->last_time = ktime_get();
+
+ /* Increment the counter of events in progress. */
+- atomic_inc(&combined_event_count);
++ atomic_inc_unchecked(&combined_event_count);
+ }
+
+ /**
+@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
+ * Increment the counter of registered wakeup events and decrement the
+ * couter of wakeup events in progress simultaneously.
+ */
+- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
+ }
+
+ /**
+diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
+index e8d11b6..7b1b36f 100644
+--- a/drivers/base/syscore.c
++++ b/drivers/base/syscore.c
+@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
+ void register_syscore_ops(struct syscore_ops *ops)
+ {
+ mutex_lock(&syscore_ops_lock);
+- list_add_tail(&ops->node, &syscore_ops_list);
++ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
+ mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(register_syscore_ops);
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
+ void unregister_syscore_ops(struct syscore_ops *ops)
+ {
+ mutex_lock(&syscore_ops_lock);
+- list_del(&ops->node);
++ pax_list_del((struct list_head *)&ops->node);
+ mutex_unlock(&syscore_ops_lock);
+ }
+ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index d7ad865..61ddf2c 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, CommandList_struct, list);
+ /* can't do anything if fifo is full */
+- if ((h->access.fifo_full(h))) {
++ if ((h->access->fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
+ h->Qdepth--;
+
+ /* Tell the controller execute command */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+@@ -3444,17 +3444,17 @@ startio:
+
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+ }
+
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+- return h->access.intr_pending(h);
++ return h->access->intr_pending(h);
+ }
+
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+- return ((h->access.intr_pending(h) == 0) ||
++ return ((h->access->intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
+ }
+
+@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -4045,7 +4045,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+ trans_support & CFGTBL_Trans_use_short_tags);
+
+ /* Change the access methods to the performant access methods */
+- h->access = SA5_performant_access;
++ h->access = &SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+
+ return;
+@@ -4317,7 +4317,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+- h->access = *(products[prod_index].access);
++ h->access = products[prod_index].access;
+
+ if (cciss_board_disabled(h)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -5042,7 +5042,7 @@ reinit_after_soft_reset:
+ }
+
+ /* make sure the board interrupts are off */
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
+ if (rc)
+ goto clean2;
+@@ -5094,7 +5094,7 @@ reinit_after_soft_reset:
+ * fake ones to scoop up any residual completions.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ spin_unlock_irqrestore(&h->lock, flags);
+ free_irq(h->intr[h->intr_mode], h);
+ rc = cciss_request_irq(h, cciss_msix_discard_completions,
+@@ -5114,9 +5114,9 @@ reinit_after_soft_reset:
+ dev_info(&h->pdev->dev, "Board READY.\n");
+ dev_info(&h->pdev->dev,
+ "Waiting for stale completions to drain.\n");
+- h->access.set_intr_mask(h, CCISS_INTR_ON);
++ h->access->set_intr_mask(h, CCISS_INTR_ON);
+ msleep(10000);
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+
+ rc = controller_reset_failed(h->cfgtable);
+ if (rc)
+@@ -5139,7 +5139,7 @@ reinit_after_soft_reset:
+ cciss_scsi_setup(h);
+
+ /* Turn the interrupts on so we can service requests */
+- h->access.set_intr_mask(h, CCISS_INTR_ON);
++ h->access->set_intr_mask(h, CCISS_INTR_ON);
+
+ /* Get the firmware version */
+ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+@@ -5212,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
+ kfree(flush_buf);
+ if (return_code != IO_OK)
+ dev_warn(&h->pdev->dev, "Error flushing cache\n");
+- h->access.set_intr_mask(h, CCISS_INTR_OFF);
++ h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ free_irq(h->intr[h->intr_mode], h);
+ }
+
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 7fda30e..eb5dfe0 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -101,7 +101,7 @@ struct ctlr_info
+ /* information about each logical volume */
+ drive_info_struct *drv[CISS_MAX_LUN];
+
+- struct access_method access;
++ struct access_method *access;
+
+ /* queue and queue Info */
+ struct list_head reqQ;
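
The cciss conversion (continued for cpqarray below) turns the embedded per-controller copy of `struct access_method` into a pointer to one of the driver's static method tables, with every call site rewritten from `h->access.fn(...)` to `h->access->fn(...)`. The point is constification: a table that is only ever pointed at, never copied into writable memory, can live in .rodata where its function pointers cannot be overwritten. A sketch under that assumption:

        /* shared, read-only method table instead of a mutable copy */
        static const struct access_method SA5_access = { /* ... */ };

        h->access = &SA5_access;          /* was: h->access = SA5_access; */
        h->access->submit_command(h, c);  /* indirect call, same behavior */
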
+diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
+index 504bc16..e13b631 100644
+--- a/drivers/block/cpqarray.c
++++ b/drivers/block/cpqarray.c
+@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+ if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
+ goto Enomem4;
+ }
+- hba[i]->access.set_intr_mask(hba[i], 0);
++ hba[i]->access->set_intr_mask(hba[i], 0);
+ if (request_irq(hba[i]->intr, do_ida_intr,
+ IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
+ {
+@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+ add_timer(&hba[i]->timer);
+
+ /* Enable IRQ now that spinlock and rate limit timer are set up */
+- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
++ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+
+ for(j=0; j<NWD; j++) {
+ struct gendisk *disk = ida_gendisk[i][j];
+@@ -694,7 +694,7 @@ DBGINFO(
+ for(i=0; i<NR_PRODUCTS; i++) {
+ if (board_id == products[i].board_id) {
+ c->product_name = products[i].product_name;
+- c->access = *(products[i].access);
++ c->access = products[i].access;
+ break;
+ }
+ }
+@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
+ hba[ctlr]->intr = intr;
+ sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
+ hba[ctlr]->product_name = products[j].product_name;
+- hba[ctlr]->access = *(products[j].access);
++ hba[ctlr]->access = products[j].access;
+ hba[ctlr]->ctlr = ctlr;
+ hba[ctlr]->board_id = board_id;
+ hba[ctlr]->pci_dev = NULL; /* not PCI */
+@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
+
+ while((c = h->reqQ) != NULL) {
+ /* Can't do anything if we're busy */
+- if (h->access.fifo_full(h) == 0)
++ if (h->access->fifo_full(h) == 0)
+ return;
+
+ /* Get the first entry from the request Q */
+@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
+ h->Qdepth--;
+
+ /* Tell the controller to do our bidding */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Get onto the completion Q */
+ addQ(&h->cmpQ, c);
+@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
+ unsigned long flags;
+ __u32 a,a1;
+
+- istat = h->access.intr_pending(h);
++ istat = h->access->intr_pending(h);
+ /* Is this interrupt for us? */
+ if (istat == 0)
+ return IRQ_NONE;
+@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
+ */
+ spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
+ if (istat & FIFO_NOT_EMPTY) {
+- while((a = h->access.command_completed(h))) {
++ while((a = h->access->command_completed(h))) {
+ a1 = a; a &= ~3;
+ if ((c = h->cmpQ) == NULL)
+ {
+@@ -1450,11 +1450,11 @@ static int sendcmd(
+ /*
+ * Disable interrupt
+ */
+- info_p->access.set_intr_mask(info_p, 0);
++ info_p->access->set_intr_mask(info_p, 0);
+ /* Make sure there is room in the command FIFO */
+ /* Actually it should be completely empty at this time. */
+ for (i = 200000; i > 0; i--) {
+- temp = info_p->access.fifo_full(info_p);
++ temp = info_p->access->fifo_full(info_p);
+ if (temp != 0) {
+ break;
+ }
+@@ -1467,7 +1467,7 @@ DBG(
+ /*
+ * Send the cmd
+ */
+- info_p->access.submit_command(info_p, c);
++ info_p->access->submit_command(info_p, c);
+ complete = pollcomplete(ctlr);
+
+ pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
+@@ -1550,9 +1550,9 @@ static int revalidate_allvol(ctlr_info_t *host)
+ * we check the new geometry. Then turn interrupts back on when
+ * we're done.
+ */
+- host->access.set_intr_mask(host, 0);
++ host->access->set_intr_mask(host, 0);
+ getgeometry(ctlr);
+- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
++ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
+
+ for(i=0; i<NWD; i++) {
+ struct gendisk *disk = ida_gendisk[ctlr][i];
+@@ -1592,7 +1592,7 @@ static int pollcomplete(int ctlr)
+ /* Wait (up to 2 seconds) for a command to complete */
+
+ for (i = 200000; i > 0; i--) {
+- done = hba[ctlr]->access.command_completed(hba[ctlr]);
++ done = hba[ctlr]->access->command_completed(hba[ctlr]);
+ if (done == 0) {
+ udelay(10); /* a short fixed delay */
+ } else
+diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
+index be73e9d..7fbf140 100644
+--- a/drivers/block/cpqarray.h
++++ b/drivers/block/cpqarray.h
+@@ -99,7 +99,7 @@ struct ctlr_info {
+ drv_info_t drv[NWD];
+ struct proc_dir_entry *proc;
+
+- struct access_method access;
++ struct access_method *access;
+
+ cmdlist_t *reqQ;
+ cmdlist_t *cmpQ;
+diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
+index 9cf2035..bffca95 100644
+--- a/drivers/block/drbd/drbd_int.h
++++ b/drivers/block/drbd/drbd_int.h
+@@ -736,7 +736,7 @@ struct drbd_request;
+ struct drbd_epoch {
+ struct list_head list;
+ unsigned int barrier_nr;
+- atomic_t epoch_size; /* increased on every request added. */
++ atomic_unchecked_t epoch_size; /* increased on every request added. */
+ atomic_t active; /* increased on every req. added, and dec on every finished. */
+ unsigned long flags;
+ };
+@@ -1108,7 +1108,7 @@ struct drbd_conf {
+ void *int_dig_in;
+ void *int_dig_vv;
+ wait_queue_head_t seq_wait;
+- atomic_t packet_seq;
++ atomic_unchecked_t packet_seq;
+ unsigned int peer_seq;
+ spinlock_t peer_seq_lock;
+ unsigned int minor;
+@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
+
+ static inline void drbd_tcp_cork(struct socket *sock)
+ {
+- int __user val = 1;
++ int val = 1;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_uncork(struct socket *sock)
+ {
+- int __user val = 0;
++ int val = 0;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_nodelay(struct socket *sock)
+ {
+- int __user val = 1;
++ int val = 1;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ static inline void drbd_tcp_quickack(struct socket *sock)
+ {
+- int __user val = 2;
++ int val = 2;
+ (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
+- (char __user *)&val, sizeof(val));
++ (char __force_user *)&val, sizeof(val));
+ }
+
+ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
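
The `__user` games in the drbd_tcp_*() helpers (and the similar devtmpfs and loop.c hunks elsewhere in the patch) are sparse bookkeeping, not behavior changes: these functions hand a kernel buffer to an interface declared with `__user` pointers while running under set_fs()-widened address limits, so the patch drops the bogus `__user` from the local variable and adds a `__force_user` cast at the call site to tell the checker the address-space crossing is intentional. Roughly:

        int val = 1;                       /* ordinary kernel variable */
        drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
                        (char __force_user *)&val, sizeof(val));
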
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 0358e55..bc33689 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
+ p.sector = sector;
+ p.block_id = block_id;
+ p.blksize = blksize;
+- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
++ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
+ return false;
+@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
+ p.sector = cpu_to_be64(req->sector);
+ p.block_id = (unsigned long)req;
+ p.seq_num = cpu_to_be32(req->seq_num =
+- atomic_add_return(1, &mdev->packet_seq));
++ atomic_add_return_unchecked(1, &mdev->packet_seq));
+
+ dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
+
+@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
+ atomic_set(&mdev->unacked_cnt, 0);
+ atomic_set(&mdev->local_cnt, 0);
+ atomic_set(&mdev->net_cnt, 0);
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ atomic_set(&mdev->pp_in_use, 0);
+ atomic_set(&mdev->pp_in_use_by_net, 0);
+ atomic_set(&mdev->rs_sect_in, 0);
+@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
+ mdev->receiver.t_state);
+
+ /* no need to lock it, I'm the only thread alive */
+- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
+- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
++ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
+ mdev->al_writ_cnt =
+ mdev->bm_writ_cnt =
+ mdev->read_cnt =
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index af2a250..0fdeb75 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
+ return;
+ }
+
+- if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
++ if (!capable(CAP_SYS_ADMIN)) {
+ retcode = ERR_PERM;
+ goto fail;
+ }
+@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
+ module_put(THIS_MODULE);
+ }
+
+-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
++static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
+
+ static unsigned short *
+ __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
+@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
+ cn_reply->ack = 0; // not used here.
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char*)tl - (char*)reply->tag_list);
+@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
+ cn_reply->id.idx = CN_IDX_DRBD;
+ cn_reply->id.val = CN_VAL_DRBD;
+
+- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
++ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
+ cn_reply->ack = 0; /* not used here. */
+ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
+ (int)((char *)tl - (char *)reply->tag_list);
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 13cbdd3..d374957 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -894,7 +894,7 @@ retry:
+ sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+ sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+
+- atomic_set(&mdev->packet_seq, 0);
++ atomic_set_unchecked(&mdev->packet_seq, 0);
+ mdev->peer_seq = 0;
+
+ drbd_thread_start(&mdev->asender);
+@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+ do {
+ next_epoch = NULL;
+
+- epoch_size = atomic_read(&epoch->epoch_size);
++ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
+
+ switch (ev & ~EV_CLEANUP) {
+ case EV_PUT:
+@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+ rv = FE_DESTROYED;
+ } else {
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ /* atomic_set(&epoch->active, 0); is already zero */
+ if (rv == FE_STILL_LIVE)
+ rv = FE_RECYCLED;
+@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ drbd_flush(mdev);
+
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+ if (epoch)
+ break;
+ }
+
+ epoch = mdev->current_epoch;
+- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
++ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
+
+ D_ASSERT(atomic_read(&epoch->active) == 0);
+ D_ASSERT(epoch->flags == 0);
+@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
+ }
+
+ epoch->flags = 0;
+- atomic_set(&epoch->epoch_size, 0);
++ atomic_set_unchecked(&epoch->epoch_size, 0);
+ atomic_set(&epoch->active, 0);
+
+ spin_lock(&mdev->epoch_lock);
+- if (atomic_read(&mdev->current_epoch->epoch_size)) {
++ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
+ list_add(&epoch->list, &mdev->current_epoch->list);
+ mdev->current_epoch = epoch;
+ mdev->epochs++;
+@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
+ spin_unlock(&mdev->peer_seq_lock);
+
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+- atomic_inc(&mdev->current_epoch->epoch_size);
++ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
+ return drbd_drain_block(mdev, data_size);
+ }
+
+@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
+
+ spin_lock(&mdev->epoch_lock);
+ e->epoch = mdev->current_epoch;
+- atomic_inc(&e->epoch->epoch_size);
++ atomic_inc_unchecked(&e->epoch->epoch_size);
+ atomic_inc(&e->epoch->active);
+ spin_unlock(&mdev->epoch_lock);
+
+@@ -3637,7 +3637,7 @@ struct data_cmd {
+ int expect_payload;
+ size_t pkt_size;
+ drbd_cmd_handler_f function;
+-};
++} __do_const;
+
+ static struct data_cmd drbd_cmd_handler[] = {
+ [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
+@@ -3884,7 +3884,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
+ D_ASSERT(list_empty(&mdev->done_ee));
+
+ /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+- atomic_set(&mdev->current_epoch->epoch_size, 0);
++ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
+ D_ASSERT(list_empty(&mdev->current_epoch->list));
+ }
+
+@@ -4492,7 +4492,7 @@ static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
+ struct asender_cmd {
+ size_t pkt_size;
+ int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
+-};
++} __do_const;
+
+ static struct asender_cmd *get_asender_cmd(int cmd)
+ {
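Both drbd command tables above gain __do_const, the grsecurity annotation that forces structures of function pointers into read-only memory so a kernel write primitive cannot redirect the dispatch. Plain const plays the same role in this hypothetical userspace model:

    #include <stdio.h>

    struct cmd {
            const char *name;
            void (*handler)(void);
    };

    static void handle_ping(void) { puts("ping"); }

    /* 'const' places the table in .rodata; any runtime store to it
     * faults, which is the property __do_const enforces in-kernel. */
    static const struct cmd cmd_table[] = {
            { "ping", handle_ping },
    };

    int main(void)
    {
            cmd_table[0].handler();        /* dispatch still works */
            /* cmd_table[0].handler = 0;      would not compile    */
            return 0;
    }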
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d659135..45fe633 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(get_ds());
+- bw = file->f_op->write(file, buf, len, &pos);
++ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
+ set_fs(old_fs);
+ if (likely(bw == len))
+ return 0;
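In __do_lo_send_write() the surrounding set_fs(get_ds()) widens the usable address range so a kernel buffer may legally be handed to ->write(), which is typed to take a user pointer; the added __force_user cast records that deliberate address-space crossing for the sparse checker (and for PAX_MEMORY_UDEREF builds). A simplified model of the annotations involved, which collapse to ordinary C unless compiled under sparse:

    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    /* Stand-in for a ->write() method that expects a user pointer. */
    static long fake_write(const char __user *buf, size_t len)
    {
            return (long)len;   /* pretend the copy happened */
    }

    int main(void)
    {
            char kbuf[8] = "data";

            /* The __force cast is the point: kernel memory knowingly
             * passed where a __user-typed parameter is expected. */
            long n = fake_write((const char __force __user *)kbuf,
                                strlen(kbuf));
            printf("wrote %ld bytes\n", n);
            return 0;
    }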
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index a63b0a2..30228d1 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -83,7 +83,7 @@
+
+ #define MAX_SPEED 0xffff
+
+-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
++#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1UL))
+
+ static DEFINE_MUTEX(pktcdvd_mutex);
+ static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
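The one-character pktcdvd change matters because settings.size is 32-bit while sector_t is 64-bit on most configurations: with a plain 1, ~((pd)->settings.size - 1) is computed as a 32-bit mask, and the AND silently clears the upper half of any sector past 4 Gi sectors. Promoting the constant to 1UL keeps the whole expression 64-bit. A demonstration with made-up values, assuming an LP64 build to match 64-bit kernel arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t sector = 0x100000010ULL;   /* beyond 32 bits */
            uint32_t size = 8;                  /* zone size, pow2 */

            uint64_t bad  = sector & ~(size - 1);    /* 32-bit mask */
            uint64_t good = sector & ~(size - 1UL);  /* 64-bit mask */

            printf("bad  = 0x%llx\n", (unsigned long long)bad);
            printf("good = 0x%llx\n", (unsigned long long)good);
            return 0;
    }

Running it prints 0x10 for the broken form and 0x100000010 for the fixed one.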
+diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
+index b5f83b4..2f49d18 100644
+--- a/drivers/bluetooth/btwilink.c
++++ b/drivers/bluetooth/btwilink.c
+@@ -301,7 +301,7 @@ static void ti_st_destruct(struct hci_dev *hdev)
+
+ static int bt_ti_probe(struct platform_device *pdev)
+ {
+- static struct ti_st *hst;
++ struct ti_st *hst;
+ struct hci_dev *hdev;
+ int err;
+
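The btwilink fix drops an accidental 'static' from a probe-local pointer: a static local is a single slot shared by every invocation, so probing a second device would reuse (and race on) the first device's pointer. A contrived sketch of the difference; struct ti_st_model is a hypothetical stand-in, with calloc() playing the role of the driver's kzalloc():

    #include <stdio.h>
    #include <stdlib.h>

    struct ti_st_model { int dev_id; };

    static struct ti_st_model *probe_static(void)
    {
            static struct ti_st_model st;  /* one slot for ALL calls */
            return &st;
    }

    static struct ti_st_model *probe_fixed(void)
    {
            return calloc(1, sizeof(struct ti_st_model)); /* per call */
    }

    int main(void)
    {
            printf("static: %p %p  (same slot)\n",
                   (void *)probe_static(), (void *)probe_static());
            printf("fixed : %p %p  (distinct)\n",
                   (void *)probe_fixed(), (void *)probe_fixed());
            return 0;
    }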
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 1331740..a691234 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -419,7 +419,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
+ ENSURE(reset, CDC_RESET);
+ ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ cdi->mc_flags = 0;
+- cdo->n_minors = 0;
+ cdi->options = CDO_USE_FFLAGS;
+
+ if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
+@@ -439,8 +438,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
+ else
+ cdi->cdda_method = CDDA_OLD;
+
+- if (!cdo->generic_packet)
+- cdo->generic_packet = cdrom_dummy_generic_packet;
++ if (!cdo->generic_packet) {
++ pax_open_kernel();
++ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
++ pax_close_kernel();
++ }
+
+ cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+ mutex_lock(&cdrom_mutex);
+@@ -461,7 +463,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
+ if (cdi->exit)
+ cdi->exit(cdi);
+
+- cdi->ops->n_minors--;
+ cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+ }
+
+@@ -2110,7 +2111,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ */
+ nr = nframes;
+ do {
+- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
++ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+ if (cgc.buffer)
+ break;
+
+@@ -3432,7 +3433,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
+ struct cdrom_device_info *cdi;
+ int ret;
+
+- ret = scnprintf(info + *pos, max_size - *pos, header);
++ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
+ if (!ret)
+ return 1;
+
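cdrom_print_info() previously passed 'header' as the format argument itself; routing it through "%s" guarantees any '%' in the text is printed literally instead of being parsed as a conversion that reads garbage varargs. The same hardening pattern in miniature:

    #include <stdio.h>

    int main(void)
    {
            const char *header = "drive speed: 100%s";  /* stray '%' */

            /* printf(header);    undefined: consumes a bogus vararg */
            printf("%s\n", header);  /* safe: treated as plain data */
            return 0;
    }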
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 3ceaf00..e3c3d38 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
+ .audio_ioctl = gdrom_audio_ioctl,
+ .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
+ CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
+- .n_minors = 1,
+ };
+
+ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 4364303..9adf4ee 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
+
+ config DEVKMEM
+ bool "/dev/kmem virtual device support"
+- default y
++ default n
++ depends on !GRKERNSEC_KMEM
+ help
+ Say Y here if you want to support the /dev/kmem device. The
+ /dev/kmem device is rarely used, but can be used for certain
+@@ -596,6 +597,7 @@ config DEVPORT
+ bool
+ depends on !M68K
+ depends on ISA || PCI
++ depends on !GRKERNSEC_KMEM
+ default y
+
+ source "drivers/s390/char/Kconfig"
+diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
+index a48e05b..6bac831 100644
+--- a/drivers/char/agp/compat_ioctl.c
++++ b/drivers/char/agp/compat_ioctl.c
+@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
+ return -ENOMEM;
+ }
+
+- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
++ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
+ sizeof(*usegment) * ureserve.seg_count)) {
+ kfree(usegment);
+ kfree(ksegment);
+diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
+index 2e04433..771f2cc 100644
+--- a/drivers/char/agp/frontend.c
++++ b/drivers/char/agp/frontend.c
+@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+ if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
+ return -EFAULT;
+
+- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
+ return -EFAULT;
+
+ client = agp_find_client_by_pid(reserve.pid);
+@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
+ if (segment == NULL)
+ return -ENOMEM;
+
+- if (copy_from_user(segment, (void __user *) reserve.seg_list,
++ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
+ sizeof(struct agp_segment) * reserve.seg_count)) {
+ kfree(segment);
+ return -EFAULT;
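The agpioc_reserve_wrap() change re-bounds the user-supplied seg_count against the element type that is actually allocated, so the later count-times-size multiplication cannot wrap around to a short allocation. The general pattern, with hypothetical types:

    #include <stdio.h>
    #include <stdlib.h>

    struct seg_priv { int key; char priv[60]; };   /* allocated type */

    static void *alloc_segments(size_t count)
    {
            /* Reject counts for which count * sizeof() would wrap. */
            if (count >= (size_t)-1 / sizeof(struct seg_priv))
                    return NULL;
            return malloc(count * sizeof(struct seg_priv));
    }

    int main(void)
    {
            void *ok  = alloc_segments(16);
            void *bad = alloc_segments((size_t)-1 / 4);  /* refused */

            printf("ok=%p bad=%p\n", ok, bad);
            free(ok);
            return 0;
    }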
+diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
+index 095ab90..afad0a4 100644
+--- a/drivers/char/briq_panel.c
++++ b/drivers/char/briq_panel.c
+@@ -9,6 +9,7 @@
+ #include <linux/types.h>
+ #include <linux/errno.h>
+ #include <linux/tty.h>
++#include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/kernel.h>
+ #include <linux/wait.h>
+@@ -34,6 +35,7 @@ static int vfd_is_open;
+ static unsigned char vfd[40];
+ static int vfd_cursor;
+ static unsigned char ledpb, led;
++static DEFINE_MUTEX(vfd_mutex);
+
+ static void update_vfd(void)
+ {
+@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
+ if (!vfd_is_open)
+ return -EBUSY;
+
++ mutex_lock(&vfd_mutex);
+ for (;;) {
+ char c;
+ if (!indx)
+ break;
+- if (get_user(c, buf))
++ if (get_user(c, buf)) {
++ mutex_unlock(&vfd_mutex);
+ return -EFAULT;
++ }
+ if (esc) {
+ set_led(c);
+ esc = 0;
+@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
+ buf++;
+ }
+ update_vfd();
++ mutex_unlock(&vfd_mutex);
+
+ return len;
+ }
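briq_panel_write() mutates the global vfd[] buffer and cursor, so the patch serializes writers with a mutex and takes care to unlock on the get_user() failure path. A userspace sketch of the same locking shape using pthreads, with the error condition and buffer handling simplified:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vfd_mutex = PTHREAD_MUTEX_INITIALIZER;
    static char vfd[40];
    static int vfd_cursor;

    static int panel_write(const char *buf, size_t len)
    {
            pthread_mutex_lock(&vfd_mutex);
            for (size_t i = 0; i < len; i++) {
                    if (buf[i] == '\0') {
                            /* The error path must drop the lock too. */
                            pthread_mutex_unlock(&vfd_mutex);
                            return -1;
                    }
                    vfd[vfd_cursor++ % sizeof(vfd)] = buf[i];
            }
            pthread_mutex_unlock(&vfd_mutex);
            return (int)len;
    }

    int main(void)
    {
            panel_write("hello", 5);
            printf("%.5s\n", vfd);
            return 0;
    }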
+diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
+index f773a9d..65cd683 100644
+--- a/drivers/char/genrtc.c
++++ b/drivers/char/genrtc.c
+@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
+ switch (cmd) {
+
+ case RTC_PLL_GET:
++ memset(&pll, 0, sizeof(pll));
+ if (get_rtc_pll(&pll))
+ return -EINVAL;
+ else
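The genrtc change zeroes 'pll' before the driver fills it because the structure is subsequently copied to userspace: compiler-inserted padding, or any member a particular get_rtc_pll() implementation skips, would otherwise leak kernel stack bytes. The idiom, with a hypothetical padded struct:

    #include <stdio.h>
    #include <string.h>

    struct rtc_pll_model {
            char type;     /* padding typically follows this member */
            int  value;
    };

    static void fill_pll(struct rtc_pll_model *p)
    {
            memset(p, 0, sizeof(*p));  /* the fix: define every byte */
            p->type  = 1;
            p->value = 42;
    }

    int main(void)
    {
            struct rtc_pll_model pll;
            const unsigned char *b = (const unsigned char *)&pll;

            fill_pll(&pll);
            /* Every byte, padding included, is now defined, so a raw
             * copy_to_user()-style copy cannot leak stack contents. */
            for (size_t i = 0; i < sizeof(pll); i++)
                    printf("%02x", b[i]);
            printf("\n");
            return 0;
    }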
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 14d49e4..d331fd8 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -560,7 +560,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
+ }
+
+ static int
+-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
+ struct hpet_info *info)
+ {
+ struct hpet_timer __iomem *timer;
+diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
+index 86fe45c..c0ea948 100644
+--- a/drivers/char/hw_random/intel-rng.c
++++ b/drivers/char/hw_random/intel-rng.c
+@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
+
+ if (no_fwh_detect)
+ return -ENODEV;
+- printk(warning);
++ printk("%s", warning);
+ return -EBUSY;
+ }
+
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 58c0e63..46c16bf 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -415,7 +415,7 @@ struct ipmi_smi {
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
+
+- atomic_t stats[IPMI_NUM_STATS];
++ atomic_unchecked_t stats[IPMI_NUM_STATS];
+
+ /*
+ * run_to_completion duplicate of smb_info, smi_info
+@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
+
+
+ #define ipmi_inc_stat(intf, stat) \
+- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
+ #define ipmi_get_stat(intf, stat) \
+- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+ static int is_lan_addr(struct ipmi_addr *addr)
+ {
+@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+ INIT_LIST_HEAD(&intf->cmd_rcvrs);
+ init_waitqueue_head(&intf->waitq);
+ for (i = 0; i < IPMI_NUM_STATS; i++)
+- atomic_set(&intf->stats[i], 0);
++ atomic_set_unchecked(&intf->stats[i], 0);
+
+ intf->proc_dir = NULL;
+
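As in the drbd hunks, the IPMI statistics are pure telemetry that may legitimately wrap, so they become atomic_unchecked_t; because every call site already goes through the ipmi_inc_stat/ipmi_get_stat macros, the type swap needs no further churn. The macro indirection, modeled with stand-in names and GCC builtins:

    #include <stdio.h>

    typedef struct { volatile long counter; } atomic_unchecked_t;

    #define atomic_inc_unchecked(v) \
            __atomic_add_fetch(&(v)->counter, 1, __ATOMIC_RELAXED)
    #define atomic_read_unchecked(v) \
            __atomic_load_n(&(v)->counter, __ATOMIC_RELAXED)

    enum { STAT_sent_invalid_commands, NUM_STATS };

    static atomic_unchecked_t stats[NUM_STATS];

    /* Call sites never name the underlying type, which is what made
     * the wholesale swap in the patch cheap. */
    #define inc_stat(stat) atomic_inc_unchecked(&stats[STAT_ ## stat])
    #define get_stat(stat) \
            ((unsigned int)atomic_read_unchecked(&stats[STAT_ ## stat]))

    int main(void)
    {
            inc_stat(sent_invalid_commands);
            printf("%u\n", get_stat(sent_invalid_commands));
            return 0;
    }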
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 9397ab4..d01bee1 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -277,7 +277,7 @@ struct smi_info {
+ unsigned char slave_addr;
+
+ /* Counters and things for the proc filesystem. */
+- atomic_t stats[SI_NUM_STATS];
++ atomic_unchecked_t stats[SI_NUM_STATS];
+
+ struct task_struct *thread;
+
+@@ -286,9 +286,9 @@ struct smi_info {
+ };
+
+ #define smi_inc_stat(smi, stat) \
+- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
+ #define smi_get_stat(smi, stat) \
+- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
+
+ #define SI_MAX_PARMS 4
+
+@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ atomic_set(&new_smi->req_events, 0);
+ new_smi->run_to_completion = 0;
+ for (i = 0; i < SI_NUM_STATS; i++)
+- atomic_set(&new_smi->stats[i], 0);
++ atomic_set_unchecked(&new_smi->stats[i], 0);
+
+ new_smi->interrupt_disabled = 1;
+ atomic_set(&new_smi->stop_operation, 0);
+diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
+index 1aeaaba..e018570 100644
+--- a/drivers/char/mbcs.c
++++ b/drivers/char/mbcs.c
+@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
+ return 0;
+ }
+
+-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
++static const struct cx_device_id __devinitconst mbcs_id_table[] = {
+ {
+ .part_num = MBCS_PART_NUM,
+ .mfg_num = MBCS_MFG_NUM,
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 1451790..046b083 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -18,6 +18,7 @@
+ #include <linux/raw.h>
+ #include <linux/tty.h>
+ #include <linux/capability.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
+ #include <linux/highmem.h>
+@@ -35,6 +36,10 @@
+ # include <linux/efi.h>
+ #endif
+
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++extern const struct file_operations grsec_fops;
++#endif
++
+ static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+ {
+@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn)) {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_readwrite(from, to);
++#else
+ printk(KERN_INFO
+ "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+ current->comm, from, to);
++#endif
+ return 0;
+ }
+ cursor += PAGE_SIZE;
+@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ }
+ return 1;
+ }
++#elif defined(CONFIG_GRKERNSEC_KMEM)
++static inline int range_is_allowed(unsigned long pfn, unsigned long size)
++{
++ return 0;
++}
+ #else
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+
+ while (count > 0) {
+ unsigned long remaining;
++ char *temp;
+
+ sz = size_inside_page(p, count);
+
+@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ if (!ptr)
+ return -EFAULT;
+
+- remaining = copy_to_user(buf, ptr, sz);
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
++ if (!temp) {
++ unxlate_dev_mem_ptr(p, ptr);
++ return -ENOMEM;
++ }
++ memcpy(temp, ptr, sz);
++#else
++ temp = ptr;
++#endif
++
++ remaining = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
+ unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
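The shape of the read_mem() hunk above: rather than copying straight from the translated /dev/mem pointer to userspace, the data is first staged into a slab allocation whose bounds the PAX_USERCOPY checker can verify (GFP_USERCOPY marks it as eligible for user copies). A userspace model of the bounce-buffer pattern, with malloc/memcpy standing in for the kernel primitives named in the comments:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static long copy_out(char *dst, const char *src, size_t sz)
    {
            char *temp = malloc(sz);  /* kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */
            if (!temp)
                    return -1;        /* -ENOMEM in the original */

            memcpy(temp, src, sz);    /* stage into the checked buffer */
            memcpy(dst, temp, sz);    /* copy_to_user(buf, temp, sz)   */
            free(temp);               /* kfree(temp)                   */
            return 0;
    }

    int main(void)
    {
            char src[16] = "device window", dst[16];

            if (copy_out(dst, src, sizeof(src)) == 0)
                    printf("%s\n", dst);
            return 0;
    }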
+@@ -376,7 +407,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
+ else
+ csize = count;
+
+- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
++ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
+ if (rc < 0)
+ return rc;
+ buf += csize;
+@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+ unsigned long p = *ppos;
+- ssize_t low_count, read, sz;
++ ssize_t low_count, read, sz, err = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+- int err = 0;
+
+ read = 0;
+ if (p < (unsigned long) high_memory) {
+@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ }
+ #endif
+ while (low_count > 0) {
++ char *temp;
++
+ sz = size_inside_page(p, low_count);
+
+ /*
+@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ */
+ kbuf = xlate_dev_kmem_ptr((char *)p);
+
+- if (copy_to_user(buf, kbuf, sz))
++#ifdef CONFIG_PAX_USERCOPY
++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
++ if (!temp)
++ return -ENOMEM;
++ memcpy(temp, kbuf, sz);
++#else
++ temp = kbuf;
++#endif
++
++ err = copy_to_user(buf, temp, sz);
++
++#ifdef CONFIG_PAX_USERCOPY
++ kfree(temp);
++#endif
++
++ if (err)
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+@@ -815,6 +862,11 @@ static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
+ ssize_t ret = -EFAULT;
+ size_t len = iov_length(iv, count);
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (!capable(CAP_SYSLOG))
++ return -EPERM;
++#endif
++
+ line = kmalloc(len + 1, GFP_KERNEL);
+ if (line == NULL)
+ return -ENOMEM;
+@@ -867,6 +919,9 @@ static const struct memdev {
+ #ifdef CONFIG_CRASH_DUMP
+ [12] = { "oldmem", 0, &oldmem_fops, NULL },
+ #endif
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
++#endif
+ };
+
+ static int memory_open(struct inode *inode, struct file *filp)
+@@ -931,7 +986,7 @@ static int __init chr_dev_init(void)
+ if (!devlist[minor].name)
+ continue;
+ device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
+- NULL, devlist[minor].name);
++ NULL, "%s", devlist[minor].name);
+ }
+
+ return tty_init();
+diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
+index c689697..04e6d6a2 100644
+--- a/drivers/char/mwave/tp3780i.c
++++ b/drivers/char/mwave/tp3780i.c
+@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
+ PRINTK_2(TRACE_TP3780I,
+ "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
+
++ memset(pAbilities, 0, sizeof(*pAbilities));
+ /* fill out standard constant fields */
+ pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
+ pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
+diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
+index da3cfee..a5a6606 100644
+--- a/drivers/char/nvram.c
++++ b/drivers/char/nvram.c
+@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
+
+ spin_unlock_irq(&rtc_lock);
+
+- if (copy_to_user(buf, contents, tmp - contents))
++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
+ return -EFAULT;
+
+ *ppos = i;
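The nvram_read() change re-derives the length from the cursor and refuses to copy more than the on-stack contents[] array holds before calling copy_to_user(); it is a belt-and-braces bound in case the cursor arithmetic ever runs past the buffer. The same check in a standalone sketch:

    #include <stdio.h>
    #include <string.h>

    static long read_out(char *user, const char *contents, size_t cap,
                         const char *tmp)
    {
            size_t len = (size_t)(tmp - contents);

            if (len > cap)         /* the added sanity bound */
                    return -14;    /* -EFAULT */
            memcpy(user, contents, len);  /* copy_to_user() stand-in */
            return (long)len;
    }

    int main(void)
    {
            char contents[8] = "nvram", out[8];
            long n = read_out(out, contents, sizeof(contents),
                              contents + strlen(contents));

            printf("%ld bytes\n", n);
            return 0;
    }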
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index c244f0e..fc574b2 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -255,10 +255,8 @@
+ #include <linux/fips.h>
+ #include <linux/ptrace.h>
+ #include <linux/kmemcheck.h>
+-
+-#ifdef CONFIG_GENERIC_HARDIRQS
+-# include <linux/irq.h>
+-#endif
++#include <linux/workqueue.h>
++#include <linux/irq.h>
+
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+@@ -266,129 +264,151 @@
+ #include <asm/irq_regs.h>
+ #include <asm/io.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/random.h>
++
+ /*
+ * Configuration information
+ */
+-#define INPUT_POOL_WORDS 128
+-#define OUTPUT_POOL_WORDS 32
+-#define SEC_XFER_SIZE 512
+-#define EXTRACT_SIZE 10
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define INPUT_POOL_SHIFT 14
++#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
++#define OUTPUT_POOL_SHIFT 12
++#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
++#else
++#define INPUT_POOL_SHIFT 12
++#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
++#define OUTPUT_POOL_SHIFT 10
++#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
++#endif
++#define SEC_XFER_SIZE 512
++#define EXTRACT_SIZE 10
+
+ #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
+ /*
++ * To allow fractional bits to be tracked, the entropy_count field is
++ * denominated in units of 1/8th bits.
++ *
++ * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
++ * credit_entropy_bits() needs to be 64 bits wide.
++ */
++#define ENTROPY_SHIFT 3
++#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
++
++/*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+ */
+-static int random_read_wakeup_thresh = 64;
++static int random_read_wakeup_bits = 64;
+
+ /*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+-static int random_write_wakeup_thresh = 128;
++static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+
+ /*
+- * When the input pool goes over trickle_thresh, start dropping most
+- * samples to avoid wasting CPU time and reduce lock contention.
++ * The minimum number of seconds between urandom pool reseeding. We
++ * do this to limit the amount of entropy that can be drained from the
++ * input pool even if there are heavy demands on /dev/urandom.
+ */
+-
+-static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
+-
+-static DEFINE_PER_CPU(int, trickle_count);
++static int random_min_urandom_seed = 60;
+
+ /*
+- * A pool of size .poolwords is stirred with a primitive polynomial
+- * of degree .poolwords over GF(2). The taps for various sizes are
+- * defined below. They are chosen to be evenly spaced (minimum RMS
+- * distance from evenly spaced; the numbers in the comments are a
+- * scaled squared error sum) except for the last tap, which is 1 to
+- * get the twisting happening as fast as possible.
++ * Originally, we used a primitive polynomial of degree .poolwords
++ * over GF(2). The taps for various sizes are defined below. They
++ * were chosen to be evenly spaced except for the last tap, which is 1
++ * to get the twisting happening as fast as possible.
++ *
++ * For the purposes of better mixing, we use the CRC-32 polynomial as
++ * well to make a (modified) twisted Generalized Feedback Shift
++ * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
++ * generators. ACM Transactions on Modeling and Computer Simulation
++ * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
++ * GFSR generators II. ACM Transactions on Modeling and Computer
++ * Simulation 4:254-266)
++ *
++ * Thanks to Colin Plumb for suggesting this.
++ *
++ * The mixing operation is much less sensitive than the output hash,
++ * where we use SHA-1. All that we want of mixing operation is that
++ * it be a good non-cryptographic hash; i.e. it not produce collisions
++ * when fed "random" data of the sort we expect to see. As long as
++ * the pool state differs for different inputs, we have preserved the
++ * input entropy and done a good job. The fact that an intelligent
++ * attacker can construct inputs that will produce controlled
++ * alterations to the pool's state is not important because we don't
++ * consider such inputs to contribute any randomness. The only
++ * property we need with respect to them is that the attacker can't
++ * increase his/her knowledge of the pool's state. Since all
++ * additions are reversible (knowing the final state and the input,
++ * you can reconstruct the initial state), if an attacker has any
++ * uncertainty about the initial state, he/she can only shuffle that
++ * uncertainty about, but never cause any collisions (which would
++ * decrease the uncertainty).
++ *
++ * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
++ * Videau in their paper, "The Linux Pseudorandom Number Generator
++ * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
++ * paper, they point out that we are not using a true Twisted GFSR,
++ * since Matsumoto & Kurita used a trinomial feedback polynomial (that
++ * is, with only three taps, instead of the six that we are using).
++ * As a result, the resulting polynomial is neither primitive nor
++ * irreducible, and hence does not have a maximal period over
++ * GF(2**32). They suggest a slight change to the generator
++ * polynomial which improves the resulting TGFSR polynomial to be
++ * irreducible, which we have made here.
+ */
+ static struct poolinfo {
+- int poolwords;
++ int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
++#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+ int tap1, tap2, tap3, tap4, tap5;
+ } poolinfo_table[] = {
+- /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+- { 128, 103, 76, 51, 25, 1 },
+- /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+- { 32, 26, 20, 14, 7, 1 },
++#ifdef CONFIG_GRKERNSEC_RANDNET
++ /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
++ { S(512), 411, 308, 208, 104, 1 },
++ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
++ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
++ { S(128), 104, 76, 51, 25, 1 },
++#else
++ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
++ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
++ { S(128), 104, 76, 51, 25, 1 },
++ /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
++ /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
++ { S(32), 26, 19, 14, 7, 1 },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+- { 2048, 1638, 1231, 819, 411, 1 },
++ { S(2048), 1638, 1231, 819, 411, 1 },
+
+ /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
+- { 1024, 817, 615, 412, 204, 1 },
++ { S(1024), 817, 615, 412, 204, 1 },
+
+ /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
+- { 1024, 819, 616, 410, 207, 2 },
++ { S(1024), 819, 616, 410, 207, 2 },
+
+ /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
+- { 512, 411, 308, 208, 104, 1 },
++ { S(512), 411, 308, 208, 104, 1 },
+
+ /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
+- { 512, 409, 307, 206, 102, 2 },
++ { S(512), 409, 307, 206, 102, 2 },
+ /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
+- { 512, 409, 309, 205, 103, 2 },
++ { S(512), 409, 309, 205, 103, 2 },
+
+ /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
+- { 256, 205, 155, 101, 52, 1 },
++ { S(256), 205, 155, 101, 52, 1 },
+
+ /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
+- { 128, 103, 78, 51, 27, 2 },
++ { S(128), 103, 78, 51, 27, 2 },
+
+ /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
+- { 64, 52, 39, 26, 14, 1 },
++ { S(64), 52, 39, 26, 14, 1 },
+ #endif
+ };
+
+-#define POOLBITS poolwords*32
+-#define POOLBYTES poolwords*4
+-
+-/*
+- * For the purposes of better mixing, we use the CRC-32 polynomial as
+- * well to make a twisted Generalized Feedback Shift Reigster
+- *
+- * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
+- * Transactions on Modeling and Computer Simulation 2(3):179-194.
+- * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
+- * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266)
+- *
+- * Thanks to Colin Plumb for suggesting this.
+- *
+- * We have not analyzed the resultant polynomial to prove it primitive;
+- * in fact it almost certainly isn't. Nonetheless, the irreducible factors
+- * of a random large-degree polynomial over GF(2) are more than large enough
+- * that periodicity is not a concern.
+- *
+- * The input hash is much less sensitive than the output hash. All
+- * that we want of it is that it be a good non-cryptographic hash;
+- * i.e. it not produce collisions when fed "random" data of the sort
+- * we expect to see. As long as the pool state differs for different
+- * inputs, we have preserved the input entropy and done a good job.
+- * The fact that an intelligent attacker can construct inputs that
+- * will produce controlled alterations to the pool's state is not
+- * important because we don't consider such inputs to contribute any
+- * randomness. The only property we need with respect to them is that
+- * the attacker can't increase his/her knowledge of the pool's state.
+- * Since all additions are reversible (knowing the final state and the
+- * input, you can reconstruct the initial state), if an attacker has
+- * any uncertainty about the initial state, he/she can only shuffle
+- * that uncertainty about, but never cause any collisions (which would
+- * decrease the uncertainty).
+- *
+- * The chosen system lets the state of the pool be (essentially) the input
+- * modulo the generator polymnomial. Now, for random primitive polynomials,
+- * this is a universal class of hash functions, meaning that the chance
+- * of a collision is limited by the attacker's knowledge of the generator
+- * polynomail, so if it is chosen at random, an attacker can never force
+- * a collision. Here, we use a fixed polynomial, but we *can* assume that
+- * ###--> it is unknown to the processes generating the input entropy. <-###
+- * Because of this important property, this is a good, collision-resistant
+- * hash; hash collisions will occur no more often than chance.
+- */
+-
+ /*
+ * Static global variables
+ */
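A central idea in the random.c rework above is that entropy_count is now denominated in 1/8-bit units (ENTROPY_SHIFT), so sub-bit credits from timing samples accumulate instead of truncating to zero; ENTROPY_BITS() shifts back to whole bits for the old interfaces. The arithmetic in isolation:

    #include <stdio.h>

    #define ENTROPY_SHIFT 3
    #define ENTROPY_BITS(count) ((count) >> ENTROPY_SHIFT)

    int main(void)
    {
            int entropy_count = 0;

            /* Credit half a bit (4/8) eight times; with whole-bit
             * accounting each credit would have rounded to zero. */
            for (int i = 0; i < 8; i++)
                    entropy_count += 4;

            printf("%d bits\n", ENTROPY_BITS(entropy_count)); /* 4 */
            return 0;
    }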
+@@ -396,21 +416,6 @@ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+ static struct fasync_struct *fasync;
+
+-#if 0
+-static int debug;
+-module_param(debug, bool, 0644);
+-#define DEBUG_ENT(fmt, arg...) do { \
+- if (debug) \
+- printk(KERN_DEBUG "random %04d %04d %04d: " \
+- fmt,\
+- input_pool.entropy_count,\
+- blocking_pool.entropy_count,\
+- nonblocking_pool.entropy_count,\
+- ## arg); } while (0)
+-#else
+-#define DEBUG_ENT(fmt, arg...) do {} while (0)
+-#endif
+-
+ /**********************************************************************
+ *
+ * OS independent entropy store. Here are the functions which handle
+@@ -421,22 +426,26 @@ module_param(debug, bool, 0644);
+ struct entropy_store;
+ struct entropy_store {
+ /* read-only data: */
+- struct poolinfo *poolinfo;
++ const struct poolinfo *poolinfo;
+ __u32 *pool;
+ const char *name;
+ struct entropy_store *pull;
+- int limit;
++ struct work_struct push_work;
+
+ /* read-write data: */
++ unsigned long last_pulled;
+ spinlock_t lock;
+- unsigned add_ptr;
+- unsigned input_rotate;
++ unsigned short add_ptr;
++ unsigned short input_rotate;
+ int entropy_count;
+ int entropy_total;
+ unsigned int initialized:1;
++ unsigned int limit:1;
++ unsigned int last_data_init:1;
+ __u8 last_data[EXTRACT_SIZE];
+ };
+
++static void push_to_pool(struct work_struct *work);
+ static __u32 input_pool_data[INPUT_POOL_WORDS];
+ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+ static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+@@ -445,7 +454,7 @@ static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ .pool = input_pool_data
+ };
+
+@@ -454,16 +463,20 @@ static struct entropy_store blocking_pool = {
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+- .pool = blocking_pool_data
++ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
++ .pool = blocking_pool_data,
++ .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
++ push_to_pool),
+ };
+
+ static struct entropy_store nonblocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+- .pool = nonblocking_pool_data
++ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
++ .pool = nonblocking_pool_data,
++ .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
++ push_to_pool),
+ };
+
+ static __u32 const twist_table[8] = {
+@@ -480,8 +493,8 @@ static __u32 const twist_table[8] = {
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++static void _mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
+ {
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+@@ -501,7 +514,7 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+- w = rol32(*bytes++, input_rotate & 31);
++ w = rol32(*bytes++, input_rotate);
+ i = (i - 1) & wordmask;
+
+ /* XOR in the various taps */
+@@ -521,11 +534,11 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+- input_rotate += i ? 7 : 14;
++ input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
+ }
+
+- ACCESS_ONCE(r->input_rotate) = input_rotate;
+- ACCESS_ONCE(r->add_ptr) = i;
++ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
++ ACCESS_ONCE_RW(r->add_ptr) = i;
+ smp_wmb();
+
+ if (out)
+@@ -533,13 +546,21 @@ static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+ }
+
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
++ _mix_pool_bytes(r, in, nbytes, out);
++}
++
+ static void mix_pool_bytes(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++ int nbytes, __u8 out[64])
+ {
+ unsigned long flags;
+
++ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+ spin_lock_irqsave(&r->lock, flags);
+- __mix_pool_bytes(r, in, nbytes, out);
++ _mix_pool_bytes(r, in, nbytes, out);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+@@ -556,58 +577,151 @@ struct fast_pool {
+ * collector. It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+-static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
++static void fast_mix(struct fast_pool *f, __u32 input[4])
+ {
+- const char *bytes = in;
+ __u32 w;
+- unsigned i = f->count;
+ unsigned input_rotate = f->rotate;
+
+- while (nbytes--) {
+- w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+- f->pool[(i + 1) & 3];
+- f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+- input_rotate += (i++ & 3) ? 7 : 14;
+- }
+- f->count = i;
++ w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
++ f->pool[0] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate = (input_rotate + 14) & 31;
++ w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
++ f->pool[1] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate = (input_rotate + 7) & 31;
++ w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
++ f->pool[2] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate = (input_rotate + 7) & 31;
++ w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
++ f->pool[3] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate = (input_rotate + 7) & 31;
++
+ f->rotate = input_rotate;
++ f->count++;
+ }
+
+ /*
+- * Credit (or debit) the entropy store with n bits of entropy
++ * Credit (or debit) the entropy store with n bits of entropy.
++ * Use credit_entropy_bits_safe() if the value comes from userspace
++ * or otherwise should be checked for extreme values.
+ */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+ int entropy_count, orig;
++ const int pool_size = r->poolinfo->poolfracbits;
++ int nfrac = nbits << ENTROPY_SHIFT;
+
+ if (!nbits)
+ return;
+
+- DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+ retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+- entropy_count += nbits;
++ if (nfrac < 0) {
++ /* Debit */
++ entropy_count += nfrac;
++ } else {
++ /*
++ * Credit: we have to account for the possibility of
++ * overwriting already present entropy. Even in the
++ * ideal case of pure Shannon entropy, new contributions
++ * approach the full value asymptotically:
++ *
++ * entropy <- entropy + (pool_size - entropy) *
++ * (1 - exp(-add_entropy/pool_size))
++ *
++ * For add_entropy <= pool_size/2 then
++ * (1 - exp(-add_entropy/pool_size)) >=
++ * (add_entropy/pool_size)*0.7869...
++ * so we can approximate the exponential with
++ * 3/4*add_entropy/pool_size and still be on the
++ * safe side by adding at most pool_size/2 at a time.
++ *
++ * The use of pool_size-2 in the while statement is to
++ * prevent rounding artifacts from making the loop
++ * arbitrarily long; this limits the loop to log2(pool_size)*2
++ * turns no matter how large nbits is.
++ */
++ int pnfrac = nfrac;
++ const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
++ /* The +2 corresponds to the /4 in the denominator */
++
++ do {
++ unsigned int anfrac = min(pnfrac, pool_size/2);
++ unsigned int add =
++ ((pool_size - entropy_count)*anfrac*3) >> s;
++
++ entropy_count += add;
++ pnfrac -= anfrac;
++ } while (unlikely(entropy_count < pool_size-2 && pnfrac));
++ }
++
+ if (entropy_count < 0) {
+- DEBUG_ENT("negative entropy/overflow\n");
++ pr_warn("random: negative entropy/overflow: pool %s count %d\n",
++ r->name, entropy_count);
++ WARN_ON(1);
+ entropy_count = 0;
+- } else if (entropy_count > r->poolinfo->POOLBITS)
+- entropy_count = r->poolinfo->POOLBITS;
++ } else if (entropy_count > pool_size)
++ entropy_count = pool_size;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+
+- if (!r->initialized && nbits > 0) {
+- r->entropy_total += nbits;
+- if (r->entropy_total > 128)
+- r->initialized = 1;
++ r->entropy_total += nbits;
++ if (!r->initialized && r->entropy_total > 128) {
++ r->initialized = 1;
++ r->entropy_total = 0;
++ if (r == &nonblocking_pool) {
++ prandom_reseed_late();
++ pr_notice("random: %s pool is initialized\n", r->name);
++ }
+ }
+
+- /* should we wake readers? */
+- if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+- wake_up_interruptible(&random_read_wait);
+- kill_fasync(&fasync, SIGIO, POLL_IN);
++ trace_credit_entropy_bits(r->name, nbits,
++ entropy_count >> ENTROPY_SHIFT,
++ r->entropy_total, _RET_IP_);
++
++ if (r == &input_pool) {
++ int entropy_bits = entropy_count >> ENTROPY_SHIFT;
++
++ /* should we wake readers? */
++ if (entropy_bits >= random_read_wakeup_bits) {
++ wake_up_interruptible(&random_read_wait);
++ kill_fasync(&fasync, SIGIO, POLL_IN);
++ }
++ /* If the input pool is getting full, send some
++ * entropy to the two output pools, flipping back and
++ * forth between them, until the output pools are 75%
++ * full.
++ */
++ if (entropy_bits > random_write_wakeup_bits &&
++ r->initialized &&
++ r->entropy_total >= 2*random_read_wakeup_bits) {
++ static struct entropy_store *last = &blocking_pool;
++ struct entropy_store *other = &blocking_pool;
++
++ if (last == &blocking_pool)
++ other = &nonblocking_pool;
++ if (other->entropy_count <=
++ 3 * other->poolinfo->poolfracbits / 4)
++ last = other;
++ if (last->entropy_count <=
++ 3 * last->poolinfo->poolfracbits / 4) {
++ schedule_work(&last->push_work);
++ r->entropy_total = 0;
++ }
++ }
+ }
+ }
+
++static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
++{
++ const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
++
++ /* Cap the value to avoid overflows */
++ nbits = min(nbits, nbits_max);
++ nbits = max(nbits, -nbits_max);
++
++ credit_entropy_bits(r, nbits);
++}
++
+ /*********************************************************************
+ *
+ * Entropy input management
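The crediting loop in credit_entropy_bits() above approximates the ideal update entropy += (pool_size - entropy) * (1 - exp(-add/pool_size)) with the linear bound (3/4) * add / pool_size, applied in chunks of at most pool_size/2 so the bound stays on the safe side. A standalone model in the same fixed-point units, with pool parameters chosen to match the 128-word input pool:

    #include <stdio.h>

    static int credit(int entropy_count, int nfrac, int pool_size,
                      int poolbitshift)
    {
            const int s = poolbitshift + 3 /* ENTROPY_SHIFT */ + 2;

            do {
                    int anfrac = nfrac < pool_size / 2
                               ? nfrac : pool_size / 2;
                    int add = (int)(((long long)(pool_size - entropy_count)
                                     * anfrac * 3) >> s);

                    entropy_count += add;
                    nfrac -= anfrac;
            } while (entropy_count < pool_size - 2 && nfrac);

            return entropy_count;
    }

    int main(void)
    {
            int pool_size = 4096 << 3;  /* 4096 bits in 1/8-bit units */
            int e = 0;

            /* Credit 512 bits three times; note the diminishing
             * returns as the pool fills (384, 732, 1047 bits). */
            for (int i = 0; i < 3; i++) {
                    e = credit(e, 512 << 3, pool_size, 12);
                    printf("entropy = %d bits\n", e >> 3);
            }
            return 0;
    }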
+@@ -621,42 +735,7 @@ struct timer_rand_state {
+ unsigned dont_count_entropy:1;
+ };
+
+-#ifndef CONFIG_GENERIC_HARDIRQS
+-
+-static struct timer_rand_state *irq_timer_state[NR_IRQS];
+-
+-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+-{
+- return irq_timer_state[irq];
+-}
+-
+-static void set_timer_rand_state(unsigned int irq,
+- struct timer_rand_state *state)
+-{
+- irq_timer_state[irq] = state;
+-}
+-
+-#else
+-
+-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+-{
+- struct irq_desc *desc;
+-
+- desc = irq_to_desc(irq);
+-
+- return desc->timer_rand_state;
+-}
+-
+-static void set_timer_rand_state(unsigned int irq,
+- struct timer_rand_state *state)
+-{
+- struct irq_desc *desc;
+-
+- desc = irq_to_desc(irq);
+-
+- desc->timer_rand_state = state;
+-}
+-#endif
++#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+
+ /*
+ * Add device- or boot-specific data to the input and nonblocking
+@@ -669,15 +748,22 @@ static void set_timer_rand_state(unsigned int irq,
+ void add_device_randomness(const void *buf, unsigned int size)
+ {
+ unsigned long time = random_get_entropy() ^ jiffies;
++ unsigned long flags;
+
+- mix_pool_bytes(&input_pool, buf, size, NULL);
+- mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+- mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+- mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++ trace_add_device_randomness(size, _RET_IP_);
++ spin_lock_irqsave(&input_pool.lock, flags);
++ _mix_pool_bytes(&input_pool, buf, size, NULL);
++ _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
++ spin_unlock_irqrestore(&input_pool.lock, flags);
++
++ spin_lock_irqsave(&nonblocking_pool.lock, flags);
++ _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
++ _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++ spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
+ }
+ EXPORT_SYMBOL(add_device_randomness);
+
+-static struct timer_rand_state input_timer_state;
++static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
+
+ /*
+ * This function adds entropy to the entropy "pool" by using timing
+@@ -691,6 +777,7 @@ static struct timer_rand_state input_timer_state;
+ */
+ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ {
++ struct entropy_store *r;
+ struct {
+ long jiffies;
+ unsigned cycles;
+@@ -699,15 +786,12 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ long delta, delta2, delta3;
+
+ preempt_disable();
+- /* if over the trickle threshold, use only 1 in 4096 samples */
+- if (input_pool.entropy_count > trickle_thresh &&
+- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+- goto out;
+
+ sample.jiffies = jiffies;
+ sample.cycles = random_get_entropy();
+ sample.num = num;
+- mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++ mix_pool_bytes(r, &sample, sizeof(sample), NULL);
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+@@ -741,10 +825,8 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ * Round down by 1 bit on general principles,
+ * and limit entropy entimate to 12 bits.
+ */
+- credit_entropy_bits(&input_pool,
+- min_t(int, fls(delta>>1), 11));
++ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ }
+-out:
+ preempt_enable();
+ }
+
+@@ -757,10 +839,10 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ if (value == last_value)
+ return;
+
+- DEBUG_ENT("input event\n");
+ last_value = value;
+ add_timer_randomness(&input_timer_state,
+ (type << 4) ^ code ^ (code >> 4) ^ value);
++ trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+@@ -772,20 +854,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+- __u32 input[4], cycles = random_get_entropy();
++ cycles_t cycles = random_get_entropy();
++ __u32 input[4], c_high, j_high;
++ __u64 ip;
+
+- input[0] = cycles ^ jiffies;
+- input[1] = irq;
+- if (regs) {
+- __u64 ip = instruction_pointer(regs);
+- input[2] = ip;
+- input[3] = ip >> 32;
+- }
++ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
++ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
++ input[0] = cycles ^ j_high ^ irq;
++ input[1] = now ^ c_high;
++ ip = regs ? instruction_pointer(regs) : _RET_IP_;
++ input[2] = ip;
++ input[3] = ip >> 32;
+
+- fast_mix(fast_pool, input, sizeof(input));
++ fast_mix(fast_pool, input);
+
+- if ((fast_pool->count & 1023) &&
+- !time_after(now, fast_pool->last + HZ))
++ if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
+ return;
+
+ fast_pool->last = now;
+@@ -814,10 +897,8 @@ void add_disk_randomness(struct gendisk *disk)
+ if (!disk || !disk->random)
+ return;
+ /* first major is 1, so we get >= 0x200 here */
+- DEBUG_ENT("disk event %d:%d\n",
+- MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
+-
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
++ trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
+ }
+ #endif
+
+@@ -835,97 +916,109 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ * from the primary pool to the secondary extraction pool. We make
+ * sure we pull enough for a 'catastrophic reseed'.
+ */
++static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
+ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
++ if (r->limit == 0 && random_min_urandom_seed) {
++ unsigned long now = jiffies;
+
+- if (r->pull && r->entropy_count < nbytes * 8 &&
+- r->entropy_count < r->poolinfo->POOLBITS) {
+- /* If we're limited, always leave two wakeup worth's BITS */
+- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+- int bytes = nbytes;
+-
+- /* pull at least as many as BYTES as wakeup BITS */
+- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+- /* but never more than the buffer size */
+- bytes = min_t(int, bytes, sizeof(tmp));
+-
+- DEBUG_ENT("going to reseed %s with %d bits "
+- "(%d of %d requested)\n",
+- r->name, bytes * 8, nbytes * 8, r->entropy_count);
+-
+- bytes = extract_entropy(r->pull, tmp, bytes,
+- random_read_wakeup_thresh / 8, rsvd);
+- mix_pool_bytes(r, tmp, bytes, NULL);
+- credit_entropy_bits(r, bytes*8);
++ if (time_before(now,
++ r->last_pulled + random_min_urandom_seed * HZ))
++ return;
++ r->last_pulled = now;
+ }
++ if (r->pull &&
++ r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
++ r->entropy_count < r->poolinfo->poolfracbits)
++ _xfer_secondary_pool(r, nbytes);
++}
++
++static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
++{
++ __u32 tmp[OUTPUT_POOL_WORDS];
++ int bytes, min_bytes;
++
++ /* For /dev/random's pool, always leave two wakeups' worth */
++ int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
++
++ /* pull at least as much as a wakeup */
++ min_bytes = random_read_wakeup_bits / 8;
++ /* but never more than the buffer size */
++ bytes = min(sizeof(tmp), max_t(size_t, min_bytes, nbytes));
++
++ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
++ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
++ bytes = extract_entropy(r->pull, tmp, bytes, min_bytes, rsvd_bytes);
++ mix_pool_bytes(r, tmp, bytes, NULL);
++ credit_entropy_bits(r, bytes*8);
+ }
+
+ /*
+- * These functions extracts randomness from the "entropy pool", and
+- * returns it in a buffer.
+- *
+- * The min parameter specifies the minimum amount we can pull before
+- * failing to avoid races that defeat catastrophic reseeding while the
+- * reserved parameter indicates how much entropy we must leave in the
+- * pool after each pull to avoid starving other readers.
+- *
+- * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
++ * Used as a workqueue function so that when the input pool is getting
++ * full, we can "spill over" some entropy to the output pools. That
++ * way the output pools can store some of the excess entropy instead
++ * of letting it go to waste.
+ */
++static void push_to_pool(struct work_struct *work)
++{
++ struct entropy_store *r = container_of(work, struct entropy_store,
++ push_work);
++ BUG_ON(!r);
++ _xfer_secondary_pool(r, random_read_wakeup_bits/8);
++ trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
++ r->pull->entropy_count >> ENTROPY_SHIFT);
++}
+
++/*
++ * This function decides how many bytes to actually take from the
++ * given pool, and also debits the entropy count accordingly.
++ */
+ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ int reserved)
+ {
+- unsigned long flags;
++ int have_bytes;
++ int entropy_count, orig;
++ size_t ibytes;
+
+- /* Hold lock while accounting */
+- spin_lock_irqsave(&r->lock, flags);
+-
+- BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
+- DEBUG_ENT("trying to extract %d bits from %s\n",
+- nbytes * 8, r->name);
++ BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+
+ /* Can we pull enough? */
+- if (r->entropy_count / 8 < min + reserved) {
+- nbytes = 0;
+- } else {
+- int entropy_count, orig;
+ retry:
+- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+- /* If limited, never pull more than available */
+- if (r->limit && nbytes + reserved >= entropy_count / 8)
+- nbytes = entropy_count/8 - reserved;
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
++ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
++ ibytes = nbytes;
++ /* If limited, never pull more than available */
++ if (r->limit)
++ ibytes = min_t(size_t, ibytes, max(0, have_bytes - reserved));
++ if (ibytes < min)
++ ibytes = 0;
++ entropy_count = max_t(int, 0,
++ entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
+
+- if (entropy_count / 8 >= nbytes + reserved) {
+- entropy_count -= nbytes*8;
+- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+- goto retry;
+- } else {
+- entropy_count = reserved;
+- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+- goto retry;
+- }
+-
+- if (entropy_count < random_write_wakeup_thresh) {
+- wake_up_interruptible(&random_write_wait);
+- kill_fasync(&fasync, SIGIO, POLL_OUT);
+- }
++ trace_debit_entropy(r->name, 8 * ibytes);
++ if (ibytes &&
++ (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
++ wake_up_interruptible(&random_write_wait);
++ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+
+- DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+- nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+-
+- spin_unlock_irqrestore(&r->lock, flags);
+-
+- return nbytes;
++ return ibytes;
+ }
+
++/*
++ * This function does the actual extraction for extract_entropy and
++ * extract_entropy_user.
++ *
++ * Note: we assume that .poolwords is a multiple of 16 words.
++ */
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
+ int i;
+ union {
+ __u32 w[5];
+- unsigned long l[LONGS(EXTRACT_SIZE)];
++ unsigned long l[LONGS(20)];
+ } hash;
+ __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
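account(), rewritten in the hunk above, debits the pool with a lock-free compare-and-swap retry loop instead of holding the pool spinlock across the bookkeeping. The shape of that loop, modeled with GCC's compare-exchange builtin on a shared counter:

    #include <stdio.h>

    static int entropy_count = 2048;   /* shared, in 1/8-bit units */

    static int debit(int frac)
    {
            int orig, new_count;

            do {
                    orig = __atomic_load_n(&entropy_count,
                                           __ATOMIC_RELAXED);
                    new_count = orig - frac;
                    if (new_count < 0)
                            new_count = 0;
                    /* Retry if another CPU raced and changed it. */
            } while (!__atomic_compare_exchange_n(&entropy_count, &orig,
                                                  new_count, 0,
                                                  __ATOMIC_RELAXED,
                                                  __ATOMIC_RELAXED));
            return orig - new_count;   /* what was actually taken */
    }

    int main(void)
    {
            printf("debited %d fractional bits\n", debit(512));
            printf("remaining %d\n", entropy_count);
            return 0;
    }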
+@@ -938,6 +1031,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
+ /*
++ * If we have an architectural hardware random number
++ * generator, mix that in, too.
++ */
++ for (i = 0; i < LONGS(20); i++) {
++ unsigned long v;
++ if (!arch_get_random_long(&v))
++ break;
++ hash.l[i] ^= v;
++ }
++
++ /*
+ * We mix the hash back into the pool to prevent backtracking
+ * attacks (where the attacker knows the state of the pool
+ * plus the current outputs, and attempts to find previous
+@@ -966,27 +1070,43 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ hash.w[1] ^= hash.w[4];
+ hash.w[2] ^= rol32(hash.w[2], 16);
+
+- /*
+- * If we have a architectural hardware random number
+- * generator, mix that in, too.
+- */
+- for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+- unsigned long v;
+- if (!arch_get_random_long(&v))
+- break;
+- hash.l[i] ^= v;
+- }
+-
+ memcpy(out, &hash, EXTRACT_SIZE);
+ memset(&hash, 0, sizeof(hash));
+ }
+
++/*
++ * This function extracts randomness from the "entropy pool", and
++ * returns it in a buffer.
++ *
++ * The min parameter specifies the minimum amount we can pull before
++ * failing to avoid races that defeat catastrophic reseeding while the
++ * reserved parameter indicates how much entropy we must leave in the
++ * pool after each pull to avoid starving other readers.
++ */
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int reserved)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
++ unsigned long flags;
+
++ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
++ if (fips_enabled) {
++ spin_lock_irqsave(&r->lock, flags);
++ if (!r->last_data_init) {
++ r->last_data_init = 1;
++ spin_unlock_irqrestore(&r->lock, flags);
++ trace_extract_entropy(r->name, EXTRACT_SIZE,
++ ENTROPY_BITS(r), _RET_IP_);
++ xfer_secondary_pool(r, EXTRACT_SIZE);
++ extract_buf(r, tmp);
++ spin_lock_irqsave(&r->lock, flags);
++ memcpy(r->last_data, tmp, EXTRACT_SIZE);
++ }
++ spin_unlock_irqrestore(&r->lock, flags);
++ }
++
++ trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+
+@@ -994,8 +1114,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
+- unsigned long flags;
+-
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+@@ -1015,12 +1133,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ return ret;
+ }
+
++/*
++ * This function extracts randomness from the "entropy pool", and
++ * returns it in a userspace buffer.
++ */
+ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ size_t nbytes)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+
++ trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, 0, 0);
+
+@@ -1036,7 +1159,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -1055,11 +1178,18 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ /*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for key generation, seeding
+- * TCP sequence numbers, etc. It does not use the hw random number
+- * generator, if available; use get_random_bytes_arch() for that.
++ * TCP sequence numbers, etc. It does not rely on the hardware random
++ * number generator. For random bytes direct from the hardware RNG
++ * (when available), use get_random_bytes_arch().
+ */
+ void get_random_bytes(void *buf, int nbytes)
+ {
++ if (unlikely(nonblocking_pool.initialized == 0))
++ printk(KERN_NOTICE "random: %pF get_random_bytes called "
++ "with %d bits of entropy available\n",
++ (void *) _RET_IP_,
++ nonblocking_pool.entropy_total);
++ trace_get_random_bytes(nbytes, _RET_IP_);
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+ }
+ EXPORT_SYMBOL(get_random_bytes);
+@@ -1078,6 +1208,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
+ {
+ char *p = buf;
+
++ trace_get_random_bytes_arch(nbytes, _RET_IP_);
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+@@ -1111,12 +1242,11 @@ static void init_std_data(struct entropy_store *r)
+ ktime_t now = ktime_get_real();
+ unsigned long rv;
+
+- r->entropy_count = 0;
+- r->entropy_total = 0;
++ r->last_pulled = jiffies;
+ mix_pool_bytes(r, &now, sizeof(now), NULL);
+- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++ for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+ if (!arch_get_random_long(&rv))
+- break;
++ rv = random_get_entropy();
+ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+ }
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+@@ -1139,25 +1269,7 @@ static int rand_initialize(void)
+ init_std_data(&nonblocking_pool);
+ return 0;
+ }
+-module_init(rand_initialize);
+-
+-void rand_initialize_irq(int irq)
+-{
+- struct timer_rand_state *state;
+-
+- state = get_timer_rand_state(irq);
+-
+- if (state)
+- return;
+-
+- /*
+- * If kzalloc returns null, we just won't use that entropy
+- * source.
+- */
+- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+- if (state)
+- set_timer_rand_state(irq, state);
+-}
++early_initcall(rand_initialize);
+
+ #ifdef CONFIG_BLOCK
+ void rand_initialize_disk(struct gendisk *disk)
+@@ -1169,71 +1281,59 @@ void rand_initialize_disk(struct gendisk *disk)
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+- if (state)
++ if (state) {
++ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
++ }
+ }
+ #endif
+
+ static ssize_t
+ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ {
+- ssize_t n, retval = 0, count = 0;
++ ssize_t n;
+
+ if (nbytes == 0)
+ return 0;
+
+- while (nbytes > 0) {
+- n = nbytes;
+- if (n > SEC_XFER_SIZE)
+- n = SEC_XFER_SIZE;
+-
+- DEBUG_ENT("reading %d bits\n", n*8);
+-
+- n = extract_entropy_user(&blocking_pool, buf, n);
+-
+- DEBUG_ENT("read got %d bits (%d still needed)\n",
+- n*8, (nbytes-n)*8);
+-
+- if (n == 0) {
+- if (file->f_flags & O_NONBLOCK) {
+- retval = -EAGAIN;
+- break;
+- }
+-
+- DEBUG_ENT("sleeping?\n");
+-
+- wait_event_interruptible(random_read_wait,
+- input_pool.entropy_count >=
+- random_read_wakeup_thresh);
+-
+- DEBUG_ENT("awake\n");
+-
+- if (signal_pending(current)) {
+- retval = -ERESTARTSYS;
+- break;
+- }
+-
+- continue;
+- }
+-
+- if (n < 0) {
+- retval = n;
+- break;
+- }
+- count += n;
+- buf += n;
+- nbytes -= n;
+- break; /* This break makes the device work */
+- /* like a named pipe */
++ nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
++ while (1) {
++ n = extract_entropy_user(&blocking_pool, buf, nbytes);
++ if (n < 0)
++ return n;
++ trace_random_read(n*8, (nbytes-n)*8,
++ ENTROPY_BITS(&blocking_pool),
++ ENTROPY_BITS(&input_pool));
++ if (n > 0)
++ return n;
++ /* Pool is (near) empty. Maybe wait and retry. */
++
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ wait_event_interruptible(random_read_wait,
++ ENTROPY_BITS(&input_pool) >=
++ random_read_wakeup_bits);
++ if (signal_pending(current))
++ return -ERESTARTSYS;
+ }
+-
+- return (count ? count : retval);
+ }
+
+ static ssize_t
+ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ {
+- return extract_entropy_user(&nonblocking_pool, buf, nbytes);
++ int ret;
++
++ if (unlikely(nonblocking_pool.initialized == 0))
++ printk_once(KERN_NOTICE "random: %s urandom read "
++ "with %d bits of entropy available\n",
++ current->comm, nonblocking_pool.entropy_total);
++
++ ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
++
++ trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
++ ENTROPY_BITS(&input_pool));
++ return ret;
+ }
+
+ static unsigned int
+@@ -1244,9 +1344,9 @@ random_poll(struct file *file, poll_table * wait)
+ poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
+- if (input_pool.entropy_count >= random_read_wakeup_thresh)
++ if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
+ mask |= POLLIN | POLLRDNORM;
+- if (input_pool.entropy_count < random_write_wakeup_thresh)
++ if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
+ mask |= POLLOUT | POLLWRNORM;
+ return mask;
+ }
+@@ -1297,7 +1397,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ switch (cmd) {
+ case RNDGETENTCNT:
+ /* inherently racy, no point locking */
+- if (put_user(input_pool.entropy_count, p))
++ ent_count = ENTROPY_BITS(&input_pool);
++ if (put_user(ent_count, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+@@ -1305,7 +1406,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ return -EPERM;
+ if (get_user(ent_count, p))
+ return -EFAULT;
+- credit_entropy_bits(&input_pool, ent_count);
++ credit_entropy_bits_safe(&input_pool, ent_count);
+ return 0;
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+@@ -1320,14 +1421,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ size);
+ if (retval < 0)
+ return retval;
+- credit_entropy_bits(&input_pool, ent_count);
++ credit_entropy_bits_safe(&input_pool, ent_count);
+ return 0;
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+- /* Clear the entropy pool counters. */
++ /*
++ * Clear the entropy pool counters. We no longer clear
++ * the entropy pool, as that's silly.
++ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+- rand_initialize();
++ input_pool.entropy_count = 0;
++ nonblocking_pool.entropy_count = 0;
++ blocking_pool.entropy_count = 0;
+ return 0;
+ default:
+ return -EINVAL;
+@@ -1387,23 +1493,23 @@ EXPORT_SYMBOL(generate_random_uuid);
+ #include <linux/sysctl.h>
+
+ static int min_read_thresh = 8, min_write_thresh;
+-static int max_read_thresh = INPUT_POOL_WORDS * 32;
++static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
+ static int max_write_thresh = INPUT_POOL_WORDS * 32;
+ static char sysctl_bootid[16];
+
+ /*
+- * These functions is used to return both the bootid UUID, and random
++ * This function is used to return both the bootid UUID, and random
+ * UUID. The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ *
+- * If the user accesses this via the proc interface, it will be returned
+- * as an ASCII string in the standard UUID format. If accesses via the
+- * sysctl system call, it is returned as 16 bytes of binary data.
++ * If the user accesses this via the proc interface, the UUID will be
++ * returned as an ASCII string in the standard UUID format; if via the
++ * sysctl system call, as 16 bytes of binary data.
+ */
+-static int proc_do_uuid(ctl_table *table, int write,
++static int proc_do_uuid(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- ctl_table fake_table;
++ ctl_table_no_const fake_table;
+ unsigned char buf[64], tmp_uuid[16], *uuid;
+
+ uuid = table->data;
+@@ -1427,8 +1533,26 @@ static int proc_do_uuid(ctl_table *table, int write,
+ return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ }
+
++/*
++ * Return entropy available scaled to integral bits
++ */
++static int proc_do_entropy(ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ ctl_table_no_const fake_table;
++ int entropy_count;
++
++ entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
++
++ fake_table.data = &entropy_count;
++ fake_table.maxlen = sizeof(entropy_count);
++
++ return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
++}
++
+ static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+-ctl_table random_table[] = {
++extern struct ctl_table random_table[];
++struct ctl_table random_table[] = {
+ {
+ .procname = "poolsize",
+ .data = &sysctl_poolsize,
+@@ -1440,12 +1564,12 @@ ctl_table random_table[] = {
+ .procname = "entropy_avail",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+- .proc_handler = proc_dointvec,
++ .proc_handler = proc_do_entropy,
+ .data = &input_pool.entropy_count,
+ },
+ {
+ .procname = "read_wakeup_threshold",
+- .data = &random_read_wakeup_thresh,
++ .data = &random_read_wakeup_bits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+@@ -1454,7 +1578,7 @@ ctl_table random_table[] = {
+ },
+ {
+ .procname = "write_wakeup_threshold",
+- .data = &random_write_wakeup_thresh,
++ .data = &random_write_wakeup_bits,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+@@ -1462,6 +1586,13 @@ ctl_table random_table[] = {
+ .extra2 = &max_write_thresh,
+ },
+ {
++ .procname = "urandom_min_reseed_secs",
++ .data = &random_min_urandom_seed,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ {
+ .procname = "boot_id",
+ .data = &sysctl_bootid,
+ .maxlen = 16,
+@@ -1492,7 +1623,7 @@ int random_int_secret_init(void)
+ * value is not cryptographically secure but for several uses the cost of
+ * depleting entropy is too high
+ */
+-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+ __u32 *hash;
+@@ -1510,6 +1641,7 @@ unsigned int get_random_int(void)
+
+ return ret;
+ }
++EXPORT_SYMBOL(get_random_int);
+
+ /*
+ * randomize_range() returns a start address such that
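The random_read() rewrite in the hunk above collapses the old chunk-and-retry loop into a clamp-once, loop-forever shape: clamp the request to SEC_XFER_SIZE up front, return as soon as extraction yields bytes or an error, and otherwise sleep until the input pool refills. A minimal sketch of that control flow, with hypothetical extract() and wait_for_input_entropy() helpers standing in for extract_entropy_user() and the wait_event_interruptible() call:

    static ssize_t blocking_read(char __user *buf, size_t nbytes, bool nonblock)
    {
            ssize_t n;

            if (nbytes == 0)
                    return 0;

            nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);  /* clamp once */
            while (1) {
                    n = extract(buf, nbytes);       /* hypothetical helper */
                    if (n != 0)
                            return n;               /* bytes copied, or -errno */
                    /* pool (near) empty: fail fast or sleep */
                    if (nonblock)
                            return -EAGAIN;
                    if (wait_for_input_entropy())   /* hypothetical helper */
                            return -ERESTARTSYS;    /* signal while sleeping */
            }
    }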
+diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
+index 1ee8ce7..b778bef 100644
+--- a/drivers/char/sonypi.c
++++ b/drivers/char/sonypi.c
+@@ -55,6 +55,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/system.h>
++#include <asm/local.h>
+
+ #include <linux/sonypi.h>
+
+@@ -491,7 +492,7 @@ static struct sonypi_device {
+ spinlock_t fifo_lock;
+ wait_queue_head_t fifo_proc_list;
+ struct fasync_struct *fifo_async;
+- int open_count;
++ local_t open_count;
+ int model;
+ struct input_dev *input_jog_dev;
+ struct input_dev *input_key_dev;
+@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+ static int sonypi_misc_release(struct inode *inode, struct file *file)
+ {
+ mutex_lock(&sonypi_device.lock);
+- sonypi_device.open_count--;
++ local_dec(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+ return 0;
+ }
+@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
+ {
+ mutex_lock(&sonypi_device.lock);
+ /* Flush input queue on first open */
+- if (!sonypi_device.open_count)
++ if (!local_read(&sonypi_device.open_count))
+ kfifo_reset(&sonypi_device.fifo);
+- sonypi_device.open_count++;
++ local_inc(&sonypi_device.open_count);
+ mutex_unlock(&sonypi_device.lock);
+
+ return 0;
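The sonypi conversion above is one instance of a counter pattern this patchset applies broadly: a plain int open counter becomes a local_t, so increments and decrements stay atomic in their own right rather than depending entirely on the surrounding mutex. A minimal sketch, assuming a hypothetical device struct:

    #include <asm/local.h>

    struct mydev {                          /* hypothetical */
            struct mutex lock;
            local_t open_count;
    };

    static int mydev_open(struct mydev *d)
    {
            mutex_lock(&d->lock);
            if (!local_read(&d->open_count)) {
                    /* first opener: flush any stale queued input here */
            }
            local_inc(&d->open_count);
            mutex_unlock(&d->lock);
            return 0;
    }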
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 0d91655..96118e0 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+ chip->vendor.req_complete_val)
+ goto out_recv;
+
+- if ((status == chip->vendor.req_canceled)) {
++ if (status == chip->vendor.req_canceled) {
+ dev_err(chip->dev, "Operation Canceled\n");
+ rc = -ECANCELED;
+ goto out;
+diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
+index 0636520..169c1d0 100644
+--- a/drivers/char/tpm/tpm_bios.c
++++ b/drivers/char/tpm/tpm_bios.c
+@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
+ return NULL;
+
+ return addr;
+@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+ return NULL;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
++ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
+ return NULL;
+
+ (*pos)++;
+@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ int i;
+
+ for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+- seq_putc(m, data[i]);
++ if (!seq_putc(m, data[i]))
++ return -EFAULT;
+
+ return 0;
+ }
+@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
+ log->bios_event_log_end = log->bios_event_log + len;
+
+ virt = acpi_os_map_memory(start, len);
++ if (!virt) {
++ kfree(log->bios_event_log);
++ log->bios_event_log = NULL;
++ return -EFAULT;
++ }
+
+- memcpy(log->bios_event_log, virt, len);
++ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
+
+ acpi_os_unmap_memory(virt, len);
+ return 0;
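Both tpm_bios.c checks above are rewritten for the same reason: event->event_size is read from the untrusted event log, and adding it to a pointer can wrap past the end of the address space, so a huge size compares below limit and slips through. Moving the untrusted value to the other side of the comparison keeps the arithmetic on trusted operands. A sketch, assuming base..limit is a valid mapping at least hdr bytes long:

    static const char *next_record(const char *base, const char *limit,
                                   size_t hdr, size_t len /* untrusted */)
    {
            /* Unsafe form: "base + hdr + len >= limit".  A huge len wraps
             * the sum and slips under limit; trusted-side arithmetic: */
            if (len >= (size_t)(limit - base) - hdr)
                    return NULL;            /* oversized: reject */
            return base + hdr + len;        /* start of the next record */
    }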
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index c68b8ad..ef7a702 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -570,7 +570,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
+ if (to_user) {
+ ssize_t ret;
+
+- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
++ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
+ if (ret)
+ return -EFAULT;
+ } else {
+@@ -673,7 +673,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ if (!port_has_data(port) && !port->host_connected)
+ return 0;
+
+- return fill_readbuf(port, ubuf, count, true);
++ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
+ }
+
+ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 56c6c6b..99056e6 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -533,8 +533,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
+ per_cpu(acfreq_data, cpu) = data;
+
+- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
++ pax_open_kernel();
++ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
++ }
+
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
+ if (result)
+@@ -644,7 +647,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
++ pax_open_kernel();
++ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
++ pax_close_kernel();
+ policy->cur = get_cur_freq_on_cpu(cpu);
+ break;
+ default:
+@@ -655,8 +660,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ /* Check for APERF/MPERF support in hardware */
+- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
+- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
++ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
++ pax_open_kernel();
++ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
++ pax_close_kernel();
++ }
+
+ pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
+ for (i = 0; i < perf->state_count; i++)
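The pax_open_kernel()/pax_close_kernel() pairs above are helpers provided by the PaX patch itself, not by mainline: under PaX constification, driver ops structures such as acpi_cpufreq_driver live in read-only memory, so runtime feature bits have to be set through a briefly opened write window, casting away the enforced const. The idiom, sketched:

    static void enable_const_loops(struct cpufreq_driver *drv)
    {
            pax_open_kernel();              /* PaX: open a kernel write window */
            *(u8 *)&drv->flags |= CPUFREQ_CONST_LOOPS;
            pax_close_kernel();             /* PaX: back to read-only */
    }

The same pattern recurs below for cpufreq.c, p4-clockmod, powernow-k8 and speedstep-centrino.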
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 987a165..d7f2bcd 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1790,7 +1790,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata cpufreq_cpu_notifier = {
++static struct notifier_block cpufreq_cpu_notifier = {
+ .notifier_call = cpufreq_cpu_callback,
+ };
+
+@@ -1819,8 +1819,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+
+ pr_debug("trying to register driver %s\n", driver_data->name);
+
+- if (driver_data->setpolicy)
+- driver_data->flags |= CPUFREQ_CONST_LOOPS;
++ if (driver_data->setpolicy) {
++ pax_open_kernel();
++ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
++ }
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ if (cpufreq_driver) {
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 4bf374d..b170d80 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -342,7 +342,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ }
+
+ /* priority=1 so this will get called before cpufreq_remove_dev */
+-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
++static struct notifier_block cpufreq_stat_cpu_notifier = {
+ .notifier_call = cpufreq_stat_cpu_callback,
+ .priority = 1,
+ };
+diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
+index 6be3e07..dafe020 100644
+--- a/drivers/cpufreq/p4-clockmod.c
++++ b/drivers/cpufreq/p4-clockmod.c
+@@ -166,10 +166,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+ case 0x0F: /* Core Duo */
+ case 0x16: /* Celeron Core */
+ case 0x1C: /* Atom */
+- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_open_kernel();
++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+ case 0x0D: /* Pentium M (Dothan) */
+- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_open_kernel();
++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
+ /* fall through */
+ case 0x09: /* Pentium M (Banias) */
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+@@ -181,7 +185,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+
+ /* on P-4s, the TSC runs with constant frequency independent whether
+ * throttling is active or not. */
+- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_open_kernel();
++ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
+
+ if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+ printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index f6cd315..ce0d3b7 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1341,8 +1341,11 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
+ }
+
+ /* Check for APERF/MPERF support in hardware */
+- if (cpu_has(c, X86_FEATURE_APERFMPERF))
+- cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
++ if (cpu_has(c, X86_FEATURE_APERFMPERF)) {
++ pax_open_kernel();
++ *(void **)&cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
++ pax_close_kernel();
++ }
+
+ cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
+
+diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
+index 6ea3455..4a1af8f 100644
+--- a/drivers/cpufreq/speedstep-centrino.c
++++ b/drivers/cpufreq/speedstep-centrino.c
+@@ -352,8 +352,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
+ !cpu_has(cpu, X86_FEATURE_EST))
+ return -ENODEV;
+
+- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
++ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
++ pax_open_kernel();
++ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
++ pax_close_kernel();
++ }
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index 06ce268..1e0d3e4 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -188,7 +188,7 @@ static int poll_idle(struct cpuidle_device *dev,
+
+ static void poll_idle_init(struct cpuidle_driver *drv)
+ {
+- struct cpuidle_state *state = &drv->states[0];
++ cpuidle_state_no_const *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
+index ea2f8e7..70ac501 100644
+--- a/drivers/cpuidle/governor.c
++++ b/drivers/cpuidle/governor.c
+@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
+ mutex_lock(&cpuidle_lock);
+ if (__cpuidle_find_governor(gov->name) == NULL) {
+ ret = 0;
+- list_add_tail(&gov->governor_list, &cpuidle_governors);
++ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
+ if (!cpuidle_curr_governor ||
+ cpuidle_curr_governor->rating < gov->rating)
+ cpuidle_switch_governor(gov);
+@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
+ new_gov = cpuidle_replace_governor(gov->rating);
+ cpuidle_switch_governor(new_gov);
+ }
+- list_del(&gov->governor_list);
++ pax_list_del((struct list_head *)&gov->governor_list);
+ mutex_unlock(&cpuidle_lock);
+ }
+
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 1e756e1..6f7ead5 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -131,7 +131,7 @@ static struct attribute *cpuclass_switch_attrs[] = {
+ NULL
+ };
+
+-static struct attribute_group cpuclass_attr_group = {
++static attribute_group_no_const cpuclass_attr_group = {
+ .attrs = cpuclass_default_attrs,
+ .name = "cpuidle",
+ };
+diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
+index fe765f4..0bc6d6a 100644
+--- a/drivers/crypto/hifn_795x.c
++++ b/drivers/crypto/hifn_795x.c
+@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
+ MODULE_PARM_DESC(hifn_pll_ref,
+ "PLL reference clock (pci[freq] or ext[freq], default ext)");
+
+-static atomic_t hifn_dev_number;
++static atomic_unchecked_t hifn_dev_number;
+
+ #define ACRYPTO_OP_DECRYPT 0
+ #define ACRYPTO_OP_ENCRYPT 1
+@@ -2576,7 +2576,7 @@ static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id
+ goto err_out_disable_pci_device;
+
+ snprintf(name, sizeof(name), "hifn%d",
+- atomic_inc_return(&hifn_dev_number)-1);
++ atomic_inc_return_unchecked(&hifn_dev_number)-1);
+
+ err = pci_request_regions(pdev, name);
+ if (err)
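atomic_unchecked_t, introduced above and used throughout the EDAC and firewire hunks below, is a PaX type: with the REFCOUNT hardening enabled, overflowing a plain atomic_t traps, so counters where wraparound is harmless (instance numbers, statistics) are switched to the _unchecked variants to opt out. Sketched with a hypothetical driver:

    static atomic_unchecked_t mydev_number; /* instance counter, not a refcount */

    static int mydev_probe(void)
    {
            char name[16];

            /* wraparound is harmless here, so REFCOUNT checking is opted out */
            snprintf(name, sizeof(name), "mydev%d",
                     atomic_inc_return_unchecked(&mydev_number) - 1);
            return 0;
    }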
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 59d24e9..0d20240 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -372,7 +372,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ = msecs_to_jiffies(devfreq->profile->polling_ms);
+ devfreq->nb.notifier_call = devfreq_notifier_call;
+
+- dev_set_name(&devfreq->dev, dev_name(dev));
++ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+ err = device_register(&devfreq->dev);
+ if (err) {
+ put_device(&devfreq->dev);
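The devfreq change above is a standard format-string fix: dev_name(dev) is not a string literal, so a '%' in a device name would be parsed as a conversion directive. Routing it through an explicit "%s" makes the format fixed (name below stands for dev_name(dev)):

    dev_set_name(&devfreq->dev, name);        /* unsafe if name contains '%' */
    dev_set_name(&devfreq->dev, "%s", name);  /* safe: the format is a literal */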
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index eb1d864..39ee5a7 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
+ }
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ cnt = dmatest_add_threads(dtc, DMA_PQ);
+- thread_count += cnt > 0 ?: 0;
++ thread_count += cnt > 0 ? cnt : 0;
+ }
+
+ pr_info("dmatest: Started %u threads using %s\n",
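The dmatest fix above corrects a misuse of GCC's binary `x ?: y` extension, which yields the value of x itself when x is nonzero. Here x was the comparison `cnt > 0`, so the expression evaluated to 1 and thread_count grew by at most one regardless of how many threads were actually started. A two-line demonstration:

    int cnt = 5;
    int wrong = cnt > 0 ?: 0;       /* == 1: the truth value of the test */
    int right = cnt > 0 ? cnt : 0;  /* == 5: the intended thread count */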
+diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
+index 81809c2..6409470 100644
+--- a/drivers/dma/shdma.c
++++ b/drivers/dma/shdma.c
+@@ -1054,7 +1054,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
+ return ret;
+ }
+
+-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
++static struct notifier_block sh_dmae_nmi_notifier = {
+ .notifier_call = sh_dmae_nmi_handler,
+
+ /* Run before NMI debug handler and KGDB */
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index a9d5482..376077f 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2682,7 +2682,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
+ * PCI core identifies what devices are on a system during boot, and then
+ * inquiry this table to see if this driver is for a given device found.
+ */
+-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
++static const struct pci_device_id amd64_pci_table[] __devinitconst = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
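This and the dozen EDAC hunks that follow all make the same one-word change: a const-qualified ID table annotated __devinitdata is placed in a writable init section, which silently discards the const, whereas __devinitconst puts it in the read-only counterpart. The shape of the fix, with a hypothetical device ID:

    static const struct pci_device_id my_pci_tbl[] __devinitconst = {
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },    /* hypothetical ID */
            { 0, }                                          /* zero-terminated */
    };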
+diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
+index e47e73b..348e0bd 100644
+--- a/drivers/edac/amd76x_edac.c
++++ b/drivers/edac/amd76x_edac.c
+@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762},
+diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
+index 1af531a..3a8ff27 100644
+--- a/drivers/edac/e752x_edac.c
++++ b/drivers/edac/e752x_edac.c
+@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520},
+diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
+index 6ffb6d2..383d8d7 100644
+--- a/drivers/edac/e7xxx_edac.c
++++ b/drivers/edac/e7xxx_edac.c
+@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
++static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205},
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index c3f6743..e2c52b0 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -483,9 +483,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ */
+ int edac_device_alloc_index(void)
+ {
+- static atomic_t device_indexes = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
+
+- return atomic_inc_return(&device_indexes) - 1;
++ return atomic_inc_return_unchecked(&device_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
+
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 2b378207..99ba0bd 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -30,7 +30,7 @@
+
+ static DEFINE_MUTEX(edac_pci_ctls_mutex);
+ static LIST_HEAD(edac_pci_list);
+-static atomic_t pci_indexes = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
+
+ /*
+ * edac_pci_alloc_ctl_info
+@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+ */
+ int edac_pci_alloc_index(void)
+ {
+- return atomic_inc_return(&pci_indexes) - 1;
++ return atomic_inc_return_unchecked(&pci_indexes) - 1;
+ }
+ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
+
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 8cc8676..90b70b9 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
+ static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
+ static int edac_pci_poll_msec = 1000; /* one second workq period */
+
+-static atomic_t pci_parity_count = ATOMIC_INIT(0);
+-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
++static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
+
+ static struct kobject *edac_pci_top_main_kobj;
+ static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+@@ -236,7 +236,7 @@ struct edac_pci_dev_attribute {
+ void *value;
+ ssize_t(*show) (void *, char *);
+ ssize_t(*store) (void *, const char *, size_t);
+-};
++} __do_const;
+
+ /* Set of show/store abstract level functions for PCI Parity object */
+ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+
+@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+- atomic_inc(&pci_nonparity_count);
++ atomic_inc_unchecked(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+- atomic_inc(&pci_parity_count);
++ atomic_inc_unchecked(&pci_parity_count);
+ }
+ }
+ }
+@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
+ if (!check_pci_errors)
+ return;
+
+- before_count = atomic_read(&pci_parity_count);
++ before_count = atomic_read_unchecked(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges.
+@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
+ /* Only if operator has selected panic on PCI Error */
+ if (edac_pci_get_panic_on_pe()) {
+ /* If the count is different 'after' from 'before' */
+- if (before_count != atomic_read(&pci_parity_count))
++ if (before_count != atomic_read_unchecked(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+ }
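The __do_const annotation a few hunks above (and again later on firewire's config_rom_attribute and dmi-id's dmi_device_attribute) is consumed by PaX's constify gcc plugin: structures consisting of function pointers are constified automatically, and, as this patchset uses it, __do_const requests the same treatment for a struct that mixes data fields with its ops. Sketched:

    struct my_dev_attribute {                /* hypothetical */
            void *value;
            ssize_t (*show)(void *, char *);
            ssize_t (*store)(void *, const char *, size_t);
    } __do_const;                            /* constified by the PaX plugin */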
+diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
+index c0510b3..6e2a954 100644
+--- a/drivers/edac/i3000_edac.c
++++ b/drivers/edac/i3000_edac.c
+@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3000},
+diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
+index aa08497..7e6822a 100644
+--- a/drivers/edac/i3200_edac.c
++++ b/drivers/edac/i3200_edac.c
+@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3200},
+diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
+index 4dc3ac2..67d05a6 100644
+--- a/drivers/edac/i5000_edac.c
++++ b/drivers/edac/i5000_edac.c
+@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
+ *
+ * The "E500P" device is the first device supported.
+ */
+-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+ .driver_data = I5000P},
+
+diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
+index bcbdeec..9886d16 100644
+--- a/drivers/edac/i5100_edac.c
++++ b/drivers/edac/i5100_edac.c
+@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
+ /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
+ { 0, }
+diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
+index 74d6ec34..baff517 100644
+--- a/drivers/edac/i5400_edac.c
++++ b/drivers/edac/i5400_edac.c
+@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
+ *
+ * The "E500P" device is the first device supported.
+ */
+-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
+ {0,} /* 0 terminated list. */
+ };
+diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
+index 2244df0..8a3b7a9 100644
+--- a/drivers/edac/i7300_edac.c
++++ b/drivers/edac/i7300_edac.c
+@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
+ *
+ * Has only 8086:360c PCI ID
+ */
+-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
+ {0,} /* 0 terminated list. */
+ };
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index b3ccefa..d39303b 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
+ /*
+ * pci_device_id table for which devices we are looking for
+ */
+-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
+ {0,} /* 0 terminated list. */
+diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
+index 4329d39..f3022ef 100644
+--- a/drivers/edac/i82443bxgx_edac.c
++++ b/drivers/edac/i82443bxgx_edac.c
+@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+
+ EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+
+-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
+index 931a057..fd28340 100644
+--- a/drivers/edac/i82860_edac.c
++++ b/drivers/edac/i82860_edac.c
+@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860},
+diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
+index 33864c6..01edc61 100644
+--- a/drivers/edac/i82875p_edac.c
++++ b/drivers/edac/i82875p_edac.c
+@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P},
+diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
+index 01658ca..092a452 100644
+--- a/drivers/edac/i82975x_edac.c
++++ b/drivers/edac/i82975x_edac.c
+@@ -601,7 +601,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
++static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82975X
+diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
+index 0106747..0b40417 100644
+--- a/drivers/edac/mce_amd.h
++++ b/drivers/edac/mce_amd.h
+@@ -83,7 +83,7 @@ struct amd_decoder_ops {
+ bool (*dc_mce)(u16, u8);
+ bool (*ic_mce)(u16, u8);
+ bool (*nb_mce)(u16, u8);
+-};
++} __no_const;
+
+ void amd_report_gart_errors(bool);
+ void amd_register_ecc_decoder(void (*f)(int, struct mce *));
+diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
+index b153674..ad2ba9b 100644
+--- a/drivers/edac/r82600_edac.c
++++ b/drivers/edac/r82600_edac.c
+@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
++static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+ },
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index da71881..8d7d62c 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
+ /*
+ * pci_device_id table for which devices we are looking for
+ */
+-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
++static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
+ {0,} /* 0 terminated list. */
+ };
+diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
+index b6f47de..c5acf3a 100644
+--- a/drivers/edac/x38_edac.c
++++ b/drivers/edac/x38_edac.c
+@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
+ edac_mc_free(mci);
+ }
+
+-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
++static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
+ {
+ PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ X38},
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 85661b0..cdd4560 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -512,9 +512,9 @@ void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver,
+ struct device *device)
+ {
+- static atomic_t index = ATOMIC_INIT(-1);
++ static atomic_unchecked_t index = ATOMIC_INIT(-1);
+
+- card->index = atomic_inc_return(&index);
++ card->index = atomic_inc_return_unchecked(&index);
+ card->driver = driver;
+ card->device = device;
+ card->current_tlabel = 0;
+@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
+
+ void fw_core_remove_card(struct fw_card *card)
+ {
+- struct fw_card_driver dummy_driver = dummy_driver_template;
++ fw_card_driver_no_const dummy_driver = dummy_driver_template;
+
+ card->driver->update_phy_reg(card, 4,
+ PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index b97d4f0..7578a4d 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
+ int ret;
+
+ if ((request->channels == 0 && request->bandwidth == 0) ||
+- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+- request->bandwidth < 0)
++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
+ return -EINVAL;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index 1f3dd51..1ad071c 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
+ struct config_rom_attribute {
+ struct device_attribute attr;
+ u32 key;
+-};
++} __do_const;
+
+ static ssize_t show_immediate(struct device *dev,
+ struct device_attribute *dattr, char *buf)
+diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
+index 855ab3f..11f4bbd 100644
+--- a/drivers/firewire/core-transaction.c
++++ b/drivers/firewire/core-transaction.c
+@@ -37,6 +37,7 @@
+ #include <linux/timer.h>
+ #include <linux/types.h>
+ #include <linux/workqueue.h>
++#include <linux/sched.h>
+
+ #include <asm/byteorder.h>
+
+diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
+index b45be57..5fad18b 100644
+--- a/drivers/firewire/core.h
++++ b/drivers/firewire/core.h
+@@ -101,6 +101,7 @@ struct fw_card_driver {
+
+ int (*stop_iso)(struct fw_iso_context *ctx);
+ };
++typedef struct fw_card_driver __no_const fw_card_driver_no_const;
+
+ void fw_card_initialize(struct fw_card *card,
+ const struct fw_card_driver *driver, struct device *device);
+diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
+index 94a58a0..f5eba42 100644
+--- a/drivers/firmware/dmi-id.c
++++ b/drivers/firmware/dmi-id.c
+@@ -16,7 +16,7 @@
+ struct dmi_device_attribute{
+ struct device_attribute dev_attr;
+ int field;
+-};
++} __do_const;
+ #define to_dmi_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 4cd392d..4b629e1 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
+ }
+ }
+ else {
+- /*
+- * no iounmap() for that ioremap(); it would be a no-op, but
+- * it's so early in setup that sucker gets confused into doing
+- * what it shouldn't if we actually call it.
+- */
+ p = dmi_ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto error;
+@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ if (buf == NULL)
+ return -1;
+
+- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
++ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
+
+ iounmap(buf);
+ return 0;
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 2a64e69..ac8fe51 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -1221,7 +1221,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
+ static int
+ create_efivars_bin_attributes(struct efivars *efivars)
+ {
+- struct bin_attribute *attr;
++ bin_attribute_no_const *attr;
+ int error;
+
+ /* new_var */
+@@ -1413,7 +1413,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+
+-static struct efivar_operations ops;
++static efivar_operations_no_const ops __read_only;
+
+ /*
+ * For now we register the efi subsystem with the firmware subsystem
+diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
+index 2a90ba6..07f3733 100644
+--- a/drivers/firmware/google/memconsole.c
++++ b/drivers/firmware/google/memconsole.c
+@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
+ if (!found_memconsole())
+ return -ENODEV;
+
+- memconsole_bin_attr.size = memconsole_length;
++ pax_open_kernel();
++ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
++ pax_close_kernel();
+
+ ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+
+diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
+index 98723cb..10ca85b 100644
+--- a/drivers/gpio/gpio-vr41xx.c
++++ b/drivers/gpio/gpio-vr41xx.c
+@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
+ printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
+ maskl, pendl, maskh, pendh);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 3f1799b..3a853eb 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
++ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (copy_to_user(mode_ptr + copied,
+@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
++ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
++ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] != 0) {
+ if (put_user(connector->property_ids[i],
+@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
++ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ }
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
++ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+ if (get_user(out_id, &set_connectors_ptr[i])) {
+ ret = -EFAULT;
+ goto out;
+@@ -1846,7 +1846,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
++ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+@@ -2272,7 +2272,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
++ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ ret = -EFAULT;
+@@ -2285,7 +2285,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ if (property->flags & DRM_MODE_PROP_ENUM) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
++ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+ if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+@@ -2308,7 +2308,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
++ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+@@ -2369,7 +2369,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+- void *blob_ptr;
++ void __user *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+@@ -2383,7 +2383,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+- blob_ptr = (void *)(unsigned long)out_resp->data;
++ blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ ret = -EFAULT;
+ goto done;
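Every drm_crtc.c hunk above adds the same thing: a __user qualifier on pointers reconstructed from the 64-bit handles in ioctl argument structs. The address originates in userspace, so the annotation lets sparse reject any direct dereference and funnels all access through copy_to_user()/put_user(). A minimal sketch with a hypothetical argument struct:

    struct my_ioctl_args {                  /* hypothetical */
            __u64 values_ptr;               /* userspace address carried as u64 */
            __u32 count;
    };

    static int copy_values_out(const struct my_ioctl_args *a, const u32 *vals)
    {
            u32 __user *out = (u32 __user *)(unsigned long)a->values_ptr;

            if (copy_to_user(out, vals, a->count * sizeof(u32)))
                    return -EFAULT;
            return 0;
    }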
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 11788f7..cd469eb 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+- WARN(!crtc, "checking null crtc?\n");
++ BUG_ON(!crtc);
+
+ dev = crtc->dev;
+
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index acfe567..6fd273c1 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
+ /**
+ * Copy and IOCTL return string to user space
+ */
+-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
++static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+ {
+ int len;
+
+@@ -378,7 +378,7 @@ long drm_ioctl(struct file *filp,
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ struct drm_ioctl_desc *ioctl;
+- drm_ioctl_t *func;
++ drm_ioctl_no_const_t func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
+
+ dev = file_priv->minor->dev;
+ atomic_inc(&dev->ioctl_count);
+- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
+index fb94355..e1fcec5 100644
+--- a/drivers/gpu/drm/drm_encoder_slave.c
++++ b/drivers/gpu/drm/drm_encoder_slave.c
+@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info)
+ {
+- char modalias[sizeof(I2C_MODULE_PREFIX)
+- + I2C_NAME_SIZE];
+ struct module *module = NULL;
+ struct i2c_client *client;
+ struct drm_i2c_encoder_driver *encoder_drv;
+ int err = 0;
+
+- snprintf(modalias, sizeof(modalias),
+- "%s%s", I2C_MODULE_PREFIX, info->type);
+- request_module(modalias);
++ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
+
+ client = i2c_new_device(adap, info);
+ if (!client) {
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 020b103..68ae292 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+- atomic_set(&dev->counts[i], 0);
++ atomic_set_unchecked(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+@@ -135,11 +135,11 @@ int drm_open(struct inode *inode, struct file *filp)
+
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+- if (!dev->open_count++) {
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
++ if (local_inc_return(&dev->open_count) == 1) {
+ retcode = drm_setup(dev);
+ if (retcode)
+- dev->open_count--;
++ local_dec(&dev->open_count);
+ }
+ }
+ if (!retcode) {
+@@ -476,7 +476,7 @@ int drm_release(struct inode *inode, struct file *filp)
+
+ mutex_lock(&drm_global_mutex);
+
+- DRM_DEBUG("open_count = %d\n", dev->open_count);
++ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+@@ -488,7 +488,7 @@ int drm_release(struct inode *inode, struct file *filp)
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->device),
+- dev->open_count);
++ local_read(&dev->open_count));
+
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+@@ -574,8 +574,8 @@ int drm_release(struct inode *inode, struct file *filp)
+ * End inline drm_release
+ */
+
+- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+- if (!--dev->open_count) {
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
++ if (local_dec_and_test(&dev->open_count)) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
+diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
+index c87dc96..326055d 100644
+--- a/drivers/gpu/drm/drm_global.c
++++ b/drivers/gpu/drm/drm_global.c
+@@ -36,7 +36,7 @@
+ struct drm_global_item {
+ struct mutex mutex;
+ void *object;
+- int refcount;
++ atomic_t refcount;
+ };
+
+ static struct drm_global_item glob[DRM_GLOBAL_NUM];
+@@ -49,7 +49,7 @@ void drm_global_init(void)
+ struct drm_global_item *item = &glob[i];
+ mutex_init(&item->mutex);
+ item->object = NULL;
+- item->refcount = 0;
++ atomic_set(&item->refcount, 0);
+ }
+ }
+
+@@ -59,7 +59,7 @@ void drm_global_release(void)
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
+ BUG_ON(item->object != NULL);
+- BUG_ON(item->refcount != 0);
++ BUG_ON(atomic_read(&item->refcount) != 0);
+ }
+ }
+
+@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+ void *object;
+
+ mutex_lock(&item->mutex);
+- if (item->refcount == 0) {
++ if (atomic_read(&item->refcount) == 0) {
+ item->object = kzalloc(ref->size, GFP_KERNEL);
+ if (unlikely(item->object == NULL)) {
+ ret = -ENOMEM;
+@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
+ goto out_err;
+
+ }
+- ++item->refcount;
++ atomic_inc(&item->refcount);
+ ref->object = item->object;
+ object = item->object;
+ mutex_unlock(&item->mutex);
+@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
+ struct drm_global_item *item = &glob[ref->global_type];
+
+ mutex_lock(&item->mutex);
+- BUG_ON(item->refcount == 0);
++ BUG_ON(atomic_read(&item->refcount) == 0);
+ BUG_ON(ref->object != item->object);
+- if (--item->refcount == 0) {
++ if (atomic_dec_and_test(&item->refcount)) {
+ ref->release(ref);
+ item->object = NULL;
+ }
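The drm_global conversion above turns a mutex-guarded int refcount into an atomic_t. The mutex already serialized it, so the likely point is defensive: atomic_t falls under PaX's REFCOUNT overflow trapping, and atomic_dec_and_test() keeps the release decision exact. A sketch of the release side, assuming a hypothetical item struct:

    struct item {                           /* hypothetical */
            struct mutex mutex;
            void *object;
            atomic_t refcount;              /* overflow-checked under REFCOUNT */
    };

    static void item_unref(struct item *it)
    {
            mutex_lock(&it->mutex);
            BUG_ON(atomic_read(&it->refcount) == 0);
            if (atomic_dec_and_test(&it->refcount)) {
                    kfree(it->object);
                    it->object = NULL;
            }
            mutex_unlock(&it->mutex);
    }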
+diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
+index ab1162d..42587b2 100644
+--- a/drivers/gpu/drm/drm_info.c
++++ b/drivers/gpu/drm/drm_info.c
+@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
+ struct drm_local_map *map;
+ struct drm_map_list *r_list;
+
+- /* Hardcoded from _DRM_FRAME_BUFFER,
+- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++ static const char * const types[] = {
++ [_DRM_FRAME_BUFFER] = "FB",
++ [_DRM_REGISTERS] = "REG",
++ [_DRM_SHM] = "SHM",
++ [_DRM_AGP] = "AGP",
++ [_DRM_SCATTER_GATHER] = "SG",
++ [_DRM_CONSISTENT] = "PCI",
++ [_DRM_GEM] = "GEM" };
+ const char *type;
+ int i;
+
+@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
+ map = r_list->map;
+ if (!map)
+ continue;
+- if (map->type < 0 || map->type > 5)
++ if (map->type >= ARRAY_SIZE(types))
+ type = "??";
+ else
+ type = types[map->type];
+@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ 0);
++#else
+ vma->vm_pgoff);
++#endif
+
+ #if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
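The drm_info.c rewrite above swaps a positional string array plus a magic `map->type > 5` bound for designated initializers plus ARRAY_SIZE, so adding a map type (here _DRM_GEM) cannot silently outgrow the check. Gaps in a designated array are NULL, which a lookup can guard explicitly:

    static const char * const types[] = {
            [_DRM_FRAME_BUFFER]     = "FB",
            [_DRM_REGISTERS]        = "REG",
            [_DRM_GEM]              = "GEM",
    };

    static const char *map_type_name(unsigned int t)
    {
            if (t >= ARRAY_SIZE(types) || !types[t])  /* out of range or a gap */
                    return "??";
            return types[t];
    }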
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index ddd70db..d1db604 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+- list = (struct drm_buf_desc *) (request + 1);
++ list = (struct drm_buf_desc __user *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+ request = compat_alloc_user_space(nbytes);
+ if (!access_ok(VERIFY_WRITE, request, nbytes))
+ return -EFAULT;
+- list = (struct drm_buf_pub *) (request + 1);
++ list = (struct drm_buf_pub __user *) (request + 1);
+
+ if (__put_user(count, &request->count)
+ || __put_user(list, &request->list))
+@@ -1015,7 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ return 0;
+ }
+
+-drm_ioctl_compat_t *drm_compat_ioctls[] = {
++drm_ioctl_compat_t drm_compat_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+@@ -1061,7 +1061,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn;
+ int ret;
+
+ /* Assume that ioctls without an explicit compat routine will just
+@@ -1071,10 +1070,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+ return drm_ioctl(filp, cmd, arg);
+
+- fn = drm_compat_ioctls[nr];
+-
+- if (fn != NULL)
+- ret = (*fn) (filp, cmd, arg);
++ if (drm_compat_ioctls[nr] != NULL)
++ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp, cmd, arg);
+
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index 904d7e9..ab88581 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
+ stats->data[i].value =
+ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+ else
+- stats->data[i].value = atomic_read(&dev->counts[i]);
++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
+ }
+
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index 632ae24..244cf4a 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
+- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ return -EINVAL;
+ }
+
+- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index 0f9ef9b..48bd695 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -495,7 +495,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+ int drm_sysfs_device_add(struct drm_minor *minor)
+ {
+ int err;
+- char *minor_str;
++ const char *minor_str;
+
+ minor->kdev.parent = minor->dev->dev;
+
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 8f371e8..9f85d52 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
+
+- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
+
+- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter - 1;
+ sarea_priv->last_dispatch = (int)hw_status[5];
+
+diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
+index c9339f4..f5e1b9d 100644
+--- a/drivers/gpu/drm/i810/i810_drv.h
++++ b/drivers/gpu/drm/i810/i810_drv.h
+@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
+ int page_flipping;
+
+ wait_queue_head_t irq_queue;
+- atomic_t irq_received;
+- atomic_t irq_emitted;
++ atomic_unchecked_t irq_received;
++ atomic_unchecked_t irq_emitted;
+
+ int front_offset;
+ } drm_i810_private_t;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 9b4e5c6..d7ec240 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ I915_READ(GTIMR));
+ }
+ seq_printf(m, "Interrupts received: %d\n",
+- atomic_read(&dev_priv->irq_received));
++ atomic_read_unchecked(&dev_priv->irq_received));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+@@ -1234,7 +1234,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
+ return ret;
+
+ if (opregion->header)
+- seq_write(m, opregion->header, OPREGION_SIZE);
++ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
+
+ mutex_unlock(&dev->struct_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index ca67338..0003ba7 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 61274bf..72cb4a2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -321,7 +321,7 @@ typedef struct drm_i915_private {
+ int current_page;
+ int page_flipping;
+
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+@@ -898,7 +898,7 @@ struct drm_i915_gem_object {
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+- atomic_t pending_flip;
++ atomic_unchecked_t pending_flip;
+ };
+
+ #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+@@ -1275,7 +1275,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
+ extern void intel_teardown_gmbus(struct drm_device *dev);
+ extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+ extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
++static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+ {
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index b1bb734..324ed11 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ i915_gem_clflush_object(obj);
+
+ if (obj->base.pending_write_domain)
+- cd->flips |= atomic_read(&obj->pending_flip);
++ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+@@ -904,9 +904,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+
+ static int
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+- int count)
++ unsigned int count)
+ {
+- int i;
++ unsigned int i;
+ int relocs_total = 0;
+ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+@@ -1371,7 +1371,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+- (struct drm_i915_relocation_entry __user *)
++ (struct drm_i915_gem_exec_object2 __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
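
Note: the validate_exec_list() change above switches the element count from int to unsigned int. With a signed count, a negative value supplied by userspace can slip past loop bounds and size computations after implicit conversion; an unsigned count turns such inputs into values a plain upper-bound check rejects. A small standalone illustration, with a hypothetical limit rather than the driver's real one:

#include <stdio.h>

/* With int count = -1, "i < count" never loops, but size math such as
 * count * sizeof(entry) still converts to a huge size_t; validating an
 * unsigned count against an explicit maximum is the robust form. */
static int validate(unsigned int count, unsigned int max_entries)
{
    return count <= max_entries;
}

int main(void)
{
    int from_user = -1;                                 /* hostile input */
    printf("%d\n", validate((unsigned int)from_user, 4096)); /* 0 */
    printf("%d\n", validate(128, 4096));                     /* 1 */
    return 0;
}
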
+diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
+index 13b0289..5fb9e24 100644
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
+ (unsigned long)request);
+ }
+
+-drm_ioctl_compat_t *i915_compat_ioctls[] = {
++drm_ioctl_compat_t i915_compat_ioctls[] = {
+ [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+ [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+ [DRM_I915_GETPARAM] = compat_i915_getparam,
+@@ -201,18 +201,15 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
+ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+- if (fn != NULL)
++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
++ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+ ret = (*fn) (filp, cmd, arg);
+- else
++ } else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
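
Note: as in drm_ioc32.c earlier, the table changes from an array of pointers to drm_ioctl_compat_t into an array of drm_ioctl_compat_t values; the typedef itself is turned into a function-pointer type elsewhere in this patch (visible where "drm_ioctl_compat_t fn = NULL" is assigned in the nouveau hunk below), which lets PaX's constification place the whole dispatch table in read-only memory. A minimal userspace sketch of the resulting pattern, with hypothetical names:

#include <stdio.h>

typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long do_getparam(unsigned int cmd, unsigned long arg)
{
    (void)cmd;
    return (long)arg;
}

/* The whole table is const, so it can live in .rodata and an attacker
 * cannot redirect a handler by overwriting an entry. */
static const ioctl_compat_t compat_ioctls[] = {
    [0] = do_getparam,
};

static long dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
    if (nr >= sizeof(compat_ioctls) / sizeof(compat_ioctls[0]) ||
        compat_ioctls[nr] == NULL)
        return -1;                 /* the driver falls back to drm_ioctl */
    return compat_ioctls[nr](cmd, arg);
}

int main(void)
{
    printf("%ld\n", dispatch(0, 7, 123));  /* 123 */
    printf("%ld\n", dispatch(9, 7, 123));  /* -1  */
    return 0;
}
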
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 93e74fb..4a1182d 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ struct drm_i915_master_private *master_priv;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+@@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ struct drm_i915_master_private *master_priv;
+ u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ if (IS_GEN6(dev))
+ bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
+@@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
+
+- atomic_inc(&dev_priv->irq_received);
++ atomic_inc_unchecked(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+
+@@ -1748,7 +1748,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+- atomic_set(&dev_priv->irq_received, 0);
++ atomic_set_unchecked(&dev_priv->irq_received, 0);
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+@@ -1936,7 +1936,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+- atomic_set(&dev_priv->irq_received, 0);
++ atomic_set_unchecked(&dev_priv->irq_received, 0);
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
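
Note: atomic_unchecked_t and the *_unchecked operations are grsecurity/PaX additions, not mainline API. Under PAX_REFCOUNT, plain atomic_t operations gain overflow detection to stop reference-count overflow exploits, and counters whose wraparound is harmless — like the irq_received statistics here — are converted to the unchecked variant to opt out. A rough, non-atomic userspace illustration of the checked/unchecked split (the kernel versions are arch-specific assembly; this is not them):

#include <stdio.h>
#include <stdlib.h>

/* Checked increment: trap on signed overflow, the userspace analogue
 * of PaX killing a task that overflows a reference count.
 * __builtin_add_overflow needs gcc >= 5 or clang. */
static int inc_checked(int *v)
{
    int res;
    if (__builtin_add_overflow(*v, 1, &res))
        abort();
    return *v = res;
}

/* "Unchecked" increment for pure statistics: wraparound is harmless,
 * done via unsigned arithmetic (the conversion back wraps on all
 * common ABIs) with no trap. */
static int inc_unchecked(int *v)
{
    return *v = (int)((unsigned int)*v + 1u);
}

int main(void)
{
    int stat = 0x7fffffff;                 /* INT_MAX */
    printf("%d\n", inc_unchecked(&stat));  /* wraps negative */
    /* inc_checked(&stat) here would abort() */
    return 0;
}
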
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6d36695..4a3e870 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2215,7 +2215,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
+
+ wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
+- atomic_read(&obj->pending_flip) == 0);
++ atomic_read_unchecked(&obj->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+@@ -6991,8 +6991,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ obj = work->old_fb_obj;
+
+- atomic_clear_mask(1 << intel_crtc->plane,
+- &obj->pending_flip.counter);
++ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
+
+ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+@@ -7201,7 +7200,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+- pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++ /* Contrary to the suggestions in the documentation,
++ * "Enable Panel Fitter" does not seem to be required when page
++ * flipping with a non-native mode, and worse causes a normal
++ * modeset to fail.
++ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++ */
++ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+
+@@ -7333,7 +7338,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+@@ -7347,7 +7352,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ return 0;
+
+ cleanup_pending:
+- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+ crtc->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+@@ -7482,11 +7487,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ if (HAS_PCH_SPLIT(dev)) {
+ if (pipe == 2 && IS_IVYBRIDGE(dev))
+ intel_crtc->no_pll = true;
+- intel_helper_funcs.prepare = ironlake_crtc_prepare;
+- intel_helper_funcs.commit = ironlake_crtc_commit;
++ pax_open_kernel();
++ *(void **)&intel_helper_funcs.prepare = ironlake_crtc_prepare;
++ *(void **)&intel_helper_funcs.commit = ironlake_crtc_commit;
++ pax_close_kernel();
+ } else {
+- intel_helper_funcs.prepare = i9xx_crtc_prepare;
+- intel_helper_funcs.commit = i9xx_crtc_commit;
++ pax_open_kernel();
++ *(void **)&intel_helper_funcs.prepare = i9xx_crtc_prepare;
++ *(void **)&intel_helper_funcs.commit = i9xx_crtc_commit;
++ pax_close_kernel();
+ }
+
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+@@ -8862,7 +8871,7 @@ struct intel_quirk {
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+-};
++} __do_const;
+
+ /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+ struct intel_dmi_quirk {
+diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
+index 54558a0..2d97005 100644
+--- a/drivers/gpu/drm/mga/mga_drv.h
++++ b/drivers/gpu/drm/mga/mga_drv.h
+@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
+ u32 clear_cmd;
+ u32 maccess;
+
+- atomic_t vbl_received; /**< Number of vblanks received. */
++ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
+ wait_queue_head_t fence_queue;
+- atomic_t last_fence_retired;
++ atomic_unchecked_t last_fence_retired;
+ u32 next_fence_to_post;
+
+ unsigned int fb_cpp;
+diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
+index c1f877b..0ae6960 100644
+--- a/drivers/gpu/drm/mga/mga_ioc32.c
++++ b/drivers/gpu/drm/mga/mga_ioc32.c
+@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+ return 0;
+ }
+
+-drm_ioctl_compat_t *mga_compat_ioctls[] = {
++drm_ioctl_compat_t mga_compat_ioctls[] = {
+ [DRM_MGA_INIT] = compat_mga_init,
+ [DRM_MGA_GETPARAM] = compat_mga_getparam,
+ [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
+ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+- if (fn != NULL)
++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
++ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+ ret = (*fn) (filp, cmd, arg);
+- else
++ } else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
+index 2581202..f230a8d9 100644
+--- a/drivers/gpu/drm/mga/mga_irq.c
++++ b/drivers/gpu/drm/mga/mga_irq.c
+@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+
+@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+ /* VBLANK interrupt */
+ if (status & MGA_VLINEPEN) {
+ MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ handled = 1;
+ }
+@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
+ MGA_WRITE(MGA_PRIMEND, prim_end);
+
+- atomic_inc(&dev_priv->last_fence_retired);
++ atomic_inc_unchecked(&dev_priv->last_fence_retired);
+ DRM_WAKEUP(&dev_priv->fence_queue);
+ handled = 1;
+ }
+@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+ * using fences.
+ */
+ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
+ - *sequence) <= (1 << 23)));
+
+ *sequence = cur_fence;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 5fc201b..7b032b9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -201,7 +201,7 @@ struct methods {
+ const char desc[8];
+ void (*loadbios)(struct drm_device *, uint8_t *);
+ const bool rw;
+-};
++} __do_const;
+
+ static struct methods shadow_methods[] = {
+ { "PRAMIN", load_vbios_pramin, true },
+@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_table {
+ const char id;
+ int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+-};
++} __no_const;
+
+ #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index 4c0be3a..8f2cbb5 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -238,7 +238,7 @@ struct nouveau_channel {
+ struct list_head pending;
+ uint32_t sequence;
+ uint32_t sequence_ack;
+- atomic_t last_sequence_irq;
++ atomic_unchecked_t last_sequence_irq;
+ struct nouveau_vma vma;
+ } fence;
+
+@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
+ u32 handle, u16 class);
+ void (*set_tile_region)(struct drm_device *dev, int i);
+ void (*tlb_flush)(struct drm_device *, int engine);
+-};
++} __no_const;
+
+ struct nouveau_instmem_engine {
+ void *priv;
+@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
+ struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+-};
++} __no_const;
+
+ struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+-};
++} __no_const;
+
+ struct nouveau_fb_engine {
+ int num_tiles;
+@@ -706,7 +706,7 @@ struct drm_nouveau_private {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+- atomic_t validate_sequence;
++ atomic_unchecked_t validate_sequence;
+ } ttm;
+
+ struct {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 2f6daae..c9d7b9e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+- sequence = atomic_read(&chan->fence.last_sequence_irq);
++ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
+
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
+ return ret;
+ }
+
+- atomic_set(&chan->fence.last_sequence_irq, 0);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index bd0b1fc..c082986 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -315,7 +315,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+ int trycnt = 0;
+ int ret, i;
+
+- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
++ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
+ retry:
+ if (++trycnt > 100000) {
+ NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+index 475ba81..a6c530c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+@@ -51,7 +51,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn = NULL;
++ drm_ioctl_compat_t fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index 01adcfb..c6726fe 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -544,7 +544,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
+index dbdea8e..cd6eeeb 100644
+--- a/drivers/gpu/drm/nouveau/nv04_graph.c
++++ b/drivers/gpu/drm/nouveau/nv04_graph.c
+@@ -554,7 +554,7 @@ static int
+ nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+ {
+- atomic_set(&chan->fence.last_sequence_irq, data);
++ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
+index bcac90b..53bfc76 100644
+--- a/drivers/gpu/drm/r128/r128_cce.c
++++ b/drivers/gpu/drm/r128/r128_cce.c
+@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
+
+ /* GH: Simple idle check.
+ */
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+
+ /* We don't support anything other than bus-mastering ring mode,
+ * but the ring can be in either AGP or PCI space for the ring
+diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
+index 930c71b..499aded 100644
+--- a/drivers/gpu/drm/r128/r128_drv.h
++++ b/drivers/gpu/drm/r128/r128_drv.h
+@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
+ int is_pci;
+ unsigned long cce_buffers_offset;
+
+- atomic_t idle_count;
++ atomic_unchecked_t idle_count;
+
+ int page_flipping;
+ int current_page;
+ u32 crtc_offset;
+ u32 crtc_offset_cntl;
+
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+
+ u32 color_fmt;
+ unsigned int front_offset;
+diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
+index 51c99fc..8acd411 100644
+--- a/drivers/gpu/drm/r128/r128_ioc32.c
++++ b/drivers/gpu/drm/r128/r128_ioc32.c
+@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
+ return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ }
+
+-drm_ioctl_compat_t *r128_compat_ioctls[] = {
++drm_ioctl_compat_t r128_compat_ioctls[] = {
+ [DRM_R128_INIT] = compat_r128_init,
+ [DRM_R128_DEPTH] = compat_r128_depth,
+ [DRM_R128_STIPPLE] = compat_r128_stipple,
+@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
+ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+- if (fn != NULL)
++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
++ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+ ret = (*fn) (filp, cmd, arg);
+- else
++ } else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
+index 429d5a0..7e899ed 100644
+--- a/drivers/gpu/drm/r128/r128_irq.c
++++ b/drivers/gpu/drm/r128/r128_irq.c
+@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+ /* VBLANK interrupt */
+ if (status & R128_CRTC_VBLANK_INT) {
+ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+- atomic_inc(&dev_priv->vbl_received);
++ atomic_inc_unchecked(&dev_priv->vbl_received);
+ drm_handle_vblank(dev, 0);
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
+index a9e33ce..09edd4b 100644
+--- a/drivers/gpu/drm/r128/r128_state.c
++++ b/drivers/gpu/drm/r128/r128_state.c
+@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
+
+ static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+ {
+- if (atomic_read(&dev_priv->idle_count) == 0)
++ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
+ r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+ else
+- atomic_set(&dev_priv->idle_count, 0);
++ atomic_set_unchecked(&dev_priv->idle_count, 0);
+ }
+
+ #endif
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 5efba47..aaaf339 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3094,7 +3094,9 @@ static int evergreen_startup(struct radeon_device *rdev)
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+- rdev->asic->copy = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->copy = NULL;
++ pax_close_kernel();
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
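
Note: pax_open_kernel()/pax_close_kernel() are PaX primitives that briefly lift the kernel's write protection (e.g. the CR0.WP bit on x86) so that an ops structure made read-only by constification can still be updated on a rare, legitimate path like this blitter fallback; the "*(void **)&" cast is what defeats the const qualifier at the C level. Below is a loose userspace analogy using mprotect() — not the actual kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { int (*copy)(void); };
static int real_copy(void) { return 0; }

int main(void)
{
    size_t pg = (size_t)sysconf(_SC_PAGESIZE);
    struct ops *t = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (t == MAP_FAILED)
        return 1;
    t->copy = real_copy;
    mprotect(t, pg, PROT_READ);              /* "constified" state */

    mprotect(t, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
    t->copy = NULL;          /* blitter init failed: fall back     */
    mprotect(t, pg, PROT_READ);              /* pax_close_kernel() */

    printf("copy is %s\n", t->copy ? "set" : "NULL");
    return 0;
}
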
+diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
+index 5a82b6b..9e69c73 100644
+--- a/drivers/gpu/drm/radeon/mkregtable.c
++++ b/drivers/gpu/drm/radeon/mkregtable.c
+@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
+ regex_t mask_rex;
+ regmatch_t match[4];
+ char buf[1024];
+- size_t end;
++ long end;
+ int len;
+ int done = 0;
+ int r;
+ unsigned o;
+ struct offset *offset;
+ char last_reg_s[10];
+- int last_reg;
++ unsigned long last_reg;
+
+ if (regcomp
+ (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 77e6fb1..43e5aae 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1380,7 +1380,9 @@ static int cayman_startup(struct radeon_device *rdev)
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+- rdev->asic->copy = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->copy = NULL;
++ pax_close_kernel();
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 76c1290..865d31e 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -592,8 +592,10 @@ int r100_pci_gart_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++ pax_open_kernel();
++ *(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
++ *(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++ pax_close_kernel();
+ return radeon_gart_table_ram_alloc(rdev);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 441570b..8896094 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -105,8 +105,10 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
+ if (r)
+ DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+ rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++ pax_open_kernel();
++ *(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
++ *(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++ pax_close_kernel();
+ return radeon_gart_table_vram_alloc(rdev);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 57e45c6..73bf8a5 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2440,7 +2440,9 @@ int r600_startup(struct radeon_device *rdev)
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+- rdev->asic->copy = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->copy = NULL;
++ pax_close_kernel();
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index cb1acff..8861bc5 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ nfaces = 1;
++ array = 0;
+ switch (G_038000_DIM(word0)) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 28e69e9..c4a82cc 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -177,7 +177,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
+ */
+ struct radeon_fence_driver {
+ uint32_t scratch_reg;
+- atomic_t seq;
++ atomic_unchecked_t seq;
+ uint32_t last_seq;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index a2e1eae..8e4a0ec 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -114,13 +114,17 @@ void radeon_agp_disable(struct radeon_device *rdev)
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++ pax_open_kernel();
++ *(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
++ *(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++ pax_close_kernel();
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++ pax_open_kernel();
++ *(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
++ *(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++ pax_close_kernel();
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ }
+@@ -974,10 +978,12 @@ int radeon_asic_init(struct radeon_device *rdev)
+ rdev->asic = &r420_asic;
+ /* handle macs */
+ if (rdev->bios == NULL) {
+- rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
+- rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
+- rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
+- rdev->asic->set_memory_clock = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
++ *(void **)&rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
++ *(void **)&rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
++ *(void **)&rdev->asic->set_memory_clock = NULL;
++ pax_close_kernel();
+ }
+ break;
+ case CHIP_RS400:
+@@ -1057,8 +1063,10 @@ int radeon_asic_init(struct radeon_device *rdev)
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+- rdev->asic->get_memory_clock = NULL;
+- rdev->asic->set_memory_clock = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->get_memory_clock = NULL;
++ *(void **)&rdev->asic->set_memory_clock = NULL;
++ pax_close_kernel();
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 8cde84b..0d3f11f 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+- can_switch = (dev->open_count == 0);
++ can_switch = (local_read(&dev->open_count) == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
+index a1b59ca..86f2d44 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.h
++++ b/drivers/gpu/drm/radeon/radeon_drv.h
+@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
+
+ /* SW interrupt */
+ wait_queue_head_t swi_queue;
+- atomic_t swi_emitted;
++ atomic_unchecked_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ uint32_t r500_disp_irq_reg;
+diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
+index 76ec0e9..6feb1a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ return 0;
+ }
+- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
++ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
+ if (!rdev->cp.ready)
+ /* FIXME: cp is not running assume everythings is done right
+ * away
+@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
+ return r;
+ }
+ radeon_fence_write(rdev, 0);
+- atomic_set(&rdev->fence_drv.seq, 0);
++ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv.created);
+ INIT_LIST_HEAD(&rdev->fence_drv.emited);
+ INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
+index 48b7cea..b10b216 100644
+--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
+@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+- || __put_user((void __user *)(unsigned long)req32.value,
++ || __put_user((unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+@@ -369,7 +369,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ #define compat_radeon_cp_setparam NULL
+ #endif /* X86_64 || IA64 */
+
+-drm_ioctl_compat_t *radeon_compat_ioctls[] = {
++drm_ioctl_compat_t radeon_compat_ioctls[] = {
+ [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+ [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+ [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+@@ -394,18 +394,15 @@ drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+- drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+-
+- if (fn != NULL)
++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
++ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+ ret = (*fn) (filp, cmd, arg);
+- else
++ } else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
+index 00da384..32f972d 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq.c
++++ b/drivers/gpu/drm/radeon/radeon_irq.c
+@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
+ unsigned int ret;
+ RING_LOCALS;
+
+- atomic_inc(&dev_priv->swi_emitted);
+- ret = atomic_read(&dev_priv->swi_emitted);
++ atomic_inc_unchecked(&dev_priv->swi_emitted);
++ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
+
+ BEGIN_RING(4);
+ OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *) dev->dev_private;
+
+- atomic_set(&dev_priv->swi_emitted, 0);
++ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
+ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+ dev->max_vblank_count = 0x001fffff;
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 65be5e8..578b3c2 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -487,16 +487,20 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev)
+ unsigned i;
+ int r;
+
+- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
++ pax_open_kernel();
++ *(void **)&radeon_debugfs_ib_bogus_info_list[0].data = rdev;
++ pax_close_kernel();
+ r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+ if (r)
+ return r;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
+- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
+- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
+- radeon_debugfs_ib_list[i].driver_features = 0;
+- radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
++ pax_open_kernel();
++ *(void **)&radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
++ *(void **)&radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
++ *(u32 *)&radeon_debugfs_ib_list[i].driver_features = 0;
++ *(void **)&radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
++ pax_close_kernel();
+ }
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
+ RADEON_IB_POOL_SIZE);
+diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
+index e8422ae..d22d4a8 100644
+--- a/drivers/gpu/drm/radeon/radeon_state.c
++++ b/drivers/gpu/drm/radeon/radeon_state.c
+@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+ sarea_priv->nbox * sizeof(depth_boxes[0])))
+ return -EFAULT;
+
+@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_getparam_t *param = data;
+- int value;
++ int value = 0;
+
+ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
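
Note: the "int value = 0" here is defensive zero-initialization; the usual motivation for such a change is that radeon_cp_getparam() copies value back to userspace at the end, so any path that reaches that copy without assigning it would export indeterminate kernel stack bytes — an infoleak. The "array = 0" addition in r600_cs.c above fixes the sibling problem of reading an uninitialized local. A tiny sketch of the bug shape, with memcpy standing in for copy_to_user:

#include <stdio.h>
#include <string.h>

/* 'value' is only assigned on recognized requests, yet is copied out
 * unconditionally; without the "= 0", unknown requests would hand the
 * caller whatever happened to be on the stack. */
static void getparam(int request, int *out)
{
    int value = 0;                        /* the fix */
    if (request == 1)
        value = 1234;
    memcpy(out, &value, sizeof(value));   /* stands in for copy_to_user */
}

int main(void)
{
    int out;
    getparam(99, &out);                   /* unknown request */
    printf("%d\n", out);                  /* deterministic 0, not junk */
    return 0;
}
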
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 0b5468b..74cfb87 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -631,7 +631,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+ man->size = size >> PAGE_SHIFT;
+ }
+
+-static struct vm_operations_struct radeon_ttm_vm_ops;
++static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
+ static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+ }
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
++ pax_open_kernel();
+ radeon_ttm_vm_ops = *ttm_vm_ops;
+ radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
++ pax_close_kernel();
+ }
+ vma->vm_ops = &radeon_ttm_vm_ops;
+ return 0;
+@@ -820,30 +822,25 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
+ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+- unsigned i;
++ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1] = {
++ {
++ .name = "radeon_vram_mm",
++ .show = &radeon_mm_dump_table,
++ },
++ {
++ .name = "radeon_gtt_mm",
++ .show = &radeon_mm_dump_table,
++ },
++ {
++ .name = "ttm_page_pool",
++ .show = &ttm_page_alloc_debugfs,
++ },
++ };
+
+- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+- if (i == 0)
+- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+- else
+- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+- radeon_mem_types_list[i].driver_features = 0;
+- if (i == 0)
+- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+- else
+- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+-
+- }
+- /* Add ttm page pool to debugfs */
+- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+- radeon_mem_types_list[i].driver_features = 0;
+- radeon_mem_types_list[i].data = NULL;
++ pax_open_kernel();
++ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
++ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
++ pax_close_kernel();
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+
+ #endif
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 93bce72..00332c1 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+ if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+ rdev->pm.sideport_bandwidth.full)
+ rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
++ read_delay_latency.full = dfixed_const(800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
+ rdev->pm.igp_sideport_mclk);
++ a.full = dfixed_const(370);
++ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
+ } else {
+ if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+ rdev->pm.k8_bandwidth.full)
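
Note: the rs690 change reorders a fixed-point computation. drm_fixed.h's 20.12 format stores a value as x << 12 in a u32, so the largest representable integer is 2^20 - 1 = 1048575; the old dfixed_const(370 * 800 * 1000) asked for 296000000 and silently overflowed. The fix keeps 800 * 1000 = 800000, which still fits, divides by the sideport memory clock first, and only then multiplies by 370 with dfixed_mul. A standalone check of the arithmetic, assuming the mainline 20.12 encoding:

#include <stdio.h>
#include <stdint.h>

/* 20.12 fixed point, as in drm_fixed.h: integer part in the top 20
 * bits, 12 fractional bits. */
#define DFIXED_CONST(x) (((uint32_t)(x)) << 12)

int main(void)
{
    uint64_t needed = (uint64_t)370 * 800 * 1000 << 12;
    printf("370*800*1000 needs %llu, u32 max %u -> overflow\n",
           (unsigned long long)needed, UINT32_MAX);
    printf("800*1000 as 20.12 = %u (fits)\n", DFIXED_CONST(800 * 1000));
    return 0;
}
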
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 3e72074..9fbe45b 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1083,7 +1083,9 @@ static int rv770_startup(struct radeon_device *rdev)
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+- rdev->asic->copy = NULL;
++ pax_open_kernel();
++ *(void **)&rdev->asic->copy = NULL;
++ pax_close_kernel();
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index e70ddd8..ddfa1cd 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -263,7 +263,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+ zone->glob = glob;
+ glob->zone_kernel = zone;
+ ret = kobject_init_and_add(
+- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+ if (unlikely(ret != 0)) {
+ kobject_put(&zone->kobj);
+ return ret;
+@@ -346,7 +346,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+ zone->glob = glob;
+ glob->zone_dma32 = zone;
+ ret = kobject_init_and_add(
+- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
++ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
+ if (unlikely(ret != 0)) {
+ kobject_put(&zone->kobj);
+ return ret;
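
Note: kobject_init_and_add() takes a printf-style format string, so passing zone->name directly lets any '%' in the name be interpreted as a conversion specifier; the added "%s" treats the name as data. The same rule applies to every printf-family API. A quick userspace demonstration:

#include <stdio.h>

int main(void)
{
    const char *name = "zone_%x_kernel";   /* imagine a hostile name */

    printf("%s\n", name);   /* safe: name is data */
    /* printf(name);           unsafe: %x would consume a nonexistent
     *                         argument -- the classic format-string
     *                         bug the "%s" hunks above prevent */
    return 0;
}
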
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 9e4313e..46fad36 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
+ static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+ {
+- static atomic_t start_pool = ATOMIC_INIT(0);
++ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+- unsigned pool_offset = atomic_add_return(1, &start_pool);
++ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
+ struct ttm_page_pool *pool;
+ int shrink_pages = sc->nr_to_scan;
+
+diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
+index 9cf87d9..2000b7d 100644
+--- a/drivers/gpu/drm/via/via_drv.h
++++ b/drivers/gpu/drm/via/via_drv.h
+@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
+ typedef uint32_t maskarray_t[5];
+
+ typedef struct drm_via_irq {
+- atomic_t irq_received;
++ atomic_unchecked_t irq_received;
+ uint32_t pending_mask;
+ uint32_t enable_mask;
+ wait_queue_head_t irq_queue;
+@@ -75,7 +75,7 @@ typedef struct drm_via_private {
+ struct timeval last_vblank;
+ int last_vblank_valid;
+ unsigned usec_per_vblank;
+- atomic_t vbl_received;
++ atomic_unchecked_t vbl_received;
+ drm_via_state_t hc_state;
+ char pci_buf[VIA_PCI_BUF_SIZE];
+ const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
+index d391f48..10c8ca3 100644
+--- a/drivers/gpu/drm/via/via_irq.c
++++ b/drivers/gpu/drm/via/via_irq.c
+@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+ if (crtc != 0)
+ return 0;
+
+- return atomic_read(&dev_priv->vbl_received);
++ return atomic_read_unchecked(&dev_priv->vbl_received);
+ }
+
+ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+
+ status = VIA_READ(VIA_REG_INTERRUPT);
+ if (status & VIA_IRQ_VBLANK_PENDING) {
+- atomic_inc(&dev_priv->vbl_received);
+- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++ atomic_inc_unchecked(&dev_priv->vbl_received);
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
+ do_gettimeofday(&cur_vblank);
+ if (dev_priv->last_vblank_valid) {
+ dev_priv->usec_per_vblank =
+@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+ dev_priv->last_vblank = cur_vblank;
+ dev_priv->last_vblank_valid = 1;
+ }
+- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
+ DRM_DEBUG("US per vblank is: %u\n",
+ dev_priv->usec_per_vblank);
+ }
+@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ if (status & cur_irq->pending_mask) {
+- atomic_inc(&cur_irq->irq_received);
++ atomic_inc_unchecked(&cur_irq->irq_received);
+ DRM_WAKEUP(&cur_irq->irq_queue);
+ handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+ masks[irq][4]));
+- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
+ } else {
+ DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+ (((cur_irq_sequence =
+- atomic_read(&cur_irq->irq_received)) -
++ atomic_read_unchecked(&cur_irq->irq_received)) -
+ *sequence) <= (1 << 23)));
+ }
+ *sequence = cur_irq_sequence;
+@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+- atomic_set(&cur_irq->irq_received, 0);
++ atomic_set_unchecked(&cur_irq->irq_received, 0);
+ cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+ cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+ DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+ case VIA_IRQ_RELATIVE:
+ irqwait->request.sequence +=
+- atomic_read(&cur_irq->irq_received);
++ atomic_read_unchecked(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ case VIA_IRQ_ABSOLUTE:
+ break;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 0e3fa7d..35f9ed6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -260,7 +260,7 @@ struct vmw_private {
+ * Fencing and IRQs.
+ */
+
+- atomic_t marker_seq;
++ atomic_unchecked_t marker_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ int fence_queue_waiters; /* Protected by hw_mutex */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index a0c2f12..68ae6cb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
++ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+ vmw_marker_queue_init(&fifo->marker_queue);
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+ if (reserveable)
+ iowrite32(bytes, fifo_mem +
+ SVGA_FIFO_RESERVED);
+- return fifo_mem + (next_cmd >> 2);
++ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
+ } else {
+ need_bounce = true;
+ }
+@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+- *seqno = atomic_read(&dev_priv->marker_seq);
++ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+ false, 3*HZ);
+@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+ }
+
+ do {
+- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
++ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
+ } while (*seqno == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index 66917c6..2dcc8ae 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -135,7 +135,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
+ int ret;
+
+ num_clips = arg->num_clips;
+- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
++ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+@@ -221,7 +221,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+ int ret;
+
+ num_clips = arg->num_clips;
+- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
++ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+ if (unlikely(num_clips == 0))
+ return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index cabc95f..14b3d77 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
+ * emitted. Then the fence is stale and signaled.
+ */
+
+- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
++ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
+ > VMW_FENCE_WRAP);
+
+ return ret;
+@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+- signal_seq = atomic_read(&dev_priv->marker_seq);
++ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
+ ret = 0;
+
+ for (;;) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+index 8a8725c2..afed796 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
+@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
+ while (!vmw_lag_lt(queue, us)) {
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->head))
+- seqno = atomic_read(&dev_priv->marker_seq);
++ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
+ else {
+ marker = list_first_entry(&queue->head,
+ struct vmw_marker, head);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 9ac4389..5c05af3 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2102,7 +2102,7 @@ static bool hid_ignore(struct hid_device *hdev)
+
+ int hid_add_device(struct hid_device *hdev)
+ {
+- static atomic_t id = ATOMIC_INIT(0);
++ static atomic_unchecked_t id = ATOMIC_INIT(0);
+ int ret;
+
+ if (WARN_ON(hdev->status & HID_STAT_ADDED))
+@@ -2117,7 +2117,7 @@ int hid_add_device(struct hid_device *hdev)
+ /* XXX hack, any other cleaner solution after the driver core
+ * is converted to allow more than 20 bytes as the device name? */
+ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
+- hdev->vendor, hdev->product, atomic_inc_return(&id));
++ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
+
+ hid_debug_register(hdev, dev_name(&hdev->dev));
+ ret = device_add(&hdev->dev);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index a605ba1..fb91952 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -195,6 +195,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ td->inputmode = field->report->id;
+ break;
+ case HID_DG_CONTACTMAX:
++ /* Ignore if value count is out of bounds. */
++ if (field->report_count < 1)
++ break;
+ td->maxcontacts = field->value[0];
+ if (td->mtclass->maxcontacts)
+ /* check if the maxcontacts is given by the class */
+@@ -506,7 +509,6 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
+ if (field->index == td->last_field_index
+ && td->num_received >= td->num_expected)
+ mt_emit_event(td, field->hidinput->input);
+-
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
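
Note: the HID_DG_CONTACTMAX hunk above is a bounds check — field->value[0] is only meaningful when the report actually carries at least one value, and a crafted descriptor can declare a report count of zero. A sketch of the invariant being enforced, using a hypothetical reduction of the struct (the real one lives in hid.h):

#include <stdio.h>

/* value[] holds report_count entries parsed from the device-controlled
 * report descriptor, so report_count must be validated before indexing. */
struct field {
    int report_count;
    int value[8];
};

static int read_maxcontacts(const struct field *f)
{
    if (f->report_count < 1)    /* the added check */
        return -1;
    return f->value[0];
}

int main(void)
{
    struct field bad = { .report_count = 0 };
    struct field ok  = { .report_count = 1, .value = { 10 } };
    printf("%d %d\n", read_maxcontacts(&bad), read_maxcontacts(&ok));
    return 0;
}
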
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 4ef02b2..8a96831 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ break;
+
+ case HIDIOCAPPLICATION:
+- if (arg < 0 || arg >= hid->maxapplication)
++ if (arg >= hid->maxapplication)
+ break;
+
+ for (i = 0; i < hid->maxcollection; i++)
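
Note: in hiddev_ioctl() the argument is unsigned, so the dropped "arg < 0" test could never fire — a caller passing a negative int arrives as a huge unsigned value and is already caught by the upper bound (gcc's -Wtype-limits warns about such tautologies). A minimal demonstration:

#include <stdio.h>

static int in_range(unsigned long arg, unsigned int max)
{
    /* "arg < 0" would be always-false here; the bound test alone also
     * rejects negative callers after unsigned conversion. */
    return arg < max;
}

int main(void)
{
    printf("%d\n", in_range((unsigned long)-1, 16)); /* 0: rejected */
    printf("%d\n", in_range(3, 16));                 /* 1: accepted */
    return 0;
}
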
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index f4c3d28..82f45a9 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ int ret = 0;
+ int t;
+
+- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+- atomic_inc(&vmbus_connection.next_gpadl_handle);
++ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
++ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
+
+ ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
+ if (ret)
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index 0fb100e..baf87e5 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
+ u64 output_address = (output) ? virt_to_phys(output) : 0;
+ u32 output_address_hi = output_address >> 32;
+ u32 output_address_lo = output_address & 0xFFFFFFFF;
+- void *hypercall_page = hv_context.hypercall_page;
++ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
+
+ __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+ "=a"(hv_status_lo) : "d" (control_hi),
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 0aee112..b72d21f 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -556,7 +556,7 @@ enum vmbus_connect_state {
+ struct vmbus_connection {
+ enum vmbus_connect_state conn_state;
+
+- atomic_t next_gpadl_handle;
++ atomic_unchecked_t next_gpadl_handle;
+
+ /*
+ * Represents channel interrupts. Each bit position represents a
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 44442d5..9f4b007 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
+ {
+ int ret = 0;
+
+- static atomic_t device_num = ATOMIC_INIT(0);
++ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
+
+ dev_set_name(&child_device_obj->device, "vmbus_0_%d",
+- atomic_inc_return(&device_num));
++ atomic_inc_return_unchecked(&device_num));
+
+ child_device_obj->device.bus = &hv_bus;
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 66f6729..4de8c4a 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -124,7 +124,7 @@ struct rw_sensor_template {
+ struct device_attribute *devattr,
+ const char *buf, size_t count);
+ int index;
+-};
++} __do_const;
+
+ /* Averaging interval */
+ static int update_avg_interval(struct acpi_power_meter_resource *resource)
+@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
+ return res;
+
+ temp /= 1000;
+- if (temp < 0)
+- return -EINVAL;
+
+ mutex_lock(&resource->lock);
+ resource->trip[attr->index - 7] = temp;
+@@ -622,7 +620,7 @@ static int register_ro_attrs(struct acpi_power_meter_resource *resource,
+ struct ro_sensor_template *ro)
+ {
+ struct device *dev = &resource->acpi_dev->dev;
+- struct sensor_device_attribute *sensors =
++ sensor_device_attribute_no_const *sensors =
+ &resource->sensors[resource->num_sensors];
+ int res = 0;
+
+@@ -650,7 +648,7 @@ static int register_rw_attrs(struct acpi_power_meter_resource *resource,
+ struct rw_sensor_template *rw)
+ {
+ struct device *dev = &resource->acpi_dev->dev;
+- struct sensor_device_attribute *sensors =
++ sensor_device_attribute_no_const *sensors =
+ &resource->sensors[resource->num_sensors];
+ int res = 0;
+
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 0b86d47..8066c3f 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -1082,7 +1082,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
+ {
+ struct applesmc_node_group *grp;
+ struct applesmc_dev_attr *node;
+- struct attribute *attr;
++ attribute_no_const *attr;
+ int ret, i;
+
+ for (grp = groups; grp->format; grp++) {
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index 83d2fbd6..93017f7 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -149,10 +149,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
+ struct atk_sensor_data {
+ struct list_head list;
+ struct atk_data *data;
+- struct device_attribute label_attr;
+- struct device_attribute input_attr;
+- struct device_attribute limit1_attr;
+- struct device_attribute limit2_attr;
++ device_attribute_no_const label_attr;
++ device_attribute_no_const input_attr;
++ device_attribute_no_const limit1_attr;
++ device_attribute_no_const limit2_attr;
+ char label_attr_name[ATTR_NAME_SIZE];
+ char input_attr_name[ATTR_NAME_SIZE];
+ char limit1_attr_name[ATTR_NAME_SIZE];
+@@ -271,7 +271,7 @@ static ssize_t atk_name_show(struct device *dev,
+ static struct device_attribute atk_name_attr =
+ __ATTR(name, 0444, atk_name_show, NULL);
+
+-static void atk_init_attribute(struct device_attribute *attr, char *name,
++static void atk_init_attribute(device_attribute_no_const *attr, char *name,
+ sysfs_show_func show)
+ {
+ sysfs_attr_init(&attr->attr);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 3d630bb..77756d7 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -787,7 +787,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block coretemp_cpu_notifier __refdata = {
++static struct notifier_block coretemp_cpu_notifier = {
+ .notifier_call = coretemp_cpu_callback,
+ };
+
+diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
+index 6a967d7..7f0f923 100644
+--- a/drivers/hwmon/ibmaem.c
++++ b/drivers/hwmon/ibmaem.c
+@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
+ struct aem_rw_sensor_template *rw)
+ {
+ struct device *dev = &data->pdev->dev;
+- struct sensor_device_attribute *sensors = data->sensors;
++ sensor_device_attribute_no_const *sensors = data->sensors;
+ int err;
+
+ /* Set up read-only sensors */
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index d89b339..fdb124c 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -809,7 +809,7 @@ static ssize_t pmbus_show_label(struct device *dev,
+
+ #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
+ do { \
+- struct sensor_device_attribute *a \
++ sensor_device_attribute_no_const *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
+ sysfs_attr_init(&a->dev_attr.attr); \
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index 3e3153e..d445962 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -166,7 +166,7 @@ struct sht15_data {
+ int supply_uV;
+ bool supply_uV_valid;
+ struct work_struct update_supply_work;
+- atomic_t interrupt_handled;
++ atomic_unchecked_t interrupt_handled;
+ };
+
+ /**
+@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
+ return ret;
+
+ gpio_direction_input(data->pdata->gpio_data);
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ if (gpio_get_value(data->pdata->gpio_data) == 0) {
+ disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ /* Only relevant if the interrupt hasn't occurred. */
+- if (!atomic_read(&data->interrupt_handled))
++ if (!atomic_read_unchecked(&data->interrupt_handled))
+ schedule_work(&data->read_work);
+ }
+ ret = wait_event_timeout(data->wait_queue,
+@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
+
+ /* First disable the interrupt */
+ disable_irq_nosync(irq);
+- atomic_inc(&data->interrupt_handled);
++ atomic_inc_unchecked(&data->interrupt_handled);
+ /* Then schedule a reading work struct */
+ if (data->state != SHT15_READING_NOTHING)
+ schedule_work(&data->read_work);
+@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
+ * If not, then start the interrupt again - care here as could
+ * have gone low in meantime so verify it hasn't!
+ */
+- atomic_set(&data->interrupt_handled, 0);
++ atomic_set_unchecked(&data->interrupt_handled, 0);
+ enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ /* If still not occurred or another handler has been scheduled */
+ if (gpio_get_value(data->pdata->gpio_data)
+- || atomic_read(&data->interrupt_handled))
++ || atomic_read_unchecked(&data->interrupt_handled))
+ return;
+ }
+
+diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
+index 8eac67d..d7b2fa5 100644
+--- a/drivers/hwmon/via-cputemp.c
++++ b/drivers/hwmon/via-cputemp.c
+@@ -304,7 +304,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
++static struct notifier_block via_cputemp_cpu_notifier = {
+ .notifier_call = via_cputemp_cpu_callback,
+ };
+
+diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
+index 378fcb5..5e91fa8 100644
+--- a/drivers/i2c/busses/i2c-amd756-s4882.c
++++ b/drivers/i2c/busses/i2c-amd756-s4882.c
+@@ -43,7 +43,7 @@
+ extern struct i2c_adapter amd756_smbus;
+
+ static struct i2c_adapter *s4882_adapter;
+-static struct i2c_algorithm *s4882_algo;
++static i2c_algorithm_no_const *s4882_algo;
+
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(amd756_lock);
+diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
+index 7636671..53a2cab 100644
+--- a/drivers/i2c/busses/i2c-diolan-u2c.c
++++ b/drivers/i2c/busses/i2c-diolan-u2c.c
+@@ -99,7 +99,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
+ /* usb layer */
+
+ /* Send command to device, and get response. */
+-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
++static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
+ {
+ int ret = 0;
+ int actual;
+diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
+index 29015eb..af2d8e9 100644
+--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
++++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
+@@ -41,7 +41,7 @@
+ extern struct i2c_adapter *nforce2_smbus;
+
+ static struct i2c_adapter *s4985_adapter;
+-static struct i2c_algorithm *s4985_algo;
++static i2c_algorithm_no_const *s4985_algo;
+
+ /* Wrapper access functions for multiplexed SMBus */
+ static DEFINE_MUTEX(nforce2_lock);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 57a45ce8..51bd6c1 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -276,7 +276,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
+ res = -EINVAL;
+ break;
+ }
+- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
++ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
+ rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
+ if (IS_ERR(rdwr_pa[i].buf)) {
+ res = PTR_ERR(rdwr_pa[i].buf);
+diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
+index 57d00ca..0145194 100644
+--- a/drivers/ide/aec62xx.c
++++ b/drivers/ide/aec62xx.c
+@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
+ .cable_detect = atp86x_cable_detect,
+ };
+
+-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
++static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
+ { /* 0: AEC6210 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_aec62xx,
+diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
+index 2c8016a..911a27c 100644
+--- a/drivers/ide/alim15x3.c
++++ b/drivers/ide/alim15x3.c
+@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info ali15x3_chipset __devinitdata = {
++static const struct ide_port_info ali15x3_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_ali15x3,
+ .init_hwif = init_hwif_ali15x3,
+diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
+index 3747b25..56fc995 100644
+--- a/drivers/ide/amd74xx.c
++++ b/drivers/ide/amd74xx.c
+@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
++static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
+ /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
+ /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
+ /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
+diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
+index 15f0ead..cb43480 100644
+--- a/drivers/ide/atiixp.c
++++ b/drivers/ide/atiixp.c
+@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
+ .cable_detect = atiixp_cable_detect,
+ };
+
+-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
++static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
+ { /* 0: IXP200/300/400/700 */
+ .name = DRV_NAME,
+ .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
+diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
+index 5f80312..d1fc438 100644
+--- a/drivers/ide/cmd64x.c
++++ b/drivers/ide/cmd64x.c
+@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
++static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
+ { /* 0: CMD643 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_cmd64x,
+diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
+index 2c1e5f7..1444762 100644
+--- a/drivers/ide/cs5520.c
++++ b/drivers/ide/cs5520.c
+@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
+ .set_dma_mode = cs5520_set_dma_mode,
+ };
+
+-static const struct ide_port_info cyrix_chipset __devinitdata = {
++static const struct ide_port_info cyrix_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
+ .port_ops = &cs5520_port_ops,
+diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
+index 4dc4eb9..49b40ad 100644
+--- a/drivers/ide/cs5530.c
++++ b/drivers/ide/cs5530.c
+@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
+ .udma_filter = cs5530_udma_filter,
+ };
+
+-static const struct ide_port_info cs5530_chipset __devinitdata = {
++static const struct ide_port_info cs5530_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_cs5530,
+ .init_hwif = init_hwif_cs5530,
+diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
+index 5059faf..18d4c85 100644
+--- a/drivers/ide/cs5535.c
++++ b/drivers/ide/cs5535.c
+@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
+ .cable_detect = cs5535_cable_detect,
+ };
+
+-static const struct ide_port_info cs5535_chipset __devinitdata = {
++static const struct ide_port_info cs5535_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &cs5535_port_ops,
+ .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
+diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
+index 847553f..3ffb49d 100644
+--- a/drivers/ide/cy82c693.c
++++ b/drivers/ide/cy82c693.c
+@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
+ .set_dma_mode = cy82c693_set_dma_mode,
+ };
+
+-static const struct ide_port_info cy82c693_chipset __devinitdata = {
++static const struct ide_port_info cy82c693_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_iops = init_iops_cy82c693,
+ .port_ops = &cy82c693_port_ops,
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
+index 58c51cd..4aec3b8 100644
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
+ }
+ };
+
+-static const struct hpt_info hpt36x __devinitdata = {
++static const struct hpt_info hpt36x __devinitconst = {
+ .chip_name = "HPT36x",
+ .chip_type = HPT36x,
+ .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
+@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
+ .timings = &hpt36x_timings
+ };
+
+-static const struct hpt_info hpt370 __devinitdata = {
++static const struct hpt_info hpt370 __devinitconst = {
+ .chip_name = "HPT370",
+ .chip_type = HPT370,
+ .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt370a __devinitdata = {
++static const struct hpt_info hpt370a __devinitconst = {
+ .chip_name = "HPT370A",
+ .chip_type = HPT370A,
+ .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
+@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt374 __devinitdata = {
++static const struct hpt_info hpt374 __devinitconst = {
+ .chip_name = "HPT374",
+ .chip_type = HPT374,
+ .udma_mask = ATA_UDMA5,
+@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372 __devinitdata = {
++static const struct hpt_info hpt372 __devinitconst = {
+ .chip_name = "HPT372",
+ .chip_type = HPT372,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372a __devinitdata = {
++static const struct hpt_info hpt372a __devinitconst = {
+ .chip_name = "HPT372A",
+ .chip_type = HPT372A,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt302 __devinitdata = {
++static const struct hpt_info hpt302 __devinitconst = {
+ .chip_name = "HPT302",
+ .chip_type = HPT302,
+ .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt371 __devinitdata = {
++static const struct hpt_info hpt371 __devinitconst = {
+ .chip_name = "HPT371",
+ .chip_type = HPT371,
+ .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt372n __devinitdata = {
++static const struct hpt_info hpt372n __devinitconst = {
+ .chip_name = "HPT372N",
+ .chip_type = HPT372N,
+ .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt302n __devinitdata = {
++static const struct hpt_info hpt302n __devinitconst = {
+ .chip_name = "HPT302N",
+ .chip_type = HPT302N,
+ .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
+ .timings = &hpt37x_timings
+ };
+
+-static const struct hpt_info hpt371n __devinitdata = {
++static const struct hpt_info hpt371n __devinitconst = {
+ .chip_name = "HPT371N",
+ .chip_type = HPT371N,
+ .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
+@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
++static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
+ { /* 0: HPT36x */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_hpt366,
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index 8126824..55a2798 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ if ((unsigned long)buf & alignment
+ || blk_rq_bytes(rq) & q->dma_pad_mask
+- || object_is_on_stack(buf))
++ || object_starts_on_stack(buf))
+ drive->dma = 0;
+ }
+ }
+diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
+index a743e68..1cfd674 100644
+--- a/drivers/ide/ide-pci-generic.c
++++ b/drivers/ide/ide-pci-generic.c
+@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
+ .udma_mask = ATA_UDMA6, \
+ }
+
+-static const struct ide_port_info generic_chipsets[] __devinitdata = {
++static const struct ide_port_info generic_chipsets[] __devinitconst = {
+ /* 0: Unknown */
+ DECLARE_GENERIC_PCI_DEV(0),
+
+diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
+index 560e66d..d5dd180 100644
+--- a/drivers/ide/it8172.c
++++ b/drivers/ide/it8172.c
+@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
+ .set_dma_mode = it8172_set_dma_mode,
+ };
+
+-static const struct ide_port_info it8172_port_info __devinitdata = {
++static const struct ide_port_info it8172_port_info __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &it8172_port_ops,
+ .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
+diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
+index 46816ba..1847aeb 100644
+--- a/drivers/ide/it8213.c
++++ b/drivers/ide/it8213.c
+@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
+ .cable_detect = it8213_cable_detect,
+ };
+
+-static const struct ide_port_info it8213_chipset __devinitdata = {
++static const struct ide_port_info it8213_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x41, 0x80, 0x80} },
+ .port_ops = &it8213_port_ops,
+diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
+index 2e3169f..c5611db 100644
+--- a/drivers/ide/it821x.c
++++ b/drivers/ide/it821x.c
+@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
+ .cable_detect = it821x_cable_detect,
+ };
+
+-static const struct ide_port_info it821x_chipset __devinitdata = {
++static const struct ide_port_info it821x_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_it821x,
+ .init_hwif = init_hwif_it821x,
+diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
+index 74c2c4a..efddd7d 100644
+--- a/drivers/ide/jmicron.c
++++ b/drivers/ide/jmicron.c
+@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
+ .cable_detect = jmicron_cable_detect,
+ };
+
+-static const struct ide_port_info jmicron_chipset __devinitdata = {
++static const struct ide_port_info jmicron_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
+ .port_ops = &jmicron_port_ops,
+diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
+index 95327a2..73f78d8 100644
+--- a/drivers/ide/ns87415.c
++++ b/drivers/ide/ns87415.c
+@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
+ .dma_sff_read_status = superio_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info ns87415_chipset __devinitdata = {
++static const struct ide_port_info ns87415_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_ns87415,
+ .tp_ops = &ns87415_tp_ops,
+diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
+index 1a53a4c..39edc66 100644
+--- a/drivers/ide/opti621.c
++++ b/drivers/ide/opti621.c
+@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
+ .set_pio_mode = opti621_set_pio_mode,
+ };
+
+-static const struct ide_port_info opti621_chipset __devinitdata = {
++static const struct ide_port_info opti621_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+ .port_ops = &opti621_port_ops,
+diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
+index 9546fe2..2e5ceb6 100644
+--- a/drivers/ide/pdc202xx_new.c
++++ b/drivers/ide/pdc202xx_new.c
+@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
++static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
+ /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
+ /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
+ };
+diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
+index 3a35ec6..5634510 100644
+--- a/drivers/ide/pdc202xx_old.c
++++ b/drivers/ide/pdc202xx_old.c
+@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
+ .max_sectors = sectors, \
+ }
+
+-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
++static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
+ { /* 0: PDC20246 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_pdc202xx,
+diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
+index 1892e81..fe0fd60 100644
+--- a/drivers/ide/piix.c
++++ b/drivers/ide/piix.c
+@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
+ .udma_mask = udma, \
+ }
+
+-static const struct ide_port_info piix_pci_info[] __devinitdata = {
++static const struct ide_port_info piix_pci_info[] __devinitconst = {
+ /* 0: MPIIX */
+ { /*
+ * MPIIX actually has only a single IDE channel mapped to
+diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
+index a6414a8..c04173e 100644
+--- a/drivers/ide/rz1000.c
++++ b/drivers/ide/rz1000.c
+@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
+ }
+ }
+
+-static const struct ide_port_info rz1000_chipset __devinitdata = {
++static const struct ide_port_info rz1000_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_NO_DMA,
+ };
+diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
+index 356b9b5..d4758eb 100644
+--- a/drivers/ide/sc1200.c
++++ b/drivers/ide/sc1200.c
+@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info sc1200_chipset __devinitdata = {
++static const struct ide_port_info sc1200_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .port_ops = &sc1200_port_ops,
+ .dma_ops = &sc1200_dma_ops,
+diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
+index b7f5b0c..9701038 100644
+--- a/drivers/ide/scc_pata.c
++++ b/drivers/ide/scc_pata.c
+@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
+ .dma_sff_read_status = scc_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info scc_chipset __devinitdata = {
++static const struct ide_port_info scc_chipset __devinitconst = {
+ .name = "sccIDE",
+ .init_iops = init_iops_scc,
+ .init_dma = scc_init_dma,
+diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
+index 35fb8da..24d72ef 100644
+--- a/drivers/ide/serverworks.c
++++ b/drivers/ide/serverworks.c
+@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
+ .cable_detect = svwks_cable_detect,
+ };
+
+-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
++static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
+ { /* 0: OSB4 */
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_svwks,
+diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
+index ddeda44..46f7e30 100644
+--- a/drivers/ide/siimage.c
++++ b/drivers/ide/siimage.c
+@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
+ .udma_mask = ATA_UDMA6, \
+ }
+
+-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
++static const struct ide_port_info siimage_chipsets[] __devinitconst = {
+ /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
+ /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
+ };
+diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
+index 4a00225..09e61b4 100644
+--- a/drivers/ide/sis5513.c
++++ b/drivers/ide/sis5513.c
+@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
+ .cable_detect = sis_cable_detect,
+ };
+
+-static const struct ide_port_info sis5513_chipset __devinitdata = {
++static const struct ide_port_info sis5513_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_sis5513,
+ .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
+diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
+index f21dc2a..d051cd2 100644
+--- a/drivers/ide/sl82c105.c
++++ b/drivers/ide/sl82c105.c
+@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info sl82c105_chipset __devinitdata = {
++static const struct ide_port_info sl82c105_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_sl82c105,
+ .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
+diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
+index 864ffe0..863a5e92 100644
+--- a/drivers/ide/slc90e66.c
++++ b/drivers/ide/slc90e66.c
+@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
+ .cable_detect = slc90e66_cable_detect,
+ };
+
+-static const struct ide_port_info slc90e66_chipset __devinitdata = {
++static const struct ide_port_info slc90e66_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
+ .port_ops = &slc90e66_port_ops,
+diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
+index 4799d5c..1794678 100644
+--- a/drivers/ide/tc86c001.c
++++ b/drivers/ide/tc86c001.c
+@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
+ .dma_sff_read_status = ide_dma_sff_read_status,
+ };
+
+-static const struct ide_port_info tc86c001_chipset __devinitdata = {
++static const struct ide_port_info tc86c001_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_tc86c001,
+ .port_ops = &tc86c001_port_ops,
+diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
+index 281c914..55ce1b8 100644
+--- a/drivers/ide/triflex.c
++++ b/drivers/ide/triflex.c
+@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
+ .set_dma_mode = triflex_set_mode,
+ };
+
+-static const struct ide_port_info triflex_device __devinitdata = {
++static const struct ide_port_info triflex_device __devinitconst = {
+ .name = DRV_NAME,
+ .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
+ .port_ops = &triflex_port_ops,
+diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
+index 4b42ca0..e494a98 100644
+--- a/drivers/ide/trm290.c
++++ b/drivers/ide/trm290.c
+@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
+ .dma_check = trm290_dma_check,
+ };
+
+-static const struct ide_port_info trm290_chipset __devinitdata = {
++static const struct ide_port_info trm290_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_hwif = init_hwif_trm290,
+ .tp_ops = &trm290_tp_ops,
+diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
+index f46f49c..eb77678 100644
+--- a/drivers/ide/via82cxxx.c
++++ b/drivers/ide/via82cxxx.c
+@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
+ .cable_detect = via82cxxx_cable_detect,
+ };
+
+-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
++static const struct ide_port_info via82cxxx_chipset __devinitconst = {
+ .name = DRV_NAME,
+ .init_chipset = init_chipset_via82cxxx,
+ .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
+index eb0e2cc..14241c7 100644
+--- a/drivers/ieee802154/fakehard.c
++++ b/drivers/ieee802154/fakehard.c
+@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
+ phy->transmit_power = 0xbf;
+
+ dev->netdev_ops = &fake_ops;
+- dev->ml_priv = &fake_mlme;
++ dev->ml_priv = (void *)&fake_mlme;
+
+ priv = netdev_priv(dev);
+ priv->phy = phy;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 8b72f39..55df4c8 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
+
+ struct cm_counter_group {
+ struct kobject obj;
+- atomic_long_t counter[CM_ATTR_COUNT];
++ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
+ };
+
+ struct cm_counter_attribute {
+@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
+ struct ib_mad_send_buf *msg = NULL;
+ int ret;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REQ_COUNTER]);
+
+ /* Quick state check to discard duplicate REQs. */
+@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
+ if (!cm_id_priv)
+ return;
+
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_REP_COUNTER]);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ if (ret)
+@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
+ if (cm_id_priv->id.state != IB_CM_REP_SENT &&
+ cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
+ spin_unlock_irq(&cm_id_priv->lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_RTU_COUNTER]);
+ goto out;
+ }
+@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
+ cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
+ dreq_msg->local_comm_id);
+ if (!cm_id_priv) {
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ cm_issue_drep(work->port, work->mad_recv_wc);
+ return -EINVAL;
+@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
+ case IB_CM_MRA_REP_RCVD:
+ break;
+ case IB_CM_TIMEWAIT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_DREQ_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_DREQ_COUNTER]);
+ goto unlock;
+ default:
+@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
+ ib_modify_mad(cm_id_priv->av.port->mad_agent,
+ cm_id_priv->msg, timeout)) {
+ if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+- atomic_long_inc(&work->port->
++ atomic_long_inc_unchecked(&work->port->
+ counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ goto out;
+@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
+ break;
+ case IB_CM_MRA_REQ_RCVD:
+ case IB_CM_MRA_REP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_MRA_COUNTER]);
+ /* fall through */
+ default:
+@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
+ case IB_CM_LAP_IDLE:
+ break;
+ case IB_CM_MRA_LAP_SENT:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ goto unlock;
+@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
+ cm_free_msg(msg);
+ goto deref;
+ case IB_CM_LAP_RCVD:
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_LAP_COUNTER]);
+ goto unlock;
+ default:
+@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
+ cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
+ if (cur_cm_id_priv) {
+ spin_unlock_irq(&cm.lock);
+- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
+ counter[CM_SIDR_REQ_COUNTER]);
+ goto out; /* Duplicate message. */
+ }
+@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+ msg->retries = 1;
+
+- atomic_long_add(1 + msg->retries,
++ atomic_long_add_unchecked(1 + msg->retries,
+ &port->counter_group[CM_XMIT].counter[attr_index]);
+ if (msg->retries)
+- atomic_long_add(msg->retries,
++ atomic_long_add_unchecked(msg->retries,
+ &port->counter_group[CM_XMIT_RETRIES].
+ counter[attr_index]);
+
+@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ }
+
+ attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+- atomic_long_inc(&port->counter_group[CM_RECV].
++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
+ counter[attr_id - CM_ATTR_ID_OFFSET]);
+
+ work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
+@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+ cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+
+ return sprintf(buf, "%ld\n",
+- atomic_long_read(&group->counter[cm_attr->index]));
++ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
+ }
+
+ static const struct sysfs_ops cm_counter_ops = {
+diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
+index 176c8f9..2627b62 100644
+--- a/drivers/infiniband/core/fmr_pool.c
++++ b/drivers/infiniband/core/fmr_pool.c
+@@ -98,8 +98,8 @@ struct ib_fmr_pool {
+
+ struct task_struct *thread;
+
+- atomic_t req_ser;
+- atomic_t flush_ser;
++ atomic_unchecked_t req_ser;
++ atomic_unchecked_t flush_ser;
+
+ wait_queue_head_t force_wait;
+ };
+@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+ struct ib_fmr_pool *pool = pool_ptr;
+
+ do {
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
+ ib_fmr_batch_release(pool);
+
+- atomic_inc(&pool->flush_ser);
++ atomic_inc_unchecked(&pool->flush_ser);
+ wake_up_interruptible(&pool->force_wait);
+
+ if (pool->flush_function)
+@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
++ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
+ !kthread_should_stop())
+ schedule();
+ __set_current_state(TASK_RUNNING);
+@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
+ pool->dirty_watermark = params->dirty_watermark;
+ pool->dirty_len = 0;
+ spin_lock_init(&pool->pool_lock);
+- atomic_set(&pool->req_ser, 0);
+- atomic_set(&pool->flush_ser, 0);
++ atomic_set_unchecked(&pool->req_ser, 0);
++ atomic_set_unchecked(&pool->flush_ser, 0);
+ init_waitqueue_head(&pool->force_wait);
+
+ pool->thread = kthread_run(ib_fmr_cleanup_thread,
+@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+ }
+ spin_unlock_irq(&pool->pool_lock);
+
+- serial = atomic_inc_return(&pool->req_ser);
++ serial = atomic_inc_return_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+
+ if (wait_event_interruptible(pool->force_wait,
+- atomic_read(&pool->flush_ser) - serial >= 0))
++ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
+ return -EINTR;
+
+ return 0;
+@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+ } else {
+ list_add_tail(&fmr->list, &pool->dirty_list);
+ if (++pool->dirty_len >= pool->dirty_watermark) {
+- atomic_inc(&pool->req_ser);
++ atomic_inc_unchecked(&pool->req_ser);
+ wake_up_process(pool->thread);
+ }
+ }
+diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
+index 40c8353..946b0e4 100644
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+- static atomic_t key;
++ static atomic_unchecked_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
+- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
++ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
+index e571e60..523c505 100644
+--- a/drivers/infiniband/hw/ehca/ehca_irq.c
++++ b/drivers/infiniband/hw/ehca/ehca_irq.c
+@@ -883,7 +883,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
++static struct notifier_block comp_pool_callback_nb = {
+ .notifier_call = comp_pool_callback,
+ .priority = 0,
+ };
+diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
+index 31ae1b1..fe606ac 100644
+--- a/drivers/infiniband/hw/ipath/ipath_fs.c
++++ b/drivers/infiniband/hw/ipath/ipath_fs.c
+@@ -410,6 +410,7 @@ static struct file_system_type ipathfs_fs_type = {
+ .mount = ipathfs_mount,
+ .kill_sb = ipathfs_kill_super,
+ };
++MODULE_ALIAS_FS("ipathfs");
+
+ int __init ipath_init_ipathfs(void)
+ {
+diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
+index 79b3dbc..96e5fcc 100644
+--- a/drivers/infiniband/hw/ipath/ipath_rc.c
++++ b/drivers/infiniband/hw/ipath/ipath_rc.c
+@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ struct ib_atomic_eth *ateth;
+ struct ipath_ack_entry *e;
+ u64 vaddr;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
+index 1f95bba..9530f87 100644
+--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
++++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
+@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+- atomic64_t *maddr;
++ atomic64_unchecked_t *maddr;
+ enum ib_wc_status send_status;
+
+ /*
+@@ -382,11 +382,11 @@ again:
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
++ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+- (u64) atomic64_add_return(sdata, maddr) - sdata :
++ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ goto send_comp;
+diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
+index 9d3e5c1..6f166df 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
++++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
+@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
+ mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
+ }
+
+-int mthca_QUERY_FW(struct mthca_dev *dev)
++int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
+ {
+ struct mthca_mailbox *mailbox;
+ u32 *outbox;
+@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ CMD_TIME_CLASS_B);
+ }
+
+-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
++int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int num_mtt)
+ {
+ return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
+ 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
+ }
+
+-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
++int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int eq_num)
+ {
+ return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
+@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
+ CMD_TIME_CLASS_B);
+ }
+
+-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
++int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+ int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ void *in_mad, void *response_mad)
+ {
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index aa12a53..c145bc2 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -692,7 +692,7 @@ err_close:
+ return err;
+ }
+
+-static int mthca_setup_hca(struct mthca_dev *dev)
++static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
+ {
+ int err;
+
+diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
+index ed9a989..6aa5dc2 100644
+--- a/drivers/infiniband/hw/mthca/mthca_mr.c
++++ b/drivers/infiniband/hw/mthca/mthca_mr.c
+@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
+ * through the bitmaps)
+ */
+
+-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
++static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
+ {
+ int o;
+ int m;
+@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
+ return key;
+ }
+
+-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
++int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+ u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
+ {
+ struct mthca_mailbox *mailbox;
+@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
+ return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
+ }
+
+-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
++int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+ u64 *buffer_list, int buffer_size_shift,
+ int list_len, u64 iova, u64 total_size,
+ u32 access, struct mthca_mr *mr)
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 5b71d43..35a9e14 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -763,7 +763,7 @@ unlock:
+ return 0;
+ }
+
+-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
++static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+ {
+ struct mthca_dev *dev = to_mdev(ibcq->device);
+ struct mthca_cq *cq = to_mcq(ibcq);
+diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
+index 5965b3d..16817fb 100644
+--- a/drivers/infiniband/hw/nes/nes.c
++++ b/drivers/infiniband/hw/nes/nes.c
+@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+ LIST_HEAD(nes_adapter_list);
+ static LIST_HEAD(nes_dev_list);
+
+-atomic_t qps_destroyed;
++atomic_unchecked_t qps_destroyed;
+
+ static unsigned int ee_flsh_adapter;
+ static unsigned int sysfs_nonidx_addr;
+@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
+ struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+- atomic_inc(&qps_destroyed);
++ atomic_inc_unchecked(&qps_destroyed);
+
+ /* Free the control structures */
+
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index 3ade373..f3b68b7 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
+ extern unsigned int wqm_quanta;
+ extern struct list_head nes_adapter_list;
+
+-extern atomic_t cm_connects;
+-extern atomic_t cm_accepts;
+-extern atomic_t cm_disconnects;
+-extern atomic_t cm_closes;
+-extern atomic_t cm_connecteds;
+-extern atomic_t cm_connect_reqs;
+-extern atomic_t cm_rejects;
+-extern atomic_t mod_qp_timouts;
+-extern atomic_t qps_created;
+-extern atomic_t qps_destroyed;
+-extern atomic_t sw_qps_destroyed;
++extern atomic_unchecked_t cm_connects;
++extern atomic_unchecked_t cm_accepts;
++extern atomic_unchecked_t cm_disconnects;
++extern atomic_unchecked_t cm_closes;
++extern atomic_unchecked_t cm_connecteds;
++extern atomic_unchecked_t cm_connect_reqs;
++extern atomic_unchecked_t cm_rejects;
++extern atomic_unchecked_t mod_qp_timouts;
++extern atomic_unchecked_t qps_created;
++extern atomic_unchecked_t qps_destroyed;
++extern atomic_unchecked_t sw_qps_destroyed;
+ extern u32 mh_detected;
+ extern u32 mh_pauses_sent;
+ extern u32 cm_packets_sent;
+@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
+ extern u32 cm_packets_received;
+ extern u32 cm_packets_dropped;
+ extern u32 cm_packets_retrans;
+-extern atomic_t cm_listens_created;
+-extern atomic_t cm_listens_destroyed;
++extern atomic_unchecked_t cm_listens_created;
++extern atomic_unchecked_t cm_listens_destroyed;
+ extern u32 cm_backlog_drops;
+-extern atomic_t cm_loopbacks;
+-extern atomic_t cm_nodes_created;
+-extern atomic_t cm_nodes_destroyed;
+-extern atomic_t cm_accel_dropped_pkts;
+-extern atomic_t cm_resets_recvd;
+-extern atomic_t pau_qps_created;
+-extern atomic_t pau_qps_destroyed;
++extern atomic_unchecked_t cm_loopbacks;
++extern atomic_unchecked_t cm_nodes_created;
++extern atomic_unchecked_t cm_nodes_destroyed;
++extern atomic_unchecked_t cm_accel_dropped_pkts;
++extern atomic_unchecked_t cm_resets_recvd;
++extern atomic_unchecked_t pau_qps_created;
++extern atomic_unchecked_t pau_qps_destroyed;
+
+ extern u32 int_mod_timer_init;
+ extern u32 int_mod_cq_depth_256;
+diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
+index 0a52d72..0642f36 100644
+--- a/drivers/infiniband/hw/nes/nes_cm.c
++++ b/drivers/infiniband/hw/nes/nes_cm.c
+@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
+ u32 cm_packets_retrans;
+ u32 cm_packets_created;
+ u32 cm_packets_received;
+-atomic_t cm_listens_created;
+-atomic_t cm_listens_destroyed;
++atomic_unchecked_t cm_listens_created;
++atomic_unchecked_t cm_listens_destroyed;
+ u32 cm_backlog_drops;
+-atomic_t cm_loopbacks;
+-atomic_t cm_nodes_created;
+-atomic_t cm_nodes_destroyed;
+-atomic_t cm_accel_dropped_pkts;
+-atomic_t cm_resets_recvd;
++atomic_unchecked_t cm_loopbacks;
++atomic_unchecked_t cm_nodes_created;
++atomic_unchecked_t cm_nodes_destroyed;
++atomic_unchecked_t cm_accel_dropped_pkts;
++atomic_unchecked_t cm_resets_recvd;
+
+ static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
+@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
+
+ static struct nes_cm_core *g_cm_core;
+
+-atomic_t cm_connects;
+-atomic_t cm_accepts;
+-atomic_t cm_disconnects;
+-atomic_t cm_closes;
+-atomic_t cm_connecteds;
+-atomic_t cm_connect_reqs;
+-atomic_t cm_rejects;
++atomic_unchecked_t cm_connects;
++atomic_unchecked_t cm_accepts;
++atomic_unchecked_t cm_disconnects;
++atomic_unchecked_t cm_closes;
++atomic_unchecked_t cm_connecteds;
++atomic_unchecked_t cm_connect_reqs;
++atomic_unchecked_t cm_rejects;
+
+ int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
+ {
+@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+ kfree(listener);
+ listener = NULL;
+ ret = 0;
+- atomic_inc(&cm_listens_destroyed);
++ atomic_inc_unchecked(&cm_listens_destroyed);
+ } else {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+ cm_node->rem_mac);
+
+ add_hte_node(cm_core, cm_node);
+- atomic_inc(&cm_nodes_created);
++ atomic_inc_unchecked(&cm_nodes_created);
+
+ return cm_node;
+ }
+@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ }
+
+ atomic_dec(&cm_core->node_cnt);
+- atomic_inc(&cm_nodes_destroyed);
++ atomic_inc_unchecked(&cm_nodes_destroyed);
+ nesqp = cm_node->nesqp;
+ if (nesqp) {
+ nesqp->cm_node = NULL;
+@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+
+ static void drop_packet(struct sk_buff *skb)
+ {
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
+
+@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ {
+
+ int reset = 0; /* whether to send reset in case of err.. */
+- atomic_inc(&cm_resets_recvd);
++ atomic_inc_unchecked(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
+ " refcnt=%d\n", cm_node, cm_node->state,
+ atomic_read(&cm_node->ref_count));
+@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return NULL;
+ }
+- atomic_inc(&cm_loopbacks);
++ atomic_inc_unchecked(&cm_loopbacks);
+ loopbackremotenode->loopbackpartner = cm_node;
+ loopbackremotenode->tcp_cntxt.rcv_wscale =
+ NES_CM_DEFAULT_RCV_WND_SCALE;
+@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
+ nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
+ else {
+ rem_ref_cm_node(cm_core, cm_node);
+- atomic_inc(&cm_accel_dropped_pkts);
++ atomic_inc_unchecked(&cm_accel_dropped_pkts);
+ dev_kfree_skb_any(skb);
+ }
+ break;
+@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+
+ if ((cm_id) && (cm_id->event_handler)) {
+ if (issue_disconn) {
+- atomic_inc(&cm_disconnects);
++ atomic_inc_unchecked(&cm_disconnects);
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ cm_event.status = disconn_status;
+ cm_event.local_addr = cm_id->local_addr;
+@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
+ }
+
+ if (issue_close) {
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ nes_disconnect(nesqp, 1);
+
+ cm_id->provider_data = nesqp;
+@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
+- atomic_inc(&cm_accepts);
++ atomic_inc_unchecked(&cm_accepts);
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ netdev_refcnt_read(nesvnic->netdev));
+@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+ struct nes_cm_core *cm_core;
+ u8 *start_buff;
+
+- atomic_inc(&cm_rejects);
++ atomic_inc_unchecked(&cm_rejects);
+ cm_node = (struct nes_cm_node *)cm_id->provider_data;
+ loopback = cm_node->loopbackpartner;
+ cm_core = cm_node->cm_core;
+@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
+
+- atomic_inc(&cm_connects);
++ atomic_inc_unchecked(&cm_connects);
+ nesqp->active_conn = 1;
+
+ /* cache the cm_id in the qp */
+@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
+ g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+ return err;
+ }
+- atomic_inc(&cm_listens_created);
++ atomic_inc_unchecked(&cm_listens_created);
+ }
+
+ cm_id->add_ref(cm_id);
+@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
+
+ if (nesqp->destroyed)
+ return;
+- atomic_inc(&cm_connecteds);
++ atomic_inc_unchecked(&cm_connecteds);
+ nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
+
+ cm_id->add_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+- atomic_inc(&cm_closes);
++ atomic_inc_unchecked(&cm_closes);
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = 0;
+ cm_event.provider_data = cm_id->provider_data;
+@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
+ return;
+ cm_id = cm_node->cm_id;
+
+- atomic_inc(&cm_connect_reqs);
++ atomic_inc_unchecked(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
+index b3b2a24..7bfaf1e 100644
+--- a/drivers/infiniband/hw/nes/nes_mgt.c
++++ b/drivers/infiniband/hw/nes/nes_mgt.c
+@@ -40,8 +40,8 @@
+ #include "nes.h"
+ #include "nes_mgt.h"
+
+-atomic_t pau_qps_created;
+-atomic_t pau_qps_destroyed;
++atomic_unchecked_t pau_qps_created;
++atomic_unchecked_t pau_qps_destroyed;
+
+ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
+ {
+@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
+ {
+ struct sk_buff *skb;
+ unsigned long flags;
+- atomic_inc(&pau_qps_destroyed);
++ atomic_inc_unchecked(&pau_qps_destroyed);
+
+ /* Free packets that have not yet been forwarded */
+ /* Lock is acquired by skb_dequeue when removing the skb */
+@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
+ skb_queue_head_init(&nesqp->pau_list);
+ spin_lock_init(&nesqp->pau_lock);
+- atomic_inc(&pau_qps_created);
++ atomic_inc_unchecked(&pau_qps_created);
+ nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
+ }
+
+diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
+index c00d2f3..8834298 100644
+--- a/drivers/infiniband/hw/nes/nes_nic.c
++++ b/drivers/infiniband/hw/nes/nes_nic.c
+@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+ target_stat_values[++index] = mh_detected;
+ target_stat_values[++index] = mh_pauses_sent;
+ target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+- target_stat_values[++index] = atomic_read(&cm_connects);
+- target_stat_values[++index] = atomic_read(&cm_accepts);
+- target_stat_values[++index] = atomic_read(&cm_disconnects);
+- target_stat_values[++index] = atomic_read(&cm_connecteds);
+- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+- target_stat_values[++index] = atomic_read(&cm_rejects);
+- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+- target_stat_values[++index] = atomic_read(&qps_created);
+- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+- target_stat_values[++index] = atomic_read(&qps_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_closes);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
++ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
++ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
+ target_stat_values[++index] = cm_packets_sent;
+ target_stat_values[++index] = cm_packets_bounced;
+ target_stat_values[++index] = cm_packets_created;
+ target_stat_values[++index] = cm_packets_received;
+ target_stat_values[++index] = cm_packets_dropped;
+ target_stat_values[++index] = cm_packets_retrans;
+- target_stat_values[++index] = atomic_read(&cm_listens_created);
+- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
+ target_stat_values[++index] = cm_backlog_drops;
+- target_stat_values[++index] = atomic_read(&cm_loopbacks);
+- target_stat_values[++index] = atomic_read(&cm_nodes_created);
+- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
++ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
+ target_stat_values[++index] = int_mod_timer_init;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+ target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
+- target_stat_values[++index] = atomic_read(&pau_qps_created);
+- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
++ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index b0471b4..9ba4e9f 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -46,9 +46,9 @@
+
+ #include <rdma/ib_umem.h>
+
+-atomic_t mod_qp_timouts;
+-atomic_t qps_created;
+-atomic_t sw_qps_destroyed;
++atomic_unchecked_t mod_qp_timouts;
++atomic_unchecked_t qps_created;
++atomic_unchecked_t sw_qps_destroyed;
+
+ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
+
+@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+
+- atomic_inc(&qps_created);
++ atomic_inc_unchecked(&qps_created);
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
+ struct iw_cm_event cm_event;
+ int ret = 0;
+
+- atomic_inc(&sw_qps_destroyed);
++ atomic_inc_unchecked(&sw_qps_destroyed);
+ nesqp->destroyed = 1;
+
+ /* Blow away the connection if it exists. */
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index b881bdc..c2e360c 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -51,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/kref.h>
+ #include <linux/sched.h>
++#include <linux/slab.h>
+
+ #include "qib_common.h"
+ #include "qib_verbs.h"
+diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
+index df7fa25..f11b448 100644
+--- a/drivers/infiniband/hw/qib/qib_fs.c
++++ b/drivers/infiniband/hw/qib/qib_fs.c
+@@ -603,6 +603,7 @@ static struct file_system_type qibfs_fs_type = {
+ .mount = qibfs_mount,
+ .kill_sb = qibfs_kill_super,
+ };
++MODULE_ALIAS_FS("ipathfs");
+
+ int __init qib_init_qibfs(void)
+ {
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index c351aa4..e6967c2 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
+ */
+ static void gameport_init_port(struct gameport *gameport)
+ {
+- static atomic_t gameport_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+ mutex_init(&gameport->drv_mutex);
+ device_initialize(&gameport->dev);
+ dev_set_name(&gameport->dev, "gameport%lu",
+- (unsigned long)atomic_inc_return(&gameport_no) - 1);
++ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
+ gameport->dev.bus = &gameport_bus;
+ gameport->dev.release = gameport_release_port;
+ if (gameport->parent)
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index da38d97..2aa0b79 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
+ */
+ int input_register_device(struct input_dev *dev)
+ {
+- static atomic_t input_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
+ struct input_handler *handler;
+ const char *path;
+ int error;
+@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
+ dev->setkeycode = input_default_setkeycode;
+
+ dev_set_name(&dev->dev, "input%ld",
+- (unsigned long) atomic_inc_return(&input_no) - 1);
++ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
+
+ error = device_add(&dev->dev);
+ if (error)
+diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
+index b8d8611..7a4a04b 100644
+--- a/drivers/input/joystick/sidewinder.c
++++ b/drivers/input/joystick/sidewinder.c
+@@ -30,6 +30,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/init.h>
+ #include <linux/input.h>
+ #include <linux/gameport.h>
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 2189cbf..05ad609 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
+
+ static int xpad_led_probe(struct usb_xpad *xpad)
+ {
+- static atomic_t led_seq = ATOMIC_INIT(0);
++ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
+ long led_no;
+ struct xpad_led *led;
+ struct led_classdev *led_cdev;
+@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
+ if (!led)
+ return -ENOMEM;
+
+- led_no = (long)atomic_inc_return(&led_seq) - 1;
++ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
+
+ snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ led->xpad = xpad;
+diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
+index 9b84b0c..027158f 100644
+--- a/drivers/input/mouse/psmouse.h
++++ b/drivers/input/mouse/psmouse.h
+@@ -110,7 +110,7 @@ struct psmouse_attribute {
+ ssize_t (*set)(struct psmouse *psmouse, void *data,
+ const char *buf, size_t count);
+ bool protect;
+-};
++} __do_const;
+ #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
+
+ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
+diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
+index 0110b5a..d3ad144 100644
+--- a/drivers/input/mousedev.c
++++ b/drivers/input/mousedev.c
+@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
+
+ spin_unlock_irq(&client->packet_lock);
+
+- if (copy_to_user(buffer, data, count))
++ if (count > sizeof(data) || copy_to_user(buffer, data, count))
+ return -EFAULT;
+
+ return count;
+diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
+index ba70058..571d25d 100644
+--- a/drivers/input/serio/serio.c
++++ b/drivers/input/serio/serio.c
+@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
+ */
+ static void serio_init_port(struct serio *serio)
+ {
+- static atomic_t serio_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
+
+ __module_get(THIS_MODULE);
+
+@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
+ mutex_init(&serio->drv_mutex);
+ device_initialize(&serio->dev);
+ dev_set_name(&serio->dev, "serio%ld",
+- (long)atomic_inc_return(&serio_no) - 1);
++ (long)atomic_inc_return_unchecked(&serio_no) - 1);
+ serio->dev.bus = &serio_bus;
+ serio->dev.release = serio_release_port;
+ serio->dev.groups = serio_device_attr_groups;
+diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
+index 4d4cd14..d6fdd87 100644
+--- a/drivers/input/serio/serio_raw.c
++++ b/drivers/input/serio/serio_raw.c
+@@ -280,7 +280,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
+
+ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+ {
+- static atomic_t serio_raw_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
+ struct serio_raw *serio_raw;
+ int err;
+
+@@ -291,7 +291,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
+ }
+
+ snprintf(serio_raw->name, sizeof(serio_raw->name),
+- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
++ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
+ kref_init(&serio_raw->kref);
+ INIT_LIST_HEAD(&serio_raw->client_list);
+ init_waitqueue_head(&serio_raw->wait);
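+
+gameport, input, serio and serio_raw above all share one idiom: a static
+counter bumped once per device to mint a unique name. Overflow there can at
+worst produce a duplicate name, never a dangling reference, so the counter
+is switched to the unchecked type. The resulting pattern, sketched with a
+hypothetical "widget" driver:
+
+	static atomic_unchecked_t widget_no = ATOMIC_INIT(0);
+
+	static void widget_init_port(struct widget *w)
+	{
+		/* post-increment semantics: the first device is widget0 */
+		dev_set_name(&w->dev, "widget%ld",
+			     (long)atomic_inc_return_unchecked(&widget_no) - 1);
+	}
+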
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index e44933d..9ba484a 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -83,8 +83,8 @@ struct capiminor {
+
+ struct capi20_appl *ap;
+ u32 ncci;
+- atomic_t datahandle;
+- atomic_t msgid;
++ atomic_unchecked_t datahandle;
++ atomic_unchecked_t msgid;
+
+ struct tty_port port;
+ int ttyinstop;
+@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
+ capimsg_setu16(s, 2, mp->ap->applid);
+ capimsg_setu8 (s, 4, CAPI_DATA_B3);
+ capimsg_setu8 (s, 5, CAPI_RESP);
+- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(s, 8, mp->ncci);
+ capimsg_setu16(s, 12, datahandle);
+ }
+@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
+ mp->outbytes -= len;
+ spin_unlock_bh(&mp->outlock);
+
+- datahandle = atomic_inc_return(&mp->datahandle);
++ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
+ skb_push(skb, CAPI_DATA_B3_REQ_LEN);
+ memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
+ capimsg_setu16(skb->data, 2, mp->ap->applid);
+ capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
+ capimsg_setu8 (skb->data, 5, CAPI_REQ);
+- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
++ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
+ capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
+ capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
+ capimsg_setu16(skb->data, 16, len); /* Data length */
+diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
+index 2b33b26..a9c638b 100644
+--- a/drivers/isdn/capi/kcapi.c
++++ b/drivers/isdn/capi/kcapi.c
+@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
+
+ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
+ {
+- if (contr - 1 >= CAPI_MAXCONTR)
++ if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
+ return NULL;
+
+ return capi_controller[contr - 1];
+@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
+ {
+ lockdep_assert_held(&capi_controller_lock);
+
+- if (applid - 1 >= CAPI_MAXAPPL)
++ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
+ return NULL;
+
+ return capi_applications[applid - 1];
+@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
+
+ static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
+ {
+- if (applid - 1 >= CAPI_MAXAPPL)
++ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
+ return NULL;
+
+ return rcu_dereference(capi_applications[applid - 1]);
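+
+The kcapi.c checks close an index underflow rather than a race: applid and
+contr are u16, and integer promotion makes (applid - 1) a signed int, so an
+applid of 0 yields -1 instead of wrapping to 65535. -1 is not >=
+CAPI_MAXAPPL, the guard passes, and the array is read one element before
+its start. Worked through:
+
+	u16 applid = 0;
+
+	if (applid - 1 >= CAPI_MAXAPPL)		/* (int)-1 >= 240: false */
+		return NULL;
+	return capi_applications[applid - 1];	/* out-of-bounds read at [-1] */
+
+The added "applid < 1" test rejects the zero case before the subtraction.
+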
+diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
+index db621db..825ea1a 100644
+--- a/drivers/isdn/gigaset/common.c
++++ b/drivers/isdn/gigaset/common.c
+@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
+ cs->commands_pending = 0;
+ cs->cur_at_seq = 0;
+ cs->gotfwver = -1;
+- cs->open_count = 0;
++ local_set(&cs->open_count, 0);
+ cs->dev = NULL;
+ cs->tty = NULL;
+ cs->tty_dev = NULL;
+diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
+index 212efaf..f187c6b 100644
+--- a/drivers/isdn/gigaset/gigaset.h
++++ b/drivers/isdn/gigaset/gigaset.h
+@@ -35,6 +35,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/list.h>
+ #include <linux/atomic.h>
++#include <asm/local.h>
+
+ #define GIG_VERSION {0, 5, 0, 0}
+ #define GIG_COMPAT {0, 4, 0, 0}
+@@ -433,7 +434,7 @@ struct cardstate {
+ spinlock_t cmdlock;
+ unsigned curlen, cmdbytes;
+
+- unsigned open_count;
++ local_t open_count;
+ struct tty_struct *tty;
+ struct tasklet_struct if_wake_tasklet;
+ unsigned control_state;
+diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
+index ee0a549..a7c9798 100644
+--- a/drivers/isdn/gigaset/interface.c
++++ b/drivers/isdn/gigaset/interface.c
+@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
+ }
+ tty->driver_data = cs;
+
+- ++cs->open_count;
+-
+- if (cs->open_count == 1) {
++ if (local_inc_return(&cs->open_count) == 1) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = tty;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+- if (!--cs->open_count) {
++ if (!local_dec_return(&cs->open_count)) {
+ spin_lock_irqsave(&cs->lock, flags);
+ cs->tty = NULL;
+ spin_unlock_irqrestore(&cs->lock, flags);
+@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else {
+ retval = 0;
+@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
+ retval = -ENODEV;
+ goto done;
+ }
+- if (!cs->open_count) {
++ if (!local_read(&cs->open_count)) {
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ retval = -ENODEV;
+ goto done;
+@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
+ if (!cs->connected) {
+ gig_dbg(DEBUG_IF, "not connected");
+ retval = -ENODEV;
+- } else if (!cs->open_count)
++ } else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED) {
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected");
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else if (cs->mstate != MS_LOCKED)
+ dev_warn(cs->dev, "can't write to unlocked device\n");
+@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
+
+ if (!cs->connected)
+ gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
+- else if (!cs->open_count)
++ else if (!local_read(&cs->open_count))
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ else
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
+@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
+ goto out;
+ }
+
+- if (!cs->open_count) {
++ if (!local_read(&cs->open_count)) {
+ dev_warn(cs->dev, "%s: device not opened\n", __func__);
+ goto out;
+ }
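+
+The gigaset conversion replaces a plain "unsigned open_count" with local_t
+so the ++/-- in if_open()/if_close() become atomic read-modify-write
+operations; two racing opens can no longer both observe a count of 1 and
+both run the first-open setup. A sketch of the resulting idiom, assuming
+<asm/local.h>:
+
+	#include <asm/local.h>
+
+	static local_t open_count = LOCAL_INIT(0);
+
+	/* hypothetical open/close pair illustrating the pattern */
+	static void example_open(void)
+	{
+		if (local_inc_return(&open_count) == 1)
+			; /* first opener: bind the tty, start I/O */
+	}
+
+	static void example_close(void)
+	{
+		if (!local_dec_return(&open_count))
+			; /* last closer: tear the binding down */
+	}
+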
+diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
+index 5e3300d..dc7d752 100644
+--- a/drivers/isdn/gigaset/usb-gigaset.c
++++ b/drivers/isdn/gigaset/usb-gigaset.c
+@@ -546,7 +546,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
+ gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
+ memcpy(cs->hw.usb->bchars, buf, 6);
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
+- 0, 0, &buf, 6, 2000);
++ 0, 0, buf, 6, 2000);
+ }
+
+ static int gigaset_freebcshw(struct bc_state *bcs)
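+
+The usb-gigaset.c hunk is a plain bug fix, not hardening: buf is an array
+parameter, which decays to a pointer, so &buf is the address of that
+pointer variable on the stack and the old code shipped six bytes of stack
+garbage to the device instead of the break characters. A minimal
+illustration of the corrected call (const dropped for brevity):
+
+	static int send_brkchars(struct usb_device *udev,
+				 unsigned char buf[6])
+	{
+		/* buf, not &buf: &buf would be unsigned char **, i.e. the
+		 * location of the pointer, not the six data bytes */
+		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				       0x19, 0x41, 0, 0, buf, 6, 2000);
+	}
+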
+diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
+index 2a57da59..e7a12ed 100644
+--- a/drivers/isdn/hardware/avm/b1.c
++++ b/drivers/isdn/hardware/avm/b1.c
+@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
+ }
+ if (left) {
+ if (t4file->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
+@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
+ }
+ if (left) {
+ if (config->user) {
+- if (copy_from_user(buf, dp, left))
++ if (left > sizeof buf || copy_from_user(buf, dp, left))
+ return -EFAULT;
+ } else {
+ memcpy(buf, dp, left);
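+
+b1_load_t4file() and b1_load_config() copy a caller-supplied "left" bytes
+into a fixed on-stack buffer; the added length test is the generic guard
+this patch applies wherever a user-controlled size meets a fixed buffer
+(see also icn_writecmd() and cadet_read() below). Shape of the pattern:
+
+	static int load_block(const char __user *dp, size_t left)
+	{
+		unsigned char buf[256];
+
+		/* reject oversized requests before the stack is overrun */
+		if (left > sizeof(buf) || copy_from_user(buf, dp, left))
+			return -EFAULT;
+		return 0;
+	}
+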
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 6ddb795e..bd2e875 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1656,6 +1656,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+ } else
+ return -EINVAL;
+ case IIOCDBGVAR:
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
+ if (arg) {
+ if (copy_to_user(argp, &dev, sizeof(ulong)))
+ return -EFAULT;
+diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
+index 2339d73..802ab87a 100644
+--- a/drivers/isdn/i4l/isdn_net.c
++++ b/drivers/isdn/i4l/isdn_net.c
+@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
+ {
+ isdn_net_local *lp = netdev_priv(dev);
+ unsigned char *p;
+- ushort len = 0;
++ int len = 0;
+
+ switch (lp->p_encap) {
+ case ISDN_NET_ENCAP_ETHER:
+diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
+index 1f355bb..43f1fea 100644
+--- a/drivers/isdn/icn/icn.c
++++ b/drivers/isdn/icn/icn.c
+@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
+ if (count > len)
+ count = len;
+ if (user) {
+- if (copy_from_user(msg, buf, count))
++ if (count > sizeof msg || copy_from_user(msg, buf, count))
+ return -EFAULT;
+ } else
+ memcpy(msg, buf, count);
+diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
+index 4d395de..c504763 100644
+--- a/drivers/isdn/mISDN/dsp_cmx.c
++++ b/drivers/isdn/mISDN/dsp_cmx.c
+@@ -1623,7 +1623,7 @@ u32 dsp_spl_jiffies; /* calculate the next time to fire */
+ static u16 dsp_count; /* last sample count */
+ static int dsp_count_valid ; /* if we have last sample count */
+
+-void
++void __intentional_overflow(-1)
+ dsp_cmx_send(void *arg)
+ {
+ struct dsp_conf *conf;
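+
+__intentional_overflow(-1) is the annotation consumed by the size-overflow
+GCC plugin: it whitelists a function whose integer arithmetic is expected
+to wrap so the plugin does not instrument it, with -1 meaning "all
+parameters and the return value". A hedged sketch of how such a marker is
+typically wired up (the exact grsecurity definition may differ):
+
+	#ifdef SIZE_OVERFLOW_PLUGIN
+	# define __intentional_overflow(...) \
+		__attribute__((intentional_overflow(__VA_ARGS__)))
+	#else
+	# define __intentional_overflow(...)
+	#endif
+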
+diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
+index a498135..6a39f48 100644
+--- a/drivers/leds/leds-clevo-mail.c
++++ b/drivers/leds/leds-clevo-mail.c
+@@ -39,7 +39,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
+ * detected as working, but in reality it is not) as low as
+ * possible.
+ */
+-static struct dmi_system_id __initdata mail_led_whitelist[] = {
++static const struct dmi_system_id __initconst mail_led_whitelist[] = {
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo D410J",
+diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
+index b3393a9..33f6979 100644
+--- a/drivers/leds/leds-mc13783.c
++++ b/drivers/leds/leds-mc13783.c
+@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
++ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
+index 614ebeb..ce439fd 100644
+--- a/drivers/leds/leds-ss4200.c
++++ b/drivers/leds/leds-ss4200.c
+@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
+ * detected as working, but in reality it is not) as low as
+ * possible.
+ */
+-static struct dmi_system_id __initdata nas_led_whitelist[] = {
++static const struct dmi_system_id __initconst nas_led_whitelist[] = {
+ {
+ .callback = ss4200_led_dmi_callback,
+ .ident = "Intel SS4200-E",
+diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
+index b5fdcb7..8ed3519 100644
+--- a/drivers/lguest/core.c
++++ b/drivers/lguest/core.c
+@@ -92,9 +92,17 @@ static __init int map_switcher(void)
+ * it's worked so far. The end address needs +1 because __get_vm_area
+ * allocates an extra guard page, so we need space for that.
+ */
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#else
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
++#endif
++
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+@@ -119,7 +127,7 @@ static __init int map_switcher(void)
+ * Now the Switcher is mapped at the right address, we can't fail!
+ * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
+ */
+- memcpy(switcher_vma->addr, start_switcher_text,
++ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
+ end_switcher_text - start_switcher_text);
+
+ printk(KERN_INFO "lguest: mapped switcher at %p\n",
+diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
+index 3b62be16..e33134a 100644
+--- a/drivers/lguest/page_tables.c
++++ b/drivers/lguest/page_tables.c
+@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
+ /*:*/
+
+ #ifdef CONFIG_X86_PAE
+-static void release_pmd(pmd_t *spmd)
++static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
+ {
+ /* If the entry's not present, there's nothing to release. */
+ if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
+index 65af42f..530c87a 100644
+--- a/drivers/lguest/x86/core.c
++++ b/drivers/lguest/x86/core.c
+@@ -59,7 +59,7 @@ static struct {
+ /* Offset from where switcher.S was compiled to where we've copied it */
+ static unsigned long switcher_offset(void)
+ {
+- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
++ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
+ }
+
+ /* This cpu's struct lguest_pages. */
+@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+ * These copies are pretty cheap, so we do them unconditionally: */
+ /* Save the current Host top-level page directory.
+ */
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pages->state.host_cr3 = read_cr3();
++#else
+ pages->state.host_cr3 = __pa(current->mm->pgd);
++#endif
++
+ /*
+ * Set up the Guest's page tables to see this CPU's pages (and no
+ * other CPU's pages).
+@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
+ * compiled-in switcher code and the high-mapped copy we just made.
+ */
+ for (i = 0; i < IDT_ENTRIES; i++)
+- default_idt_entries[i] += switcher_offset();
++ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
+
+ /*
+ * Set up the Switcher's per-cpu areas.
+@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
+ * it will be undisturbed when we switch. To change %cs and jump we
+ * need this structure to feed to Intel's "lcall" instruction.
+ */
+- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
++ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
+ lguest_entry.segment = LGUEST_CS;
+
+ /*
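+
+ktla_ktva() appears throughout the lguest changes because KERNEXEC on
+x86-32 links kernel text at a different address from where it is mapped;
+the macro converts a kernel text linear address ("ktla") to the virtual
+address it actually lives at ("ktva"), which is what the memcpy and the
+IDT fixup need. Conceptually (the real definition is an arch-specific
+offset; the constant below is purely illustrative):
+
+	#ifdef CONFIG_PAX_KERNEXEC
+	# define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
+	#else
+	# define ktla_ktva(addr) (addr)
+	#endif
+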
+diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
+index 40634b0..4f5855e 100644
+--- a/drivers/lguest/x86/switcher_32.S
++++ b/drivers/lguest/x86/switcher_32.S
+@@ -87,6 +87,7 @@
+ #include <asm/page.h>
+ #include <asm/segment.h>
+ #include <asm/lguest.h>
++#include <asm/processor-flags.h>
+
+ // We mark the start of the code to copy
+ // It's placed in .text tho it's never run here
+@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
+ // Changes type when we load it: damn Intel!
+ // For after we switch over our page tables
+ // That entry will be read-only: we'd crash.
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %edx
++ xor $X86_CR0_WP, %edx
++ mov %edx, %cr0
++#endif
++
+ movl $(GDT_ENTRY_TSS*8), %edx
+ ltr %dx
+
+@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
+ // Let's clear it again for our return.
+ // The GDT descriptor of the Host
+ // Points to the table after two "size" bytes
+- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
++ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
+ // Clear "used" from type field (byte 5, bit 2)
+- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
++ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %eax
++ xor $X86_CR0_WP, %eax
++ mov %eax, %cr0
++#endif
+
+ // Once our page table's switched, the Guest is live!
+ // The Host fades as we run this final step.
+@@ -295,13 +309,12 @@ deliver_to_host:
+ // I consulted gcc, and it gave
+ // These instructions, which I gladly credit:
+ leal (%edx,%ebx,8), %eax
+- movzwl (%eax),%edx
+- movl 4(%eax), %eax
+- xorw %ax, %ax
+- orl %eax, %edx
++ movl 4(%eax), %edx
++ movw (%eax), %dx
+ // Now the address of the handler's in %edx
+ // We call it now: its "iret" drops us home.
+- jmp *%edx
++ ljmp $__KERNEL_CS, $1f
++1: jmp *%edx
+
+ // Every interrupt can come to us here
+ // But we must truly tell each apart.
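+
+The switcher asm uses the standard KERNEXEC pattern: the two added stanzas
+toggle CR0.WP with xor so ring 0 may write the otherwise read-only GDT
+entry (clearing the TSS "busy" bit), then toggle it back. The C-level
+equivalent of that effect, as a sketch:
+
+	unsigned long cr0 = read_cr0();
+
+	write_cr0(cr0 & ~X86_CR0_WP);	/* allow writes to RO pages */
+	/* clear the busy bit in the TSS descriptor here */
+	write_cr0(cr0);			/* restore write protection */
+
+The ljmp through $__KERNEL_CS at the end reloads the kernel code segment
+before the indirect jump to the host handler.
+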
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
+index 4daf9e5..b8d1d0f 100644
+--- a/drivers/macintosh/macio_asic.c
++++ b/drivers/macintosh/macio_asic.c
+@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
+ * MacIO is matched against any Apple ID, it's probe() function
+ * will then decide wether it applies or not
+ */
+-static const struct pci_device_id __devinitdata pci_ids [] = { {
++static const struct pci_device_id __devinitconst pci_ids [] = { {
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
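+
+The __devinitdata -> __devinitconst swaps (here and in the media drivers
+below) exist because a const-qualified object must be emitted into a
+read-only section; leaving it in the writable .devinit.data section both
+defeats the const and, with the constify plugin, provokes a section
+conflict. Sketch of the assumed section plumbing:
+
+	#define __devinitdata	__section(.devinit.data)
+	#define __devinitconst	__section(.devinit.rodata)
+
+	static const struct pci_device_id ids[] __devinitconst = {
+		{ PCI_DEVICE(0x14f1, 0x8801) },
+		{ 0, }
+	};
+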
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index e6a300c..cc9c96c 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+ cmd == DM_LIST_VERSIONS_CMD)
+ return 0;
+
+- if ((cmd == DM_DEV_CREATE_CMD)) {
++ if (cmd == DM_DEV_CREATE_CMD) {
+ if (!*param->name) {
+ DMWARN("name not supplied when creating device");
+ return -EINVAL;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 1f23e04..08d9a20 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ {
+ struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+
+- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++ if (!capable(CAP_SYS_ADMIN))
+ return;
+
+ spin_lock(&receiving_list_lock);
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index b7b649d..fcf5ef7 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -40,7 +40,7 @@ enum dm_raid1_error {
+
+ struct mirror {
+ struct mirror_set *ms;
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ unsigned long error_type;
+ struct dm_dev *dev;
+ sector_t offset;
+@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
+ struct mirror *m;
+
+ for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+- if (!atomic_read(&m->error_count))
++ if (!atomic_read_unchecked(&m->error_count))
+ return m;
+
+ return NULL;
+@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
+ * simple way to tell if a device has encountered
+ * errors.
+ */
+- atomic_inc(&m->error_count);
++ atomic_inc_unchecked(&m->error_count);
+
+ if (test_and_set_bit(error_type, &m->error_type))
+ return;
+@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
+ struct mirror *m = get_default_mirror(ms);
+
+ do {
+- if (likely(!atomic_read(&m->error_count)))
++ if (likely(!atomic_read_unchecked(&m->error_count)))
+ return m;
+
+ if (m-- == ms->mirror)
+@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
+ {
+ struct mirror *default_mirror = get_default_mirror(m->ms);
+
+- return !atomic_read(&default_mirror->error_count);
++ return !atomic_read_unchecked(&default_mirror->error_count);
+ }
+
+ static int mirror_available(struct mirror_set *ms, struct bio *bio)
+@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
+ */
+ if (likely(region_in_sync(ms, region, 1)))
+ m = choose_mirror(ms, bio->bi_sector);
+- else if (m && atomic_read(&m->error_count))
++ else if (m && atomic_read_unchecked(&m->error_count))
+ m = NULL;
+
+ if (likely(m))
+@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+ }
+
+ ms->mirror[mirror].ms = ms;
+- atomic_set(&(ms->mirror[mirror].error_count), 0);
++ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
+ ms->mirror[mirror].error_type = 0;
+ ms->mirror[mirror].offset = offset;
+
+@@ -1348,7 +1348,7 @@ static void mirror_resume(struct dm_target *ti)
+ */
+ static char device_status_char(struct mirror *m)
+ {
+- if (!atomic_read(&(m->error_count)))
++ if (!atomic_read_unchecked(&(m->error_count)))
+ return 'A';
+
+ return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index cbd41d2..1717044 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -20,7 +20,7 @@ struct stripe {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+- atomic_t error_count;
++ atomic_unchecked_t error_count;
+ };
+
+ struct stripe_c {
+@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ kfree(sc);
+ return r;
+ }
+- atomic_set(&(sc->stripe[i].error_count), 0);
++ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
+ }
+
+ ti->private = sc;
+@@ -314,7 +314,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
+ DMEMIT("%d ", sc->stripes);
+ for (i = 0; i < sc->stripes; i++) {
+ DMEMIT("%s ", sc->stripe[i].dev->name);
+- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
++ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
+ 'D' : 'A';
+ }
+ buffer[i] = '\0';
+@@ -360,8 +360,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+ */
+ for (i = 0; i < sc->stripes; i++)
+ if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
+- atomic_inc(&(sc->stripe[i].error_count));
+- if (atomic_read(&(sc->stripe[i].error_count)) <
++ atomic_inc_unchecked(&(sc->stripe[i].error_count));
++ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
+ DM_IO_ERROR_THRESHOLD)
+ schedule_work(&sc->trigger_event);
+ }
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 5c52582..91793db 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -328,7 +328,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
+ static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
+ {
+- static char *_claim_ptr = "I belong to device-mapper";
++ static char _claim_ptr[] = "I belong to device-mapper";
+ struct block_device *bdev;
+
+ int r;
+@@ -396,7 +396,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ if (!dev_size)
+ return 0;
+
+- if ((start >= dev_size) || (start + len > dev_size)) {
++ if ((start >= dev_size) || (len > dev_size - start)) {
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
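+
+device_area_is_invalid() is rewritten to dodge integer wraparound: once
+start is known to be below dev_size, "len > dev_size - start" cannot
+overflow, whereas "start + len" can wrap past zero and slip through the
+old test. Concretely:
+
+	sector_t dev_size = 100, start = 99;
+	sector_t len = (sector_t)-1;
+
+	/* old: start + len == 98 (mod 2^64), 98 > 100 is false: accepted */
+	/* new: len > dev_size - start is huge > 1: correctly rejected   */
+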
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 237571a..fb6d19b 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
+
+ pmd->info.tm = tm;
+ pmd->info.levels = 2;
+- pmd->info.value_type.context = pmd->data_sm;
++ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+ pmd->info.value_type.size = sizeof(__le64);
+ pmd->info.value_type.inc = data_block_inc;
+ pmd->info.value_type.dec = data_block_dec;
+@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
+
+ pmd->bl_info.tm = tm;
+ pmd->bl_info.levels = 1;
+- pmd->bl_info.value_type.context = pmd->data_sm;
++ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
+ pmd->bl_info.value_type.size = sizeof(__le64);
+ pmd->bl_info.value_type.inc = data_block_inc;
+ pmd->bl_info.value_type.dec = data_block_dec;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8953630..29b12d9 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -177,9 +177,9 @@ struct mapped_device {
+ /*
+ * Event handling.
+ */
+- atomic_t event_nr;
++ atomic_unchecked_t event_nr;
+ wait_queue_head_t eventq;
+- atomic_t uevent_seq;
++ atomic_unchecked_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+@@ -1871,8 +1871,8 @@ static struct mapped_device *alloc_dev(int minor)
+ rwlock_init(&md->map_lock);
+ atomic_set(&md->holders, 1);
+ atomic_set(&md->open_count, 0);
+- atomic_set(&md->event_nr, 0);
+- atomic_set(&md->uevent_seq, 0);
++ atomic_set_unchecked(&md->event_nr, 0);
++ atomic_set_unchecked(&md->uevent_seq, 0);
+ INIT_LIST_HEAD(&md->uevent_list);
+ spin_lock_init(&md->uevent_lock);
+
+@@ -2006,7 +2006,7 @@ static void event_callback(void *context)
+
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+
+- atomic_inc(&md->event_nr);
++ atomic_inc_unchecked(&md->event_nr);
+ wake_up(&md->eventq);
+ }
+
+@@ -2648,18 +2648,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
+ {
+- return atomic_add_return(1, &md->uevent_seq);
++ return atomic_add_return_unchecked(1, &md->uevent_seq);
+ }
+
+ uint32_t dm_get_event_nr(struct mapped_device *md)
+ {
+- return atomic_read(&md->event_nr);
++ return atomic_read_unchecked(&md->event_nr);
+ }
+
+ int dm_wait_event(struct mapped_device *md, int event_nr)
+ {
+ return wait_event_interruptible(md->eventq,
+- (event_nr != atomic_read(&md->event_nr)));
++ (event_nr != atomic_read_unchecked(&md->event_nr)));
+ }
+
+ void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2d0544c..bc3c200 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
+ * start build, activate spare
+ */
+ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+-static atomic_t md_event_count;
++static atomic_unchecked_t md_event_count;
+ void md_new_event(struct mddev *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+ EXPORT_SYMBOL_GPL(md_new_event);
+@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
+ */
+ static void md_new_event_inintr(struct mddev *mddev)
+ {
+- atomic_inc(&md_event_count);
++ atomic_inc_unchecked(&md_event_count);
+ wake_up(&md_event_waiters);
+ }
+
+@@ -1534,7 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+
+ rdev->preferred_minor = 0xffff;
+ rdev->data_offset = le64_to_cpu(sb->data_offset);
+- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
++ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+
+ rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
+ bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
+ else
+ sb->resync_offset = cpu_to_le64(0);
+
+- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
++ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
+
+ sb->raid_disks = cpu_to_le32(mddev->raid_disks);
+ sb->size = cpu_to_le64(mddev->dev_sectors);
+@@ -2649,7 +2649,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
+ static ssize_t
+ errors_show(struct md_rdev *rdev, char *page)
+ {
+- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
++ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
+ }
+
+ static ssize_t
+@@ -2658,7 +2658,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (*buf && (*e == 0 || *e == '\n')) {
+- atomic_set(&rdev->corrected_errors, n);
++ atomic_set_unchecked(&rdev->corrected_errors, n);
+ return len;
+ }
+ return -EINVAL;
+@@ -3052,8 +3052,8 @@ int md_rdev_init(struct md_rdev *rdev)
+ rdev->sb_loaded = 0;
+ rdev->bb_page = NULL;
+ atomic_set(&rdev->nr_pending, 0);
+- atomic_set(&rdev->read_errors, 0);
+- atomic_set(&rdev->corrected_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->corrected_errors, 0);
+
+ INIT_LIST_HEAD(&rdev->same_set);
+ init_waitqueue_head(&rdev->blocked_wait);
+@@ -6703,7 +6703,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+
+ spin_unlock(&pers_lock);
+ seq_printf(seq, "\n");
+- seq->poll_event = atomic_read(&md_event_count);
++ seq->poll_event = atomic_read_unchecked(&md_event_count);
+ return 0;
+ }
+ if (v == (void*)2) {
+@@ -6792,7 +6792,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+ chunk_kb ? "KB" : "B");
+ if (bitmap->file) {
+ seq_printf(seq, ", file: ");
+- seq_path(seq, &bitmap->file->f_path, " \t\n");
++ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
+ }
+
+ seq_printf(seq, "\n");
+@@ -6823,7 +6823,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+ return error;
+
+ seq = file->private_data;
+- seq->poll_event = atomic_read(&md_event_count);
++ seq->poll_event = atomic_read_unchecked(&md_event_count);
+ return error;
+ }
+
+@@ -6837,7 +6837,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+ /* always allow read */
+ mask = POLLIN | POLLRDNORM;
+
+- if (seq->poll_event != atomic_read(&md_event_count))
++ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
+ mask |= POLLERR | POLLPRI;
+ return mask;
+ }
+@@ -6881,7 +6881,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+ struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&disk->sync_io);
++ atomic_read_unchecked(&disk->sync_io);
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index cf742d9..7c7c745 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -120,13 +120,13 @@ struct md_rdev {
+ * only maintained for arrays that
+ * support hot removal
+ */
+- atomic_t read_errors; /* number of consecutive read errors that
++ atomic_unchecked_t read_errors; /* number of consecutive read errors that
+ * we have tried to ignore.
+ */
+ struct timespec last_read_error; /* monotonic time since our
+ * last read error
+ */
+- atomic_t corrected_errors; /* number of corrected read errors,
++ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
+ * for reporting to userspace and storing
+ * in superblock.
+ */
+@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
+
+ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
+ {
+- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ }
+
+ struct md_personality
+diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
+index fc90c11..c8cd9a9 100644
+--- a/drivers/md/persistent-data/dm-space-map-checker.c
++++ b/drivers/md/persistent-data/dm-space-map-checker.c
+@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
+ /*----------------------------------------------------------------*/
+
+ struct sm_checker {
+- struct dm_space_map sm;
++ dm_space_map_no_const sm;
+
+ struct count_array old_counts;
+ struct count_array counts;
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index 1cbfc6b..56e1dbb 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -60,6 +60,7 @@ struct dm_space_map {
+ int (*root_size)(struct dm_space_map *sm, size_t *result);
+ int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+ };
++typedef struct dm_space_map __no_const dm_space_map_no_const;
+
+ /*----------------------------------------------------------------*/
+
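+dm_space_map is a table of function pointers, which the grsecurity
+constify plugin would normally force into read-only memory; sm_checker
+and dm-thin-metadata assign its members at run time, so the typedef above
+carries __no_const to exempt those instances. Sketch of the assumed
+opt-out machinery:
+
+	#ifdef CONSTIFY_PLUGIN
+	# define __no_const __attribute__((no_const))
+	#else
+	# define __no_const
+	#endif
+
+	/* a writable instance of an otherwise auto-constified ops struct */
+	typedef struct dm_space_map __no_const dm_space_map_no_const;
+
+The same reasoning drives the __do_const on psmouse_attribute earlier and
+the __no_const on the dvb adapter-state callbacks below: __do_const forces
+constification, __no_const suppresses it.
+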
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index c706a7b..2cc7511 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1591,7 +1591,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ READ) != 0)
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ }
+ sectors -= s;
+ sect += s;
+@@ -1810,7 +1810,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ test_bit(In_sync, &rdev->flags)) {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ printk(KERN_INFO
+ "md/raid1:%s: read error corrected "
+ "(%d sectors at %llu on %s)\n",
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 8bba438..f065cc3 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1465,7 +1465,7 @@ static void end_sync_read(struct bio *bio, int error)
+ /* The write handler will notice the lack of
+ * R10BIO_Uptodate and record any errors etc
+ */
+- atomic_add(r10_bio->sectors,
++ atomic_add_unchecked(r10_bio->sectors,
+ &conf->mirrors[d].rdev->corrected_errors);
+
+ /* for reconstruct, we always reschedule after a read.
+@@ -1765,7 +1765,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ struct timespec cur_time_mon;
+ unsigned long hours_since_last;
+- unsigned int read_errors = atomic_read(&rdev->read_errors);
++ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
+
+ ktime_get_ts(&cur_time_mon);
+
+@@ -1787,9 +1787,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+ * overflowing the shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+- atomic_set(&rdev->read_errors, 0);
++ atomic_set_unchecked(&rdev->read_errors, 0);
+ else
+- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
++ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
+ }
+
+ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
+@@ -1839,8 +1839,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ return;
+
+ check_decay_read_errors(mddev, rdev);
+- atomic_inc(&rdev->read_errors);
+- if (atomic_read(&rdev->read_errors) > max_read_errors) {
++ atomic_inc_unchecked(&rdev->read_errors);
++ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
+ char b[BDEVNAME_SIZE];
+ bdevname(rdev->bdev, b);
+
+@@ -1848,7 +1848,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ "md/raid10:%s: %s: Raid device exceeded "
+ "read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), b,
+- atomic_read(&rdev->read_errors), max_read_errors);
++ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
+ printk(KERN_NOTICE
+ "md/raid10:%s: %s: Failing raid device\n",
+ mdname(mddev), b);
+@@ -1993,7 +1993,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+- atomic_add(s, &rdev->corrected_errors);
++ atomic_add_unchecked(s, &rdev->corrected_errors);
+ }
+
+ rdev_dec_pending(rdev, mddev);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 26ef63a..bd587cd 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1618,19 +1618,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ }
+- if (atomic_read(&conf->disks[i].rdev->read_errors))
+- atomic_set(&conf->disks[i].rdev->read_errors, 0);
++ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
++ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
+ } else {
+ const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+ int retry = 0;
+ rdev = conf->disks[i].rdev;
+
+ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+- atomic_inc(&rdev->read_errors);
++ atomic_inc_unchecked(&rdev->read_errors);
+ if (conf->mddev->degraded >= conf->max_degraded)
+ printk_ratelimited(
+ KERN_WARNING
+@@ -1650,7 +1650,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+- else if (atomic_read(&rdev->read_errors)
++ else if (atomic_read_unchecked(&rdev->read_errors)
+ > conf->max_nr_stripes)
+ printk(KERN_WARNING
+ "md/raid:%s: Too many read errors, failing device %s.\n",
+diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
+index ba9a643..e474ab5 100644
+--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
++++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
+@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long)&_driverdata }
+
+-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
++static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
+ DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
+diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
+index d5cda35..017af46 100644
+--- a/drivers/media/dvb/dvb-core/dvbdev.c
++++ b/drivers/media/dvb/dvb-core/dvbdev.c
+@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ const struct dvb_device *template, void *priv, int type)
+ {
+ struct dvb_device *dvbdev;
+- struct file_operations *dvbdevfops;
++ file_operations_no_const *dvbdevfops;
+ struct device *clsdev;
+ int minor;
+ int id;
+diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
+index 9f2a02c..5920f88 100644
+--- a/drivers/media/dvb/dvb-usb/cxusb.c
++++ b/drivers/media/dvb/dvb-usb/cxusb.c
+@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
+ struct dib0700_adapter_state {
+ int (*set_param_save) (struct dvb_frontend *,
+ struct dvb_frontend_parameters *);
+-};
++} __no_const;
+
+ static int dib7070_set_param_override(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *fep)
+diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
+index f103ec1..5e8968b 100644
+--- a/drivers/media/dvb/dvb-usb/dw2102.c
++++ b/drivers/media/dvb/dvb-usb/dw2102.c
+@@ -95,7 +95,7 @@ struct su3000_state {
+
+ struct s6x0_state {
+ int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
+-};
++} __no_const;
+
+ /* debug */
+ static int dvb_usb_dw2102_debug;
+diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
+index 404f63a..4796533 100644
+--- a/drivers/media/dvb/frontends/dib3000.h
++++ b/drivers/media/dvb/frontends/dib3000.h
+@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
+ int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
+ int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
+ int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
+-};
++} __no_const;
+
+ #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
+ extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
+diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
+index 90bf573..e8463da 100644
+--- a/drivers/media/dvb/frontends/ds3000.c
++++ b/drivers/media/dvb/frontends/ds3000.c
+@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
+
+ for (i = 0; i < 30 ; i++) {
+ ds3000_read_status(fe, &status);
+- if (status && FE_HAS_LOCK)
++ if (status & FE_HAS_LOCK)
+ break;
+
+ msleep(10);
+diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
+index 0564192..75b16f5 100644
+--- a/drivers/media/dvb/ngene/ngene-cards.c
++++ b/drivers/media/dvb/ngene/ngene-cards.c
+@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
+
+ /****************************************************************************/
+
+-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
++static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
+ NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
+index 16a089f..1661b11 100644
+--- a/drivers/media/radio/radio-cadet.c
++++ b/drivers/media/radio/radio-cadet.c
+@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+ unsigned char readbuf[RDS_BUFFER];
+ int i = 0;
+
++ if (count > RDS_BUFFER)
++ return -EFAULT;
+ mutex_lock(&dev->lock);
+ if (dev->rdsstat == 0) {
+ dev->rdsstat = 1;
+@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
+ readbuf[i++] = dev->rdsbuf[dev->rdsout++];
+ mutex_unlock(&dev->lock);
+
+- if (copy_to_user(data, readbuf, i))
++ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
+ return -EFAULT;
+ return i;
+ }
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index a47ba33..deafb02 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
+
+ int rc_register_device(struct rc_dev *dev)
+ {
+- static atomic_t devno = ATOMIC_INIT(0);
++ static atomic_unchecked_t devno = ATOMIC_INIT(0);
+ struct rc_map *rc_map;
+ const char *path;
+ int rc;
+@@ -1063,7 +1063,7 @@ int rc_register_device(struct rc_dev *dev)
+ */
+ mutex_lock(&dev->lock);
+
+- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
++ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
+ dev_set_name(&dev->dev, "rc%ld", dev->devno);
+ dev_set_drvdata(&dev->dev, dev);
+ rc = device_add(&dev->dev);
+diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
+index 61287fc..8b08712 100644
+--- a/drivers/media/rc/redrat3.c
++++ b/drivers/media/rc/redrat3.c
+@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+ return carrier;
+ }
+
+-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
++static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
+ {
+ struct redrat3_dev *rr3 = rcdev->priv;
+ struct device *dev = rr3->dev;
+diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
+index 68d1240..46b32eb 100644
+--- a/drivers/media/video/cx88/cx88-alsa.c
++++ b/drivers/media/video/cx88/cx88-alsa.c
+@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
+ * Only boards with eeprom and byte 1 at eeprom=1 have it
+ */
+
+-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
++static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
+ {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0, }
+diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
+index 921c56d..7e6c4b2 100644
+--- a/drivers/media/video/cx88/cx88-video.c
++++ b/drivers/media/video/cx88/cx88-video.c
+@@ -49,9 +49,9 @@ MODULE_VERSION(CX88_VERSION);
+
+ /* ------------------------------------------------------------------ */
+
+-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
++static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+
+ module_param_array(video_nr, int, NULL, 0444);
+ module_param_array(vbi_nr, int, NULL, 0444);
+diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
+index 41108a9a..8ad2437 100644
+--- a/drivers/media/video/ivtv/ivtv-driver.c
++++ b/drivers/media/video/ivtv/ivtv-driver.c
+@@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
+ MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
+
+ /* ivtv instance counter */
+-static atomic_t ivtv_instance = ATOMIC_INIT(0);
++static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
+
+ /* Parameter declarations */
+ static int cardtype[IVTV_MAX_CARDS];
+diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
+index d345215..b607565 100644
+--- a/drivers/media/video/omap/omap_vout.c
++++ b/drivers/media/video/omap/omap_vout.c
+@@ -64,7 +64,6 @@ enum omap_vout_channels {
+ OMAP_VIDEO2,
+ };
+
+-static struct videobuf_queue_ops video_vbq_ops;
+ /* Variables configurable through module params*/
+ static u32 video1_numbuffers = 3;
+ static u32 video2_numbuffers = 3;
+@@ -1001,6 +1000,12 @@ static int omap_vout_open(struct file *file)
+ {
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
++ static struct videobuf_queue_ops video_vbq_ops = {
++ .buf_setup = omap_vout_buffer_setup,
++ .buf_prepare = omap_vout_buffer_prepare,
++ .buf_release = omap_vout_buffer_release,
++ .buf_queue = omap_vout_buffer_queue,
++ };
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+@@ -1018,10 +1023,6 @@ static int omap_vout_open(struct file *file)
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+- video_vbq_ops.buf_release = omap_vout_buffer_release;
+- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
+diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
+index a0895bf..b451f5b 100644
+--- a/drivers/media/video/timblogiw.c
++++ b/drivers/media/video/timblogiw.c
+@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
+
+ /* Platform device functions */
+
+-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
++static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
+ .vidioc_querycap = timblogiw_querycap,
+ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
+ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
+@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
+ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
+ };
+
+-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
++static struct v4l2_file_operations timblogiw_fops = {
+ .owner = THIS_MODULE,
+ .open = timblogiw_open,
+ .release = timblogiw_close,
+diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
+index c68531b..82a9ea0 100644
+--- a/drivers/media/video/v4l2-compat-ioctl32.c
++++ b/drivers/media/video/v4l2-compat-ioctl32.c
+@@ -332,7 +332,7 @@ struct v4l2_buffer32 {
+ __u32 reserved;
+ };
+
+-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
++static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
+ {
+ void __user *up_pln;
+@@ -358,7 +358,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+ return 0;
+ }
+
+-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
++static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+ enum v4l2_memory memory)
+ {
+ if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
+index 8b0777f..e29f31e 100644
+--- a/drivers/media/video/v4l2-device.c
++++ b/drivers/media/video/v4l2-device.c
+@@ -74,9 +74,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
+ EXPORT_SYMBOL_GPL(v4l2_device_put);
+
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance)
++ atomic_unchecked_t *instance)
+ {
+- int num = atomic_inc_return(instance) - 1;
++ int num = atomic_inc_return_unchecked(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
+index 639abee..e2336f4 100644
+--- a/drivers/media/video/v4l2-ioctl.c
++++ b/drivers/media/video/v4l2-ioctl.c
+@@ -2197,7 +2197,7 @@ static unsigned long cmd_input_size(unsigned int cmd)
+ }
+
+ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+- void * __user *user_ptr, void ***kernel_ptr)
++ void __user **user_ptr, void ***kernel_ptr)
+ {
+ int ret = 0;
+
+@@ -2212,7 +2212,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ ret = -EINVAL;
+ break;
+ }
+- *user_ptr = (void __user *)buf->m.planes;
++ *user_ptr = (void __force_user *)buf->m.planes;
+ *kernel_ptr = (void *)&buf->m.planes;
+ *array_size = sizeof(struct v4l2_plane) * buf->length;
+ ret = 1;
+@@ -2230,7 +2230,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ ret = -EINVAL;
+ break;
+ }
+- *user_ptr = (void __user *)ctrls->controls;
++ *user_ptr = (void __force_user *)ctrls->controls;
+ *kernel_ptr = (void *)&ctrls->controls;
+ *array_size = sizeof(struct v4l2_ext_control)
+ * ctrls->count;
+diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
+index 668f5c6..65df5f2 100644
+--- a/drivers/memstick/host/r592.c
++++ b/drivers/memstick/host/r592.c
+@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
+ /* Executes one TPC (data is read/written from small or large fifo) */
+ static void r592_execute_tpc(struct r592_device *dev)
+ {
+- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
++ bool is_write;
+ int len, error;
+ u32 status, reg;
+
+@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
+ return;
+ }
+
++ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+ len = dev->req->long_data ?
+ dev->req->sg.length : dev->req->data_len;
+
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
+index e9c6a60..a1d04d6 100644
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+ seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+ seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
++#else
+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+ (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
++#endif
++
+ /*
+ * Rounding UP to nearest 4-kB boundary here...
+ */
+@@ -6767,7 +6772,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+ ioc->facts.GlobalCredits);
+
+ seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL, NULL);
++#else
+ (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
++#endif
+ sz = (ioc->reply_sz * ioc->reply_depth) + 128;
+ seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+ ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index 9d95042..b808101 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
+ return 0;
+ }
+
++static inline void
++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
++{
++ if (phy_info->port_details) {
++ phy_info->port_details->rphy = rphy;
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
++ ioc->name, rphy));
++ }
++
++ if (rphy) {
++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
++ ioc->name, rphy, rphy->dev.release));
++ }
++}
++
+ /* no mutex */
+ static void
+ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
+ return NULL;
+ }
+
+-static inline void
+-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+-{
+- if (phy_info->port_details) {
+- phy_info->port_details->rphy = rphy;
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+- ioc->name, rphy));
+- }
+-
+- if (rphy) {
+- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+- ioc->name, rphy, rphy->dev.release));
+- }
+-}
+-
+ static inline struct sas_port *
+ mptsas_get_port(struct mptsas_phyinfo *phy_info)
+ {
+diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
+index 0c3ced7..1fe34ec 100644
+--- a/drivers/message/fusion/mptscsih.c
++++ b/drivers/message/fusion/mptscsih.c
+@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
+
+ h = shost_priv(SChost);
+
+- if (h) {
+- if (h->info_kbuf == NULL)
+- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+- return h->info_kbuf;
+- h->info_kbuf[0] = '\0';
++ if (!h)
++ return NULL;
+
+- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+- h->info_kbuf[size-1] = '\0';
+- }
++ if (h->info_kbuf == NULL)
++ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
++ return h->info_kbuf;
++ h->info_kbuf[0] = '\0';
++
++ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
++ h->info_kbuf[size-1] = '\0';
+
+ return h->info_kbuf;
+ }
+diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
+index 07dbeaf..59a658c 100644
+--- a/drivers/message/i2o/i2o_proc.c
++++ b/drivers/message/i2o/i2o_proc.c
+@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
+ "Array Controller Device"
+ };
+
+-static char *chtostr(u8 * chars, int n)
+-{
+- char tmp[256];
+- tmp[0] = 0;
+- return strncat(tmp, (char *)chars, n);
+-}
+-
+ static int i2o_report_query_status(struct seq_file *seq, int block_status,
+ char *group)
+ {
+@@ -721,9 +714,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
+ static int i2o_seq_show_hw(struct seq_file *seq, void *v)
+ {
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+- static u32 work32[5];
+- static u8 *work8 = (u8 *) work32;
+- static u16 *work16 = (u16 *) work32;
++ u32 work32[5];
++ u8 *work8 = (u8 *) work32;
++ u16 *work16 = (u16 *) work32;
+ int token;
+ u32 hwcap;
+
+@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
+
+ seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+ seq_printf(seq, "%-#8x", ddm_table.module_id);
+- seq_printf(seq, "%-29s",
+- chtostr(ddm_table.module_name_version, 28));
++ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
+ seq_printf(seq, "%9d ", ddm_table.data_size);
+ seq_printf(seq, "%8d", ddm_table.code_size);
+
+@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
+
+ seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+ seq_printf(seq, "%-#8x", dst->module_id);
+- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
++ seq_printf(seq, "%-.28s", dst->module_name_version);
++ seq_printf(seq, "%-.8s", dst->date);
+ seq_printf(seq, "%8d ", dst->module_size);
+ seq_printf(seq, "%8d ", dst->mpb_size);
+ seq_printf(seq, "0x%04x", dst->module_flags);
+@@ -1257,9 +1249,9 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
+ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+ {
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
++ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
+ // == (allow) 512d bytes (max)
+- static u16 *work16 = (u16 *) work32;
++ u16 *work16 = (u16 *) work32;
+ int token;
+
+ token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
+@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+ seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
+ seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
+ seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
+- seq_printf(seq, "Vendor info : %s\n",
+- chtostr((u8 *) (work32 + 2), 16));
+- seq_printf(seq, "Product info : %s\n",
+- chtostr((u8 *) (work32 + 6), 16));
+- seq_printf(seq, "Description : %s\n",
+- chtostr((u8 *) (work32 + 10), 16));
+- seq_printf(seq, "Product rev. : %s\n",
+- chtostr((u8 *) (work32 + 14), 8));
++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, (u8 *) (work32 + 16),
+@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
+ }
+
+ seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+- seq_printf(seq, "Module name : %s\n",
+- chtostr(result.module_name, 24));
+- seq_printf(seq, "Module revision : %s\n",
+- chtostr(result.module_rev, 8));
++ seq_printf(seq, "Module name : %.24s\n", result.module_name);
++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+ return 0;
+ }
+
+- seq_printf(seq, "Device name : %s\n",
+- chtostr(result.device_name, 64));
+- seq_printf(seq, "Service name : %s\n",
+- chtostr(result.service_name, 64));
+- seq_printf(seq, "Physical name : %s\n",
+- chtostr(result.physical_location, 64));
+- seq_printf(seq, "Instance number : %s\n",
+- chtostr(result.instance_number, 4));
++ seq_printf(seq, "Device name : %.64s\n", result.device_name);
++ seq_printf(seq, "Service name : %.64s\n", result.service_name);
++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
+
+ return 0;
+ }
+@@ -1374,9 +1356,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+ static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
+ {
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+- static u32 work32[12];
+- static u16 *work16 = (u16 *) work32;
+- static u8 *work8 = (u8 *) work32;
++ u32 work32[12];
++ u16 *work16 = (u16 *) work32;
++ u8 *work8 = (u8 *) work32;
+ int token;
+
+ token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
+diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
+index a8c08f3..155fe3d 100644
+--- a/drivers/message/i2o/iop.c
++++ b/drivers/message/i2o/iop.c
+@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+
+- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+- atomic_inc(&c->context_list_counter);
++ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
++ atomic_inc_unchecked(&c->context_list_counter);
+
+- entry->context = atomic_read(&c->context_list_counter);
++ entry->context = atomic_read_unchecked(&c->context_list_counter);
+
+ list_add(&entry->list, &c->context_list);
+
+@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
+
+ #if BITS_PER_LONG == 64
+ spin_lock_init(&c->context_list_lock);
+- atomic_set(&c->context_list_counter, 0);
++ atomic_set_unchecked(&c->context_list_counter, 0);
+ INIT_LIST_HEAD(&c->context_list);
+ #endif
+
+diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
+index 4eec7b7..f468a4e 100644
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -937,9 +937,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
+
+ err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
+ IRQF_ONESHOT, "ab3100-core", ab3100);
+- /* This real unpredictable IRQ is of course sampled for entropy */
+- rand_initialize_irq(client->irq);
+-
+ if (err)
+ goto exit_no_irq;
+
+diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
+index 5c2a06a..8fa077c 100644
+--- a/drivers/mfd/janz-cmodio.c
++++ b/drivers/mfd/janz-cmodio.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/pci.h>
+ #include <linux/interrupt.h>
+diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
+index 0219115..0743393 100644
+--- a/drivers/mfd/max8925-i2c.c
++++ b/drivers/mfd/max8925-i2c.c
+@@ -139,7 +139,7 @@ static int __devinit max8925_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+ struct max8925_platform_data *pdata = client->dev.platform_data;
+- static struct max8925_chip *chip;
++ struct max8925_chip *chip;
+
+ if (!pdata) {
+ pr_info("%s: platform data is missing\n", __func__);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 6dad2ef..ef80da6 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -167,7 +167,7 @@ int mfd_add_devices(struct device *parent, int id,
+ atomic_t *cnts;
+
+ /* initialize reference counting for all cells */
+- cnts = kcalloc(sizeof(*cnts), n_devs, GFP_KERNEL);
++ cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
+ if (!cnts)
+ return -ENOMEM;
+
+diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
+index 29f11e0..89f0c3d 100644
+--- a/drivers/mfd/twl4030-irq.c
++++ b/drivers/mfd/twl4030-irq.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+
+ #include <linux/i2c/twl.h>
++#include <asm/pgtable.h>
+
+ #include "twl-core.h"
+
+@@ -713,10 +714,12 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+ /* install an irq handler for each of the SIH modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+- twl4030_irq_chip = dummy_irq_chip;
+- twl4030_irq_chip.name = "twl4030";
++ pax_open_kernel();
++ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
++ *(const char **)&twl4030_irq_chip.name = "twl4030";
+
+- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
++ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
++ pax_close_kernel();
+
+ for (i = irq_base; i < irq_end; i++) {
+ irq_set_chip_and_handler(i, &twl4030_irq_chip,
+diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
+index 83f4988..80f7a01 100644
+--- a/drivers/mfd/twl6030-irq.c
++++ b/drivers/mfd/twl6030-irq.c
+@@ -376,10 +376,12 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+ /* install an irq handler for each of the modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+- twl6030_irq_chip = dummy_irq_chip;
+- twl6030_irq_chip.name = "twl6030";
+- twl6030_irq_chip.irq_set_type = NULL;
+- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
++ pax_open_kernel();
++ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
++ *(const char **)&twl6030_irq_chip.name = "twl6030";
++ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
++ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
++ pax_close_kernel();
+
+ for (i = irq_base; i < irq_end; i++) {
+ irq_set_chip_and_handler(i, &twl6030_irq_chip,
+diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
+index 19fc7c1..09a4d26 100644
+--- a/drivers/misc/c2port/core.c
++++ b/drivers/misc/c2port/core.c
+@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
+ mutex_init(&c2dev->mutex);
+
+ /* Create binary file */
+- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
++ pax_open_kernel();
++ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
++ pax_close_kernel();
+ ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
+ if (unlikely(ret))
+ goto error_device_create_bin_file;
+diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
+index 8994772..f8453cc 100644
+--- a/drivers/misc/ibmasm/ibmasmfs.c
++++ b/drivers/misc/ibmasm/ibmasmfs.c
+@@ -110,6 +110,7 @@ static struct file_system_type ibmasmfs_type = {
+ .mount = ibmasmfs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("ibmasmfs");
+
+ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
+ {
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 3aa9a96..59cf685 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
+ char before[BREAK_INSTR_SIZE];
+ char after[BREAK_INSTR_SIZE];
+
+- probe_kernel_read(before, (char *)kgdbts_break_test,
++ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
+ BREAK_INSTR_SIZE);
+ init_simple_test();
+ ts.tst = plant_and_detach_test;
+@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
+ /* Activate test with initial breakpoint */
+ if (!is_early)
+ kgdb_breakpoint();
+- probe_kernel_read(after, (char *)kgdbts_break_test,
++ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
+ BREAK_INSTR_SIZE);
+ if (memcmp(before, after, BREAK_INSTR_SIZE)) {
+ printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index 29d12a7..f900ba4 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
+ * the lid is closed. This leads to interrupts as soon as a little move
+ * is done.
+ */
+- atomic_inc(&lis3->count);
++ atomic_inc_unchecked(&lis3->count);
+
+ wake_up_interruptible(&lis3->misc_wait);
+ kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
+@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+ if (lis3->pm_dev)
+ pm_runtime_get_sync(lis3->pm_dev);
+
+- atomic_set(&lis3->count, 0);
++ atomic_set_unchecked(&lis3->count, 0);
+ return 0;
+ }
+
+@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
+ add_wait_queue(&lis3->misc_wait, &wait);
+ while (true) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- data = atomic_xchg(&lis3->count, 0);
++ data = atomic_xchg_unchecked(&lis3->count, 0);
+ if (data)
+ break;
+
+@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+ struct lis3lv02d, miscdev);
+
+ poll_wait(file, &lis3->misc_wait, wait);
+- if (atomic_read(&lis3->count))
++ if (atomic_read_unchecked(&lis3->count))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
+index 2b1482a..5d33616 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.h
++++ b/drivers/misc/lis3lv02d/lis3lv02d.h
+@@ -266,7 +266,7 @@ struct lis3lv02d {
+ struct input_polled_dev *idev; /* input device */
+ struct platform_device *pdev; /* platform device */
+ struct regulator_bulk_data regulators[2];
+- atomic_t count; /* interrupt count after last read */
++ atomic_unchecked_t count; /* interrupt count after last read */
+ union axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
+
+diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
+index 150cd70..1d5d99b 100644
+--- a/drivers/misc/lkdtm.c
++++ b/drivers/misc/lkdtm.c
+@@ -473,6 +473,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
+
+ n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++)
+diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
+index 2f30bad..c4c13d0 100644
+--- a/drivers/misc/sgi-gru/gruhandles.c
++++ b/drivers/misc/sgi-gru/gruhandles.c
+@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
+- atomic_long_inc(&mcs_op_statistics[op].count);
+- atomic_long_add(nsec, &mcs_op_statistics[op].total);
++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
+ }
+diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
+index 7768b87..f8aac38 100644
+--- a/drivers/misc/sgi-gru/gruprocfs.c
++++ b/drivers/misc/sgi-gru/gruprocfs.c
+@@ -32,9 +32,9 @@
+
+ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
+
+-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
+ {
+- unsigned long val = atomic_long_read(v);
++ unsigned long val = atomic_long_read_unchecked(v);
+
+ seq_printf(s, "%16lu %s\n", val, id);
+ }
+@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
+
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ for (op = 0; op < mcsop_last; op++) {
+- count = atomic_long_read(&mcs_op_statistics[op].count);
+- total = atomic_long_read(&mcs_op_statistics[op].total);
++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
+ max = mcs_op_statistics[op].max;
+ seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
+ count ? total / count : 0, max);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index 5c3ce24..4915ccb 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
+ * GRU statistics.
+ */
+ struct gru_stats_s {
+- atomic_long_t vdata_alloc;
+- atomic_long_t vdata_free;
+- atomic_long_t gts_alloc;
+- atomic_long_t gts_free;
+- atomic_long_t gms_alloc;
+- atomic_long_t gms_free;
+- atomic_long_t gts_double_allocate;
+- atomic_long_t assign_context;
+- atomic_long_t assign_context_failed;
+- atomic_long_t free_context;
+- atomic_long_t load_user_context;
+- atomic_long_t load_kernel_context;
+- atomic_long_t lock_kernel_context;
+- atomic_long_t unlock_kernel_context;
+- atomic_long_t steal_user_context;
+- atomic_long_t steal_kernel_context;
+- atomic_long_t steal_context_failed;
+- atomic_long_t nopfn;
+- atomic_long_t asid_new;
+- atomic_long_t asid_next;
+- atomic_long_t asid_wrap;
+- atomic_long_t asid_reuse;
+- atomic_long_t intr;
+- atomic_long_t intr_cbr;
+- atomic_long_t intr_tfh;
+- atomic_long_t intr_spurious;
+- atomic_long_t intr_mm_lock_failed;
+- atomic_long_t call_os;
+- atomic_long_t call_os_wait_queue;
+- atomic_long_t user_flush_tlb;
+- atomic_long_t user_unload_context;
+- atomic_long_t user_exception;
+- atomic_long_t set_context_option;
+- atomic_long_t check_context_retarget_intr;
+- atomic_long_t check_context_unload;
+- atomic_long_t tlb_dropin;
+- atomic_long_t tlb_preload_page;
+- atomic_long_t tlb_dropin_fail_no_asid;
+- atomic_long_t tlb_dropin_fail_upm;
+- atomic_long_t tlb_dropin_fail_invalid;
+- atomic_long_t tlb_dropin_fail_range_active;
+- atomic_long_t tlb_dropin_fail_idle;
+- atomic_long_t tlb_dropin_fail_fmm;
+- atomic_long_t tlb_dropin_fail_no_exception;
+- atomic_long_t tfh_stale_on_fault;
+- atomic_long_t mmu_invalidate_range;
+- atomic_long_t mmu_invalidate_page;
+- atomic_long_t flush_tlb;
+- atomic_long_t flush_tlb_gru;
+- atomic_long_t flush_tlb_gru_tgh;
+- atomic_long_t flush_tlb_gru_zero_asid;
++ atomic_long_unchecked_t vdata_alloc;
++ atomic_long_unchecked_t vdata_free;
++ atomic_long_unchecked_t gts_alloc;
++ atomic_long_unchecked_t gts_free;
++ atomic_long_unchecked_t gms_alloc;
++ atomic_long_unchecked_t gms_free;
++ atomic_long_unchecked_t gts_double_allocate;
++ atomic_long_unchecked_t assign_context;
++ atomic_long_unchecked_t assign_context_failed;
++ atomic_long_unchecked_t free_context;
++ atomic_long_unchecked_t load_user_context;
++ atomic_long_unchecked_t load_kernel_context;
++ atomic_long_unchecked_t lock_kernel_context;
++ atomic_long_unchecked_t unlock_kernel_context;
++ atomic_long_unchecked_t steal_user_context;
++ atomic_long_unchecked_t steal_kernel_context;
++ atomic_long_unchecked_t steal_context_failed;
++ atomic_long_unchecked_t nopfn;
++ atomic_long_unchecked_t asid_new;
++ atomic_long_unchecked_t asid_next;
++ atomic_long_unchecked_t asid_wrap;
++ atomic_long_unchecked_t asid_reuse;
++ atomic_long_unchecked_t intr;
++ atomic_long_unchecked_t intr_cbr;
++ atomic_long_unchecked_t intr_tfh;
++ atomic_long_unchecked_t intr_spurious;
++ atomic_long_unchecked_t intr_mm_lock_failed;
++ atomic_long_unchecked_t call_os;
++ atomic_long_unchecked_t call_os_wait_queue;
++ atomic_long_unchecked_t user_flush_tlb;
++ atomic_long_unchecked_t user_unload_context;
++ atomic_long_unchecked_t user_exception;
++ atomic_long_unchecked_t set_context_option;
++ atomic_long_unchecked_t check_context_retarget_intr;
++ atomic_long_unchecked_t check_context_unload;
++ atomic_long_unchecked_t tlb_dropin;
++ atomic_long_unchecked_t tlb_preload_page;
++ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
++ atomic_long_unchecked_t tlb_dropin_fail_upm;
++ atomic_long_unchecked_t tlb_dropin_fail_invalid;
++ atomic_long_unchecked_t tlb_dropin_fail_range_active;
++ atomic_long_unchecked_t tlb_dropin_fail_idle;
++ atomic_long_unchecked_t tlb_dropin_fail_fmm;
++ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
++ atomic_long_unchecked_t tfh_stale_on_fault;
++ atomic_long_unchecked_t mmu_invalidate_range;
++ atomic_long_unchecked_t mmu_invalidate_page;
++ atomic_long_unchecked_t flush_tlb;
++ atomic_long_unchecked_t flush_tlb_gru;
++ atomic_long_unchecked_t flush_tlb_gru_tgh;
++ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
+
+- atomic_long_t copy_gpa;
+- atomic_long_t read_gpa;
++ atomic_long_unchecked_t copy_gpa;
++ atomic_long_unchecked_t read_gpa;
+
+- atomic_long_t mesq_receive;
+- atomic_long_t mesq_receive_none;
+- atomic_long_t mesq_send;
+- atomic_long_t mesq_send_failed;
+- atomic_long_t mesq_noop;
+- atomic_long_t mesq_send_unexpected_error;
+- atomic_long_t mesq_send_lb_overflow;
+- atomic_long_t mesq_send_qlimit_reached;
+- atomic_long_t mesq_send_amo_nacked;
+- atomic_long_t mesq_send_put_nacked;
+- atomic_long_t mesq_page_overflow;
+- atomic_long_t mesq_qf_locked;
+- atomic_long_t mesq_qf_noop_not_full;
+- atomic_long_t mesq_qf_switch_head_failed;
+- atomic_long_t mesq_qf_unexpected_error;
+- atomic_long_t mesq_noop_unexpected_error;
+- atomic_long_t mesq_noop_lb_overflow;
+- atomic_long_t mesq_noop_qlimit_reached;
+- atomic_long_t mesq_noop_amo_nacked;
+- atomic_long_t mesq_noop_put_nacked;
+- atomic_long_t mesq_noop_page_overflow;
++ atomic_long_unchecked_t mesq_receive;
++ atomic_long_unchecked_t mesq_receive_none;
++ atomic_long_unchecked_t mesq_send;
++ atomic_long_unchecked_t mesq_send_failed;
++ atomic_long_unchecked_t mesq_noop;
++ atomic_long_unchecked_t mesq_send_unexpected_error;
++ atomic_long_unchecked_t mesq_send_lb_overflow;
++ atomic_long_unchecked_t mesq_send_qlimit_reached;
++ atomic_long_unchecked_t mesq_send_amo_nacked;
++ atomic_long_unchecked_t mesq_send_put_nacked;
++ atomic_long_unchecked_t mesq_page_overflow;
++ atomic_long_unchecked_t mesq_qf_locked;
++ atomic_long_unchecked_t mesq_qf_noop_not_full;
++ atomic_long_unchecked_t mesq_qf_switch_head_failed;
++ atomic_long_unchecked_t mesq_qf_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_unexpected_error;
++ atomic_long_unchecked_t mesq_noop_lb_overflow;
++ atomic_long_unchecked_t mesq_noop_qlimit_reached;
++ atomic_long_unchecked_t mesq_noop_amo_nacked;
++ atomic_long_unchecked_t mesq_noop_put_nacked;
++ atomic_long_unchecked_t mesq_noop_page_overflow;
+
+ };
+
+@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
+ tghop_invalidate, mcsop_last};
+
+ struct mcs_op_statistic {
+- atomic_long_t count;
+- atomic_long_t total;
++ atomic_long_unchecked_t count;
++ atomic_long_unchecked_t total;
+ unsigned long max;
+ };
+
+@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
+
+ #define STAT(id) do { \
+ if (gru_options & OPT_STATS) \
+- atomic_long_inc(&gru_stats.id); \
++ atomic_long_inc_unchecked(&gru_stats.id); \
+ } while (0)
+
+ #ifdef CONFIG_SGI_GRU_DEBUG
+diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
+index 851b2f2..a4ec097 100644
+--- a/drivers/misc/sgi-xp/xp.h
++++ b/drivers/misc/sgi-xp/xp.h
+@@ -289,7 +289,7 @@ struct xpc_interface {
+ xpc_notify_func, void *);
+ void (*received) (short, int, void *);
+ enum xp_retval (*partid_to_nasids) (short, void *);
+-};
++} __no_const;
+
+ extern struct xpc_interface xpc_interface;
+
+diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
+index b94d5f7..7f494c5 100644
+--- a/drivers/misc/sgi-xp/xpc.h
++++ b/drivers/misc/sgi-xp/xpc.h
+@@ -835,6 +835,7 @@ struct xpc_arch_operations {
+ void (*received_payload) (struct xpc_channel *, void *);
+ void (*notify_senders_of_disconnect) (struct xpc_channel *);
+ };
++typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
+
+ /* struct xpc_partition act_state values (for XPC HB) */
+
+@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
+ /* found in xpc_main.c */
+ extern struct device *xpc_part;
+ extern struct device *xpc_chan;
+-extern struct xpc_arch_operations xpc_arch_ops;
++extern xpc_arch_operations_no_const xpc_arch_ops;
+ extern int xpc_disengage_timelimit;
+ extern int xpc_disengage_timedout;
+ extern int xpc_activate_IRQ_rcvd;
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index d971817..33bdca5 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
+ .notifier_call = xpc_system_die,
+ };
+
+-struct xpc_arch_operations xpc_arch_ops;
++xpc_arch_operations_no_const xpc_arch_ops;
+
+ /*
+ * Timer function to enforce the timelimit on the partition disengage.
+@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
+
+ if (((die_args->trapnr == X86_TRAP_MF) ||
+ (die_args->trapnr == X86_TRAP_XF)) &&
+- !user_mode_vm(die_args->regs))
++ !user_mode(die_args->regs))
+ xpc_die_deactivate();
+
+ break;
+diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
+index ba168a7..399925d6 100644
+--- a/drivers/misc/ti-st/st_core.c
++++ b/drivers/misc/ti-st/st_core.c
+@@ -347,6 +347,11 @@ void st_int_recv(void *disc_data,
+ st_gdata->rx_skb = alloc_skb(
+ st_gdata->list[type]->max_frame_size,
+ GFP_ATOMIC);
++ if (st_gdata->rx_skb == NULL) {
++ pr_err("out of memory: dropping\n");
++ goto done;
++ }
++
+ skb_reserve(st_gdata->rx_skb,
+ st_gdata->list[type]->reserve);
+ /* next 2 required for BT only */
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 83b51b5..ec2396c 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -674,7 +674,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
+ .probe = via_probe,
+ };
+
+-static const struct pci_device_id pci_ids[] __devinitdata = {
++static const struct pci_device_id pci_ids[] __devinitconst = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+ .device = PCI_DEVICE_ID_RICOH_R5C822,
+diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
+index 179814a..01cb750 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0020.c
++++ b/drivers/mtd/chips/cfi_cmdset_0020.c
+@@ -674,7 +674,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ size_t totlen = 0, thislen;
+ int ret = 0;
+ size_t buflen = 0;
+- static char *buffer;
++ char *buffer;
+
+ if (!ECCBUF_SIZE) {
+ /* We should fall back to a general writev implementation.
+diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
+index e9fad91..0a7a16a 100644
+--- a/drivers/mtd/devices/doc2000.c
++++ b/drivers/mtd/devices/doc2000.c
+@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+
+ /* The ECC will not be calculated correctly if less than 512 is written */
+ /* DBB-
+- if (len != 0x200 && eccbuf)
++ if (len != 0x200)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
+diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
+index a3f7a27..234016e 100644
+--- a/drivers/mtd/devices/doc2001.c
++++ b/drivers/mtd/devices/doc2001.c
+@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
+ struct Nand *mychip = &this->chips[from >> (this->chipshift)];
+
+ /* Don't allow read past end of device */
+- if (from >= this->totlen)
++ if (from >= this->totlen || !len)
+ return -EINVAL;
+
+ /* Don't allow a single read to cross a 512-byte block boundary */
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 1d90e26..865d439 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -1215,6 +1215,7 @@ static struct file_system_type mtd_inodefs_type = {
+ .mount = mtd_inodefs_mount,
+ .kill_sb = kill_anon_super,
+ };
++MODULE_ALIAS_FS("mtd_inodefs");
+
+ static void mtdchar_notify_add(struct mtd_info *mtd)
+ {
+diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
+index 3984d48..28aa897 100644
+--- a/drivers/mtd/nand/denali.c
++++ b/drivers/mtd/nand/denali.c
+@@ -26,6 +26,7 @@
+ #include <linux/pci.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
+
+ #include "denali.h"
+
+diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
+index ac40925..483b753 100644
+--- a/drivers/mtd/nftlmount.c
++++ b/drivers/mtd/nftlmount.c
+@@ -24,6 +24,7 @@
+ #include <asm/errno.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/nand.h>
+ #include <linux/mtd/nftl.h>
+diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
+index a9ff89ff..461d313 100644
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ #define SM_CIS_VENDOR_OFFSET 0x59
+ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+ {
+- struct attribute_group *attr_group;
++ attribute_group_no_const *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1bf36ac..55c534e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4803,7 +4803,7 @@ static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
+ return 0;
+ }
+
+-static struct rtnl_link_ops bond_link_ops __read_mostly = {
++static struct rtnl_link_ops bond_link_ops = {
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+@@ -4928,8 +4928,8 @@ static void __exit bonding_exit(void)
+
+ bond_destroy_debugfs();
+
+- rtnl_link_unregister(&bond_link_ops);
+ unregister_pernet_subsys(&bond_net_ops);
++ rtnl_link_unregister(&bond_link_ops);
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /*
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index cf95bd8d..f61f675 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -1063,7 +1063,7 @@ static ssize_t bonding_store_primary(struct device *d,
+ goto out;
+ }
+
+- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
++ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
+
+ /* check to see if we are clearing primary */
+ if (!strlen(ifname) || buf[0] == '\n') {
+@@ -1236,7 +1236,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
+ goto out;
+ }
+
+- sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
++ sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
+
+ /* check to see if we are clearing active */
+ if (!strlen(ifname) || buf[0] == '\n') {
+diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
+index e9f8432..45308e6 100644
+--- a/drivers/net/ethernet/8390/ax88796.c
++++ b/drivers/net/ethernet/8390/ax88796.c
+@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
+ if (ax->plat->reg_offsets)
+ ei_local->reg_offset = ax->plat->reg_offsets;
+ else {
++ resource_size_t _mem_size = mem_size;
++ do_div(_mem_size, 0x18);
+ ei_local->reg_offset = ax->reg_offsets;
+ for (ret = 0; ret < 0x18; ret++)
+- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
++ ax->reg_offsets[ret] = _mem_size * ret;
+ }
+
+ if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
+index 1feae59..c2a61d2 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl2.c
++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
+@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
+ */
+
+ #define ATL2_PARAM(X, desc) \
+- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
++ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+ #else
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index 283d663..4373534 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -1240,7 +1240,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+ {
+ /* RX_MODE controlling object */
+- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
++ bnx2x_init_rx_mode_obj(bp);
+
+ /* multicast configuration controlling object */
+ bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+index 1451769..0275580 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -2290,15 +2290,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
+ return rc;
+ }
+
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+- struct bnx2x_rx_mode_obj *o)
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
+ {
+ if (CHIP_IS_E1x(bp)) {
+- o->wait_comp = bnx2x_empty_rx_mode_wait;
+- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
++ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
++ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
+ } else {
+- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
+- o->config_rx_mode = bnx2x_set_rx_mode_e2;
++ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
++ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+index 9a517c2..6d245e1 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+@@ -1207,8 +1207,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
+
+ /********************* RX MODE ****************/
+
+-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+- struct bnx2x_rx_mode_obj *o);
++void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
+
+ /**
+ * Send and RX_MODE ramrod according to the provided parameters.
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index da90ba5..dcba1fd 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -134,6 +134,7 @@
+ #define CHIPREV_ID_5750_A0 0x4000
+ #define CHIPREV_ID_5750_A1 0x4001
+ #define CHIPREV_ID_5750_A3 0x4003
++#define CHIPREV_ID_5750_C1 0x4201
+ #define CHIPREV_ID_5750_C2 0x4202
+ #define CHIPREV_ID_5752_A0_HW 0x5000
+ #define CHIPREV_ID_5752_A0 0x6000
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 4d15c8f..1bc7689 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -3031,7 +3031,9 @@ static void t3_io_resume(struct pci_dev *pdev)
+ CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
++ rtnl_lock();
+ t3_resume_ports(adapter);
++ rtnl_unlock();
+ }
+
+ static struct pci_error_handlers t3_err_handler = {
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+index c5f5479..2e8c260 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
++++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+ */
+ struct l2t_skb_cb {
+ arp_failure_handler_func arp_failure_handler;
+-};
++} __no_const;
+
+ #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+index cfb60e1..f0fe46f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -1537,9 +1537,9 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+- if (skb->tail - skb->transport_header)
++ if (skb_tail_pointer(skb) - skb_transport_header(skb))
+ pci_unmap_single(dui->pdev, *p++,
+- skb->tail - skb->transport_header,
++ skb_tail_pointer(skb) - skb_transport_header(skb),
+ PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+@@ -1600,7 +1600,7 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+ flits = skb_transport_offset(skb) / 8;
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+- skb->tail - skb->transport_header,
++ skb_tail_pointer(skb) - skb_transport_header(skb),
+ adap->pdev);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index 871bcaa..4043505 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
++ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(&statbuf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+- if (copy_to_user(ioc->data, &statbuf, ioc->len))
++ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
+ break;
+ }
+diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
+index 14d5b61..1398636 100644
+--- a/drivers/net/ethernet/dec/tulip/eeprom.c
++++ b/drivers/net/ethernet/dec/tulip/eeprom.c
+@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
+ {NULL}};
+
+
+-static const char *block_name[] __devinitdata = {
++static const char *block_name[] __devinitconst = {
+ "21140 non-MII",
+ "21140 MII PHY",
+ "21142 Serial PHY",
+diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
+index 4d01219..b58d26d 100644
+--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
++++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
+@@ -236,7 +236,7 @@ struct pci_id_info {
+ int drv_flags; /* Driver use, intended as capability flags. */
+ };
+
+-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+ { /* Sometime a Level-One switch card. */
+ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ { "Winbond W89c840", CanHaveMII | HasBrokenTx},
+diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
+index dcd7f7a..ecb7fb3 100644
+--- a/drivers/net/ethernet/dlink/sundance.c
++++ b/drivers/net/ethernet/dlink/sundance.c
+@@ -218,7 +218,7 @@ enum {
+ struct pci_id_info {
+ const char *name;
+ };
+-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
++static const struct pci_id_info pci_id_tbl[] __devinitconst = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 36c7c4e..7de1382 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
+
+ if (wrapped)
+ newacc += 65536;
+- ACCESS_ONCE(*acc) = newacc;
++ ACCESS_ONCE_RW(*acc) = newacc;
+ }
+
+ void be_parse_stats(struct be_adapter *adapter)
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index fb5579a..debdffa 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -30,6 +30,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+ #include <net/ip.h>
+
+ #include "ftgmac100.h"
+diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
+index a127cb2..0d043cd 100644
+--- a/drivers/net/ethernet/faraday/ftmac100.c
++++ b/drivers/net/ethernet/faraday/ftmac100.c
+@@ -30,6 +30,8 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
+
+ #include "ftmac100.h"
+
+diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
+index 61d2bdd..7f1154a 100644
+--- a/drivers/net/ethernet/fealnx.c
++++ b/drivers/net/ethernet/fealnx.c
+@@ -150,7 +150,7 @@ struct chip_info {
+ int flags;
+ };
+
+-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
++static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
+index ed79b2d..b17b19d 100644
+--- a/drivers/net/ethernet/ibm/emac/core.c
++++ b/drivers/net/ethernet/ibm/emac/core.c
+@@ -2309,7 +2309,7 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
+ return 0;
+ }
+
+-static struct notifier_block emac_of_bus_notifier __devinitdata = {
++static struct notifier_block emac_of_bus_notifier = {
+ .notifier_call = emac_of_bus_notify
+ };
+
+diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+index e1159e5..34efe3e 100644
+--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
++++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+@@ -205,7 +205,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+- struct e1000_mac_operations *func = &mac->ops;
+
+ /* Set media type */
+ switch (adapter->pdev->device) {
+@@ -233,16 +232,16 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+- func->check_for_link = e1000e_check_for_copper_link;
++ mac->ops.setup_physical_interface = e1000_setup_copper_link_80003es2lan;
++ mac->ops.check_for_link = e1000e_check_for_copper_link;
+ break;
+ case e1000_media_type_fiber:
+- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+- func->check_for_link = e1000e_check_for_fiber_link;
++ mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
++ mac->ops.check_for_link = e1000e_check_for_fiber_link;
+ break;
+ case e1000_media_type_internal_serdes:
+- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+- func->check_for_link = e1000e_check_for_serdes_link;
++ mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
++ mac->ops.check_for_link = e1000e_check_for_serdes_link;
+ break;
+ default:
+ return -E1000_ERR_CONFIG;
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index 4f4d52a..faf0fa4 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -239,7 +239,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+- struct e1000_mac_operations *func = &mac->ops;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+@@ -272,22 +271,22 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+- func->setup_physical_interface = e1000_setup_copper_link_82571;
+- func->check_for_link = e1000e_check_for_copper_link;
+- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
++ mac->ops.setup_physical_interface = e1000_setup_copper_link_82571;
++ mac->ops.check_for_link = e1000e_check_for_copper_link;
++ mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
+ break;
+ case e1000_media_type_fiber:
+- func->setup_physical_interface =
++ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+- func->check_for_link = e1000e_check_for_fiber_link;
+- func->get_link_up_info =
++ mac->ops.check_for_link = e1000e_check_for_fiber_link;
++ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ case e1000_media_type_internal_serdes:
+- func->setup_physical_interface =
++ mac->ops.setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+- func->check_for_link = e1000_check_for_serdes_link_82571;
+- func->get_link_up_info =
++ mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
++ mac->ops.get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ default:
+@@ -297,10 +296,10 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+- func->set_lan_id = e1000_set_lan_id_single_port;
+- func->check_mng_mode = e1000e_check_mng_mode_generic;
+- func->led_on = e1000e_led_on_generic;
+- func->blink_led = e1000e_blink_led_generic;
++ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
++ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
++ mac->ops.led_on = e1000e_led_on_generic;
++ mac->ops.blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+@@ -314,14 +313,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+ break;
+ case e1000_82574:
+ case e1000_82583:
+- func->set_lan_id = e1000_set_lan_id_single_port;
+- func->check_mng_mode = e1000_check_mng_mode_82574;
+- func->led_on = e1000_led_on_82574;
++ mac->ops.set_lan_id = e1000_set_lan_id_single_port;
++ mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
++ mac->ops.led_on = e1000_led_on_82574;
+ break;
+ default:
+- func->check_mng_mode = e1000e_check_mng_mode_generic;
+- func->led_on = e1000e_led_on_generic;
+- func->blink_led = e1000e_blink_led_generic;
++ mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
++ mac->ops.led_on = e1000e_led_on_generic;
++ mac->ops.blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index 8e362bb..679d9da 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -175,7 +175,7 @@ struct e1000_info;
+ #define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+- (5 << 16) | /* wthresh must be +1 more than desired */\
++ (1 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 4c8e199..f7f5587 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -956,8 +956,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+- tx_ring->total_bytes = 0;
+- tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+@@ -981,16 +979,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+- int i;
+-
+- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+- for (i = 0; i < q_vector->rxr_count; i++) {
+- rx_ring = &(adapter->rx_ring[r_idx]);
+- rx_ring->total_bytes = 0;
+- rx_ring->total_packets = 0;
+- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+- r_idx + 1);
+- }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index 0b3567a..49bc4bc 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -756,7 +756,7 @@ ltq_etop_probe(struct platform_device *pdev)
+ return 0;
+
+ err_free:
+- kfree(dev);
++ free_netdev(dev);
+ err_out:
+ return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 24ee967..9a07e41 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -570,8 +570,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
+ int err;
+ int i;
+
+- priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
+- mlx4_num_eq_uar(dev), GFP_KERNEL);
++ priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
++ sizeof *priv->eq_table.uar_map, GFP_KERNEL);
+ if (!priv->eq_table.uar_map) {
+ err = -ENOMEM;
+ goto err_out_free;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 94bbc85..78c12e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -40,6 +40,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+ #include <linux/io-mapping.h>
++#include <linux/sched.h>
+
+ #include <linux/mlx4/device.h>
+ #include <linux/mlx4/doorbell.h>
+diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+index 98e2c10..79af7f8 100644
+--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
++++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
+@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+ struct __vxge_hw_fifo *fifo;
+ struct vxge_hw_fifo_config *config;
+ u32 txdl_size, txdl_per_memblock;
+- struct vxge_hw_mempool_cbs fifo_mp_callback;
++ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
++ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
++ };
++
+ struct __vxge_hw_virtualpath *vpath;
+
+ if ((vp == NULL) || (attr == NULL)) {
+@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+ goto exit;
+ }
+
+- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+-
+ fifo->mempool =
+ __vxge_hw_mempool_create(vpath->hldev,
+ fifo->config->memblock_size,
+diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
+index 212f43b..fb31b51 100644
+--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
++++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
+@@ -683,10 +683,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
+ p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+- if (IS_ERR(p->phydev)) {
+- p->phydev = NULL;
++ if (!p->phydev)
+ return -1;
+- }
+
+ phy_start_aneg(p->phydev);
+
+diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
+index 49b549f..13d648c 100644
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
+ phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
+ PHY_INTERFACE_MODE_SGMII);
+
+- if (IS_ERR(phydev)) {
++ if (!phydev) {
+ printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
+- return PTR_ERR(phydev);
++ return -ENODEV;
+ }
+
+ mac->phydev = phydev;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index a3bd0ba..8a34a90 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -704,17 +704,17 @@ struct rtl8169_private {
+ struct mdio_ops {
+ void (*write)(void __iomem *, int, int);
+ int (*read)(void __iomem *, int);
+- } mdio_ops;
++ } __no_const mdio_ops;
+
+ struct pll_power_ops {
+ void (*down)(struct rtl8169_private *);
+ void (*up)(struct rtl8169_private *);
+- } pll_power_ops;
++ } __no_const pll_power_ops;
+
+ struct jumbo_ops {
+ void (*enable)(struct rtl8169_private *);
+ void (*disable)(struct rtl8169_private *);
+- } jumbo_ops;
++ } __no_const jumbo_ops;
+
+ int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
+index 1b4658c..a30dabb 100644
+--- a/drivers/net/ethernet/sis/sis190.c
++++ b/drivers/net/ethernet/sis/sis190.c
+@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+ struct net_device *dev)
+ {
+- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
++ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *isa_bridge;
+ u8 reg, tmp8;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index c07cfe9..81cbf7e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+
+ writel(value, ioaddr + MMC_CNTRL);
+
+- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+- MMC_CNTRL, value);
++// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
++// MMC_CNTRL, value);
+ }
+
+ /* To mask all interrupts. */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index d4d2bc1..14b8672 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1602,7 +1602,7 @@ static const struct file_operations stmmac_rings_status_fops = {
+ .open = stmmac_sysfs_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+
+ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+@@ -1674,7 +1674,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
+ .open = stmmac_sysfs_dma_cap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+
+ static int stmmac_init_fs(struct net_device *dev)
+diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
+index a4a3516..3b3a7e0 100644
+--- a/drivers/net/hamradio/hdlcdrv.c
++++ b/drivers/net/hamradio/hdlcdrv.c
+@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ case HDLCDRVCTL_CALIBRATE:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
++ return -EINVAL;
+ s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+ return 0;
+
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index d0893e4..14b0d44 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -216,6 +216,6 @@ out:
+ }
+
+ /* Registered in net/core/dev.c */
+-struct pernet_operations __net_initdata loopback_net_ops = {
++struct pernet_operations __net_initconst loopback_net_ops = {
+ .init = loopback_net_init,
+ };
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 301b39e..345c414 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -790,13 +790,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+ int macvlan_link_register(struct rtnl_link_ops *ops)
+ {
+ /* common fields */
+- ops->priv_size = sizeof(struct macvlan_dev);
+- ops->validate = macvlan_validate;
+- ops->maxtype = IFLA_MACVLAN_MAX;
+- ops->policy = macvlan_policy;
+- ops->changelink = macvlan_changelink;
+- ops->get_size = macvlan_get_size;
+- ops->fill_info = macvlan_fill_info;
++ pax_open_kernel();
++ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
++ *(void **)&ops->validate = macvlan_validate;
++ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
++ *(const void **)&ops->policy = macvlan_policy;
++ *(void **)&ops->changelink = macvlan_changelink;
++ *(void **)&ops->get_size = macvlan_get_size;
++ *(void **)&ops->fill_info = macvlan_fill_info;
++ pax_close_kernel();
+
+ return rtnl_link_register(ops);
+ };
+@@ -852,7 +854,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block macvlan_notifier_block __read_mostly = {
++static struct notifier_block macvlan_notifier_block = {
+ .notifier_call = macvlan_device_event,
+ };
+
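+
+/*
+ * Note on the idiom in the macvlan hunk above (and in the tokenring,
+ * hwsim, iwl3945 and wl1251 hunks below): under PaX KERNEXEC/constify
+ * these ops structures become read-only, so one-time initialization has
+ * to open a short write window with pax_open_kernel()/pax_close_kernel()
+ * (on x86, by toggling CR0.WP). A minimal sketch, assuming a tree with
+ * this patch applied; example_register() is illustrative, not a symbol
+ * in the patch:
+ */
+#if 0
+static void example_register(struct rtnl_link_ops *ops)
+{
+	pax_open_kernel();			/* lift write protection */
+	*(void **)&ops->validate = macvlan_validate;
+	pax_close_kernel();			/* restore write protection */
+}
+#endif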
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index b0f9015..93da3cf 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -1085,7 +1085,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block macvtap_notifier_block __read_mostly = {
++static struct notifier_block macvtap_notifier_block = {
+ .notifier_call = macvtap_device_event,
+ };
+
+diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
+index 6539189..09875ce 100644
+--- a/drivers/net/phy/mdio-bitbang.c
++++ b/drivers/net/phy/mdio-bitbang.c
+@@ -225,6 +225,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ module_put(ctrl->ops->owner);
++ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+ }
+ EXPORT_SYMBOL(free_mdio_bitbang);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 3ed983c..a1bb418 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
+ struct ppp_stats stats;
+ struct ppp_comp_stats cstats;
+- char *vers;
+
+ switch (cmd) {
+ case SIOCGPPPSTATS:
+@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ break;
+
+ case SIOCGPPPVER:
+- vers = PPP_VERSION;
+- if (copy_to_user(addr, vers, strlen(vers) + 1))
++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
+ break;
+ err = 0;
+ break;
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 0a0a664..7e7deef 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -489,7 +489,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+ register struct tcphdr *thp;
+ register struct iphdr *ip;
+ register struct cstate *cs;
+- int len, hdrlen;
++ long len, hdrlen;
+ unsigned char *cp = icp;
+
+ /* We've got a compressed packet; read the change byte */
+diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
+index 515f122..41dd273 100644
+--- a/drivers/net/tokenring/abyss.c
++++ b/drivers/net/tokenring/abyss.c
+@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
+
+ static int __init abyss_init (void)
+ {
+- abyss_netdev_ops = tms380tr_netdev_ops;
++ pax_open_kernel();
++ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
+
+- abyss_netdev_ops.ndo_open = abyss_open;
+- abyss_netdev_ops.ndo_stop = abyss_close;
++ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
++ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
++ pax_close_kernel();
+
+ return pci_register_driver(&abyss_driver);
+ }
+diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
+index 6153cfd..cf69c1c 100644
+--- a/drivers/net/tokenring/madgemc.c
++++ b/drivers/net/tokenring/madgemc.c
+@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
+
+ static int __init madgemc_init (void)
+ {
+- madgemc_netdev_ops = tms380tr_netdev_ops;
+- madgemc_netdev_ops.ndo_open = madgemc_open;
+- madgemc_netdev_ops.ndo_stop = madgemc_close;
++ pax_open_kernel();
++ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
++ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
++ pax_close_kernel();
+
+ return mca_register_driver (&madgemc_driver);
+ }
+diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
+index 8d362e6..f91cc52 100644
+--- a/drivers/net/tokenring/proteon.c
++++ b/drivers/net/tokenring/proteon.c
+@@ -353,9 +353,11 @@ static int __init proteon_init(void)
+ struct platform_device *pdev;
+ int i, num = 0, err = 0;
+
+- proteon_netdev_ops = tms380tr_netdev_ops;
+- proteon_netdev_ops.ndo_open = proteon_open;
+- proteon_netdev_ops.ndo_stop = tms380tr_close;
++ pax_open_kernel();
++ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
++ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
++ pax_close_kernel();
+
+ err = platform_driver_register(&proteon_driver);
+ if (err)
+diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
+index 46db5c5..37c1536 100644
+--- a/drivers/net/tokenring/skisa.c
++++ b/drivers/net/tokenring/skisa.c
+@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
+ struct platform_device *pdev;
+ int i, num = 0, err = 0;
+
+- sk_isa_netdev_ops = tms380tr_netdev_ops;
+- sk_isa_netdev_ops.ndo_open = sk_isa_open;
+- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
++ pax_open_kernel();
++ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
++ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
++ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
++ pax_close_kernel();
+
+ err = platform_driver_register(&sk_isa_driver);
+ if (err)
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index ee1aab0..31aa71c 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -359,7 +359,7 @@ static void tun_free_netdev(struct net_device *dev)
+ {
+ struct tun_struct *tun = netdev_priv(dev);
+
+- sock_put(tun->socket.sk);
++ sk_release_kernel(tun->socket.sk);
+ }
+
+ /* Net device open. */
+@@ -983,10 +983,18 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
+ return ret;
+ }
+
++static int tun_release(struct socket *sock)
++{
++ if (sock->sk)
++ sock_put(sock->sk);
++ return 0;
++}
++
+ /* Ops structure to mimic raw sockets with tun */
+ static const struct proto_ops tun_socket_ops = {
+ .sendmsg = tun_sendmsg,
+ .recvmsg = tun_recvmsg,
++ .release = tun_release,
+ };
+
+ static struct proto tun_proto = {
+@@ -1113,10 +1121,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+
+ err = -ENOMEM;
+- sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
++ sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
+ if (!sk)
+ goto err_free_dev;
+
++ sk_change_net(sk, net);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
+ tun->socket.ops = &tun_socket_ops;
+@@ -1177,7 +1186,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ return 0;
+
+ err_free_sk:
+- sock_put(sk);
++ tun_free_netdev(dev);
+ err_free_dev:
+ free_netdev(dev);
+ failed:
+@@ -1236,7 +1245,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
+ }
+
+ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+- unsigned long arg, int ifreq_len)
++ unsigned long arg, size_t ifreq_len)
+ {
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun;
+@@ -1247,6 +1256,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ int vnet_hdr_sz;
+ int ret;
+
++ if (ifreq_len > sizeof ifr)
++ return -EFAULT;
++
+ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (copy_from_user(&ifr, argp, ifreq_len))
+ return -EFAULT;
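+
+/*
+ * The tun hunks above also carry the upstream fix that stops the internal
+ * socket from pinning its network namespace: the sock is allocated against
+ * init_net (taking no namespace reference), retargeted with
+ * sk_change_net(), and torn down with sk_release_kernel(), so a namespace
+ * can exit while tun fds stay open. The pairing, condensed from the hunks
+ * above:
+ */
+#if 0
+	sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
+	sk_change_net(sk, net);		/* attach to the target namespace */
+	/* ... on destruction: */
+	sk_release_kernel(sk);		/* release without a namespace ref */
+#endif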
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 304fe78..db112fa 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -71,7 +71,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+-
++#include <asm/local.h>
+
+ #define MOD_AUTHOR "Option Wireless"
+ #define MOD_DESCRIPTION "USB High Speed Option driver"
+@@ -257,7 +257,7 @@ struct hso_serial {
+
+ /* from usb_serial_port */
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+ spinlock_t serial_lock;
+
+ int (*write_data) (struct hso_serial *serial);
+@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
+ struct urb *urb;
+
+ urb = serial->rx_urb[0];
+- if (serial->open_count > 0) {
++ if (local_read(&serial->open_count) > 0) {
+ count = put_rxbuf_data(urb, serial);
+ if (count == -1)
+ return;
+@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
+ DUMP1(urb->transfer_buffer, urb->actual_length);
+
+ /* Anyone listening? */
+- if (serial->open_count == 0)
++ if (local_read(&serial->open_count) == 0)
+ return;
+
+ if (status == 0) {
+@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
+ spin_unlock_irq(&serial->serial_lock);
+
+ /* check for port already opened, if not set the termios */
+- serial->open_count++;
+- if (serial->open_count == 1) {
++ if (local_inc_return(&serial->open_count) == 1) {
+ serial->rx_state = RX_IDLE;
+ /* Force default termio settings */
+ _hso_serial_set_termios(tty, NULL);
+@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
+ result = hso_start_serial_device(serial->parent, GFP_KERNEL);
+ if (result) {
+ hso_stop_serial_device(serial->parent);
+- serial->open_count--;
++ local_dec(&serial->open_count);
+ kref_put(&serial->parent->ref, hso_serial_ref_free);
+ }
+ } else {
+@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
+
+ /* reset the rts and dtr */
+ /* do the actual close */
+- serial->open_count--;
++ local_dec(&serial->open_count);
+
+- if (serial->open_count <= 0) {
+- serial->open_count = 0;
++ if (local_read(&serial->open_count) <= 0) {
++ local_set(&serial->open_count, 0);
+ spin_lock_irq(&serial->serial_lock);
+ if (serial->tty == tty) {
+ serial->tty->driver_data = NULL;
+@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+
+ /* the actual setup */
+ spin_lock_irqsave(&serial->serial_lock, flags);
+- if (serial->open_count)
++ if (local_read(&serial->open_count))
+ _hso_serial_set_termios(tty, old);
+ else
+ tty->termios = old;
+@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
+ D1("Pending read interrupt on port %d\n", i);
+ spin_lock(&serial->serial_lock);
+ if (serial->rx_state == RX_IDLE &&
+- serial->open_count > 0) {
++ local_read(&serial->open_count) > 0) {
+ /* Setup and send a ctrl req read on
+ * port i */
+ if (!serial->rx_urb_filled[0]) {
+@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
+ /* Start all serial ports */
+ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
+ if (serial_table[i] && (serial_table[i]->interface == iface)) {
+- if (dev2ser(serial_table[i])->open_count) {
++ if (local_read(&dev2ser(serial_table[i])->open_count)) {
+ result =
+ hso_start_serial_device(serial_table[i], GFP_NOIO);
+ hso_kick_transmit(dev2ser(serial_table[i]));
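+
+/*
+ * The hso conversion above replaces the plain int open_count with a
+ * local_t, so the first-open test becomes a single atomic update
+ * (local_inc_return) rather than an increment followed by a separate
+ * read. A rough sketch of the pattern, assuming <asm/local.h>;
+ * example_open()/example_close() are illustrative names:
+ */
+#if 0
+static local_t open_count = LOCAL_INIT(0);
+
+static int example_open(void)
+{
+	if (local_inc_return(&open_count) == 1)
+		;	/* first opener: do one-time setup */
+	return 0;
+}
+
+static void example_close(void)
+{
+	local_dec(&open_count);
+	if (local_read(&open_count) <= 0)
+		local_set(&open_count, 0);	/* clamp any underflow */
+}
+#endif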
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index e773250..9ee61ab 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
+ /* atomic counter partially included in MAC address to make sure 2 devices
+ * do not end up with the same MAC - concept breaks in case of > 255 ifaces
+ */
+-static atomic_t iface_counter = ATOMIC_INIT(0);
++static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
+
+ /*
+ * SYNC Timer Delay definition used to set the expiry time
+@@ -738,7 +738,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->netdev_ops = &sierra_net_device_ops;
+
+ /* change MAC addr to include, ifacenum, and to be unique */
+- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
++ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
+ dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+
+ /* we will have to manufacture ethernet headers, prepare template */
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index dc53a8f..2b66cc1 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -344,6 +344,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ unsigned long lockflags;
+ size_t size = dev->rx_urb_size;
+
++ /* prevent rx skb allocation when error ratio is high */
++ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
++ usb_free_urb(urb);
++ return -ENOLINK;
++ }
++
+ if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
+ netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
+ usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
+@@ -503,6 +509,17 @@ block:
+ break;
+ }
+
++ /* stop rx if packet error rate is high */
++ if (++dev->pkt_cnt > 30) {
++ dev->pkt_cnt = 0;
++ dev->pkt_err = 0;
++ } else {
++ if (state == rx_cleanup)
++ dev->pkt_err++;
++ if (dev->pkt_err > 20)
++ set_bit(EVENT_RX_KILL, &dev->flags);
++ }
++
+ state = defer_bh(dev, skb, &dev->rxq, state);
+
+ if (urb) {
+@@ -789,6 +806,11 @@ int usbnet_open (struct net_device *net)
+ (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
+ "simple");
+
++ /* reset rx error state */
++ dev->pkt_cnt = 0;
++ dev->pkt_err = 0;
++ clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ // delay posting reads until we're fully open
+ tasklet_schedule (&dev->bh);
+ if (info->manage_power) {
+@@ -1227,6 +1249,9 @@ static void usbnet_bh (unsigned long param)
+ }
+ }
+
++ /* restart RX again after disabling due to high error rate */
++ clear_bit(EVENT_RX_KILL, &dev->flags);
++
+ // waiting for all pending urbs to complete?
+ if (dev->wait) {
+ if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
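+
+/*
+ * The usbnet hunks above wire in a simple error-rate breaker: rx
+ * completions are counted in windows of 30 packets, and once more than 20
+ * in a window end in rx_cleanup, EVENT_RX_KILL stops new rx skb allocation
+ * until the bottom half clears it again. Condensed decision logic, using
+ * the field names from the hunks:
+ */
+#if 0
+	if (++dev->pkt_cnt > 30) {	/* window full: start a fresh one */
+		dev->pkt_cnt = 0;
+		dev->pkt_err = 0;
+	} else {
+		if (state == rx_cleanup)
+			dev->pkt_err++;
+		if (dev->pkt_err > 20)	/* >20/30 bad: trip the breaker */
+			set_bit(EVENT_RX_KILL, &dev->flags);
+	}
+#endif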
+diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
+index e662cbc..8d4a102 100644
+--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
+@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
+ * Return with error code if any of the queue indices
+ * is out of range
+ */
+- if (p->ring_index[i] < 0 ||
+- p->ring_index[i] >= adapter->num_rx_queues)
++ if (p->ring_index[i] >= adapter->num_rx_queues)
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
+index ac1176a..79e93d4 100644
+--- a/drivers/net/wireless/airo.c
++++ b/drivers/net/wireless/airo.c
+@@ -7885,7 +7885,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+ struct airo_info *ai = dev->ml_priv;
+ int ridcode;
+ int enabled;
+- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
++ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
+ unsigned char *iobuf;
+
+ /* Only super-user can write RIDs */
+diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
+index 4045e5a..506f1cf 100644
+--- a/drivers/net/wireless/at76c50x-usb.c
++++ b/drivers/net/wireless/at76c50x-usb.c
+@@ -353,7 +353,7 @@ static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state)
+ }
+
+ /* Convert timeout from the DFU status to jiffies */
+-static inline unsigned long at76_get_timeout(struct dfu_status *s)
++static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
+ {
+ return msecs_to_jiffies((s->poll_timeout[2] << 16)
+ | (s->poll_timeout[1] << 8)
+diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
+index b346d04..04436fa 100644
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -1791,7 +1791,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ {
+ int ret;
+ struct ath5k_hw *ah = hw->priv;
+- struct ath5k_vif *avf = (void *)vif->drv_priv;
++ struct ath5k_vif *avf;
+ struct sk_buff *skb;
+
+ if (WARN_ON(!vif)) {
+@@ -1806,6 +1806,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ goto out;
+ }
+
++ avf = (void *)vif->drv_priv;
+ ath5k_txbuf_free_skb(ah, avf->bbuf);
+ avf->bbuf->skb = skb;
+ ret = ath5k_beacon_setup(ah, avf->bbuf);
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index b592016..fe47870 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+
+- ACCESS_ONCE(ads->ds_link) = i->link;
+- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->ds_link) = i->link;
++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
+
+ ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+ ctl6 = SM(i->keytype, AR_EncrType);
+@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+
+ if ((i->is_first || i->is_last) &&
+ i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ds_ctl2) = 0;
+- ACCESS_ONCE(ads->ds_ctl3) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
+ }
+
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ds_ctl0) = 0;
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+ return;
+ }
+
+@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+ break;
+ }
+
+- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+
+ if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+ return;
+
+- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+index f5ae3c6..7936af3 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+ (i->qcu << AR_TxQcuNum_S) | 0x17;
+
+ checksum += val;
+- ACCESS_ONCE(ads->info) = val;
++ ACCESS_ONCE_RW(ads->info) = val;
+
+ checksum += i->link;
+- ACCESS_ONCE(ads->link) = i->link;
++ ACCESS_ONCE_RW(ads->link) = i->link;
+
+ checksum += i->buf_addr[0];
+- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
+ checksum += i->buf_addr[1];
+- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
+ checksum += i->buf_addr[2];
+- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
+ checksum += i->buf_addr[3];
+- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
+
+ checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl3) = val;
++ ACCESS_ONCE_RW(ads->ctl3) = val;
+ checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl5) = val;
++ ACCESS_ONCE_RW(ads->ctl5) = val;
+ checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl7) = val;
++ ACCESS_ONCE_RW(ads->ctl7) = val;
+ checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl9) = val;
++ ACCESS_ONCE_RW(ads->ctl9) = val;
+
+ checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+- ACCESS_ONCE(ads->ctl10) = checksum;
++ ACCESS_ONCE_RW(ads->ctl10) = checksum;
+
+ if (i->is_first || i->is_last) {
+- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ctl13) = 0;
+- ACCESS_ONCE(ads->ctl14) = 0;
++ ACCESS_ONCE_RW(ads->ctl13) = 0;
++ ACCESS_ONCE_RW(ads->ctl14) = 0;
+ }
+
+ ads->ctl20 = 0;
+@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+
+ ctl17 = SM(i->keytype, AR_EncrType);
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ctl11) = 0;
+- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+- ACCESS_ONCE(ads->ctl15) = 0;
+- ACCESS_ONCE(ads->ctl16) = 0;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
+- ACCESS_ONCE(ads->ctl18) = 0;
+- ACCESS_ONCE(ads->ctl19) = 0;
++ ACCESS_ONCE_RW(ads->ctl11) = 0;
++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
++ ACCESS_ONCE_RW(ads->ctl15) = 0;
++ ACCESS_ONCE_RW(ads->ctl16) = 0;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl18) = 0;
++ ACCESS_ONCE_RW(ads->ctl19) = 0;
+ return;
+ }
+
+- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+ val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+ ctl12 |= SM(val, AR_PAPRDChainMask);
+
+- ACCESS_ONCE(ads->ctl12) = ctl12;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+
+- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
+ }
+
+ static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index dc774cd..fd6efed 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -607,7 +607,7 @@ struct ath_hw_private_ops {
+
+ /* ANI */
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
+-};
++} __no_const;
+
+ /**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+@@ -637,7 +637,7 @@ struct ath_hw_ops {
+ void (*antdiv_comb_conf_set)(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf);
+
+-};
++} __no_const;
+
+ struct ath_nf_limits {
+ s16 max;
+diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
+index f93d66b..a6c7765 100644
+--- a/drivers/net/wireless/b43/phy_lp.c
++++ b/drivers/net/wireless/b43/phy_lp.c
+@@ -2520,7 +2520,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
+ {
+ struct ssb_bus *bus = dev->dev->sdev->bus;
+
+- static const struct b206x_channel *chandata = NULL;
++ const struct b206x_channel *chandata = NULL;
+ u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
+ u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
+ u16 old_comm15, scale;
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+index 62dc461..5250f0b 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+@@ -175,7 +175,7 @@ struct brcmf_cfg80211_event_loop {
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e,
+ void *data);
+-};
++} __no_const;
+
+ /* representing interface of cfg80211 plane */
+ struct brcmf_cfg80211_iface {
+@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
+ struct brcmf_cfg80211_iscan_eloop {
+ s32 (*handler[WL_SCAN_ERSULTS_LAST])
+ (struct brcmf_cfg80211_priv *cfg_priv);
+-};
++} __no_const;
+
+ /* dongle iscan controller */
+ struct brcmf_cfg80211_iscan_ctrl {
+diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+index b3d9f3f..9931f58 100644
+--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
+ */
+ if (iwl3945_mod_params.disable_hw_scan) {
+ IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
+- iwl3945_hw_ops.hw_scan = NULL;
++ pax_open_kernel();
++ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
++ pax_close_kernel();
+ }
+
+ IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
+index 69a77e24..552b42c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
++++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
+@@ -71,8 +71,8 @@ do { \
+ } while (0)
+
+ #else
+-#define IWL_DEBUG(m, level, fmt, args...)
+-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
++#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
++#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
+ #define iwl_print_hex_dump(m, level, p, len)
+ #endif /* CONFIG_IWLWIFI_DEBUG */
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+index 93e6179..b221e4f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+@@ -163,7 +163,7 @@ static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
+ struct iwl_priv *priv = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+@@ -311,7 +311,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[64];
+- int buf_size;
++ size_t buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+@@ -601,7 +601,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
+ struct iwl_priv *priv = file->private_data;
+
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+@@ -682,7 +682,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+@@ -737,7 +737,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int value;
+
+ memset(buf, 0, sizeof(buf));
+@@ -897,7 +897,7 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+@@ -912,10 +912,10 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
+ return count;
+ }
+
+-static const char *fmt_value = " %-30s %10u\n";
+-static const char *fmt_hex = " %-30s 0x%02X\n";
+-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+-static const char *fmt_header =
++static const char fmt_value[] = " %-30s %10u\n";
++static const char fmt_hex[] = " %-30s 0x%02X\n";
++static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
++static const char fmt_header[] =
+ "%-32s current cumulative delta max\n";
+
+ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+@@ -2078,7 +2078,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2123,7 +2123,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2193,7 +2193,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2234,7 +2234,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2288,7 +2288,7 @@ static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int reset, ret;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2314,7 +2314,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int flush;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2338,7 +2338,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+@@ -2427,7 +2427,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int rts;
+
+ if (!priv->cfg->ht_params)
+@@ -2452,7 +2452,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+ {
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 75da4bc..7737dff 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -1890,7 +1890,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+@@ -1911,7 +1911,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ {
+ struct iwl_trans *trans = file->private_data;
+ char buf[8];
+- int buf_size;
++ size_t buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 523ad55..f8c5dc5 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
+ return -EINVAL;
+
+ if (fake_hw_scan) {
+- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+- mac80211_hwsim_ops.sw_scan_start = NULL;
+- mac80211_hwsim_ops.sw_scan_complete = NULL;
++ pax_open_kernel();
++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
++ pax_close_kernel();
+ }
+
+ spin_lock_init(&hwsim_radio_lock);
+diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
+index d26a78b..156ad04 100644
+--- a/drivers/net/wireless/mwifiex/debugfs.c
++++ b/drivers/net/wireless/mwifiex/debugfs.c
+@@ -26,10 +26,17 @@
+ static struct dentry *mwifiex_dfs_dir;
+
+ static char *bss_modes[] = {
+- "Unknown",
+- "Ad-hoc",
+- "Managed",
+- "Auto"
++ "UNSPECIFIED",
++ "ADHOC",
++ "STATION",
++ "AP",
++ "AP_VLAN",
++ "WDS",
++ "MONITOR",
++ "MESH_POINT",
++ "P2P_CLIENT",
++ "P2P_GO",
++ "P2P_DEVICE",
+ };
+
+ /* size/addr for mwifiex_debug_info */
+@@ -213,7 +220,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
+ p += sprintf(p, "driver_version = %s", fmt);
+ p += sprintf(p, "\nverext = %s", priv->version_str);
+ p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
+- p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
++
++ if (info.bss_mode >= ARRAY_SIZE(bss_modes))
++ p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
++ else
++ p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
++
+ p += sprintf(p, "media_state=\"%s\"\n",
+ (!priv->media_connected ? "Disconnected" : "Connected"));
+ p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index 0c13840..a5c3ed6 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
+
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
+
+- if (rts_threshold < 0 || rts_threshold > 2347)
++ if (rts_threshold > 2347)
+ rts_threshold = 2347;
+
+ tmp = cpu_to_le32(rts_threshold);
+diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
+index e2750a1..797e179 100644
+--- a/drivers/net/wireless/wl1251/sdio.c
++++ b/drivers/net/wireless/wl1251/sdio.c
+@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+
+ irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
++ pax_open_kernel();
++ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
++ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
++ pax_close_kernel();
+
+ wl1251_info("using dedicated interrupt line");
+ } else {
+- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
++ pax_open_kernel();
++ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
++ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
++ pax_close_kernel();
+
+ wl1251_info("using SDIO interrupt");
+ }
+diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
+index 785bdbe..ddde2d1 100644
+--- a/drivers/net/wireless/zd1211rw/zd_usb.c
++++ b/drivers/net/wireless/zd1211rw/zd_usb.c
+@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
+ {
+ struct zd_usb *usb = urb->context;
+ struct zd_usb_interrupt *intr = &usb->intr;
+- int len;
++ unsigned int len;
+ u16 int_num;
+
+ ZD_ASSERT(in_interrupt());
+diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
+index 06c3642..c4ee5f2 100644
+--- a/drivers/nfc/nfcwilink.c
++++ b/drivers/nfc/nfcwilink.c
+@@ -237,7 +237,7 @@ static struct nci_ops nfcwilink_ops = {
+
+ static int nfcwilink_probe(struct platform_device *pdev)
+ {
+- static struct nfcwilink *drv;
++ struct nfcwilink *drv;
+ int rc;
+ u32 protocols;
+
+diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
+index f34b5b2..b5abb9f 100644
+--- a/drivers/oprofile/buffer_sync.c
++++ b/drivers/oprofile/buffer_sync.c
+@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
+ if (cookie == NO_COOKIE)
+ offset = pc;
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ offset = pc;
+ }
+ if (cookie != last_cookie) {
+@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
+ /* add userspace sample */
+
+ if (!mm) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mm);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
+ return 0;
+ }
+
+ cookie = lookup_dcookie(mm, s->eip, &offset);
+
+ if (cookie == INVALID_COOKIE) {
+- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
+ return 0;
+ }
+
+@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
+ /* ignore backtraces if failed to add a sample */
+ if (state == sb_bt_start) {
+ state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
+ }
+ }
+ release_mm(mm);
+diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
+index c0cc4e7..44d4e54 100644
+--- a/drivers/oprofile/event_buffer.c
++++ b/drivers/oprofile/event_buffer.c
+@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
+ }
+
+ if (buffer_pos == buffer_size) {
+- atomic_inc(&oprofile_stats.event_lost_overflow);
++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
+ return;
+ }
+
+diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
+index f8c752e..28bf4fc 100644
+--- a/drivers/oprofile/oprof.c
++++ b/drivers/oprofile/oprof.c
+@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
+ if (oprofile_ops.switch_events())
+ return;
+
+- atomic_inc(&oprofile_stats.multiplex_counter);
++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
+ start_switch_worker();
+ }
+
+diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
+index 84a208d..d61b0a1 100644
+--- a/drivers/oprofile/oprofile_files.c
++++ b/drivers/oprofile/oprofile_files.c
+@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
+
+ #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+-static ssize_t timeout_read(struct file *file, char __user *buf,
++static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+ {
+ return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
+index 917d28e..d62d981 100644
+--- a/drivers/oprofile/oprofile_stats.c
++++ b/drivers/oprofile/oprofile_stats.c
+@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
+ cpu_buf->sample_invalid_eip = 0;
+ }
+
+- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.event_lost_overflow, 0);
+- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+- atomic_set(&oprofile_stats.multiplex_counter, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
+ }
+
+
+diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
+index 38b6fc0..b5cbfce 100644
+--- a/drivers/oprofile/oprofile_stats.h
++++ b/drivers/oprofile/oprofile_stats.h
+@@ -13,11 +13,11 @@
+ #include <linux/atomic.h>
+
+ struct oprofile_stat_struct {
+- atomic_t sample_lost_no_mm;
+- atomic_t sample_lost_no_mapping;
+- atomic_t bt_lost_no_mapping;
+- atomic_t event_lost_overflow;
+- atomic_t multiplex_counter;
++ atomic_unchecked_t sample_lost_no_mm;
++ atomic_unchecked_t sample_lost_no_mapping;
++ atomic_unchecked_t bt_lost_no_mapping;
++ atomic_unchecked_t event_lost_overflow;
++ atomic_unchecked_t multiplex_counter;
+ };
+
+ extern struct oprofile_stat_struct oprofile_stats;
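+
+/*
+ * The oprofile hunks above convert pure statistics counters from atomic_t
+ * to atomic_unchecked_t. Under PaX REFCOUNT, atomic_t overflow is treated
+ * as a refcount bug and trapped; counters that merely tally events may
+ * wrap harmlessly, so they move to the unchecked variant. Sketch of the
+ * split (obj_refs/lost_events are illustrative names):
+ */
+#if 0
+static atomic_t obj_refs = ATOMIC_INIT(1);		/* checked: overflow traps */
+static atomic_unchecked_t lost_events = ATOMIC_INIT(0);	/* may wrap freely */
+
+static void example_account_loss(void)
+{
+	atomic_inc_unchecked(&lost_events);
+}
+#endif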
+diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
+index 2f0aa0f..1ba4404 100644
+--- a/drivers/oprofile/oprofilefs.c
++++ b/drivers/oprofile/oprofilefs.c
+@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
+
+
+ int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+- char const *name, atomic_t *val)
++ char const *name, atomic_unchecked_t *val)
+ {
+ return __oprofilefs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444, val);
+@@ -279,6 +279,7 @@ static struct file_system_type oprofilefs_type = {
+ .mount = oprofilefs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("oprofilefs");
+
+
+ int __init oprofilefs_register(void)
+diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
+index 878fba1..2084bcf 100644
+--- a/drivers/oprofile/timer_int.c
++++ b/drivers/oprofile/timer_int.c
+@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata oprofile_cpu_notifier = {
++static struct notifier_block oprofile_cpu_notifier = {
+ .notifier_call = oprofile_cpu_notify,
+ };
+
+diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
+index 3f56bc0..707d642 100644
+--- a/drivers/parport/procfs.c
++++ b/drivers/parport/procfs.c
+@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
+
+ *ppos += len;
+
+- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
+ }
+
+ #ifdef CONFIG_PARPORT_1284
+@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
+
+ *ppos += len;
+
+- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
++ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
+ }
+ #endif /* IEEE1284.3 support. */
+
+diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
+index e525263..ebde92e 100644
+--- a/drivers/pci/hotplug/acpiphp_ibm.c
++++ b/drivers/pci/hotplug/acpiphp_ibm.c
+@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
+ goto init_cleanup;
+ }
+
+- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
++ pax_open_kernel();
++ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
++ pax_close_kernel();
+ retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
+
+ return retval;
+diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
+index fb3f846..792d643 100644
+--- a/drivers/pci/hotplug/cpcihp_generic.c
++++ b/drivers/pci/hotplug/cpcihp_generic.c
+@@ -73,7 +73,6 @@ static u16 port;
+ static unsigned int enum_bit;
+ static u8 enum_mask;
+
+-static struct cpci_hp_controller_ops generic_hpc_ops;
+ static struct cpci_hp_controller generic_hpc;
+
+ static int __init validate_parameters(void)
+@@ -139,6 +138,10 @@ static int query_enum(void)
+ return ((value & enum_mask) == enum_mask);
+ }
+
++static struct cpci_hp_controller_ops generic_hpc_ops = {
++ .query_enum = query_enum,
++};
++
+ static int __init cpcihp_generic_init(void)
+ {
+ int status;
+@@ -169,7 +172,6 @@ static int __init cpcihp_generic_init(void)
+ pci_dev_put(dev);
+
+ memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
+- generic_hpc_ops.query_enum = query_enum;
+ generic_hpc.ops = &generic_hpc_ops;
+
+ status = cpci_hp_register_controller(&generic_hpc);
+diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
+index 41f6a8d..da73050 100644
+--- a/drivers/pci/hotplug/cpcihp_zt5550.c
++++ b/drivers/pci/hotplug/cpcihp_zt5550.c
+@@ -59,7 +59,6 @@
+ /* local variables */
+ static int debug;
+ static int poll;
+-static struct cpci_hp_controller_ops zt5550_hpc_ops;
+ static struct cpci_hp_controller zt5550_hpc;
+
+ /* Primary cPCI bus bridge device */
+@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
+ return 0;
+ }
+
++static struct cpci_hp_controller_ops zt5550_hpc_ops = {
++ .query_enum = zt5550_hc_query_enum,
++};
++
+ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ int status;
+@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
+ dbg("returned from zt5550_hc_config");
+
+ memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
+- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+ zt5550_hpc.ops = &zt5550_hpc_ops;
+ if(!poll) {
+ zt5550_hpc.irq = hc_dev->irq;
+ zt5550_hpc.irq_flags = IRQF_SHARED;
+ zt5550_hpc.dev_id = hc_dev;
+
+- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++ pax_open_kernel();
++ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
++ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
++ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++	pax_close_kernel();
+ } else {
+ info("using ENUM# polling mode");
+ }
+diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
+index 76ba8a1..20ca857 100644
+--- a/drivers/pci/hotplug/cpqphp_nvram.c
++++ b/drivers/pci/hotplug/cpqphp_nvram.c
+@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
+
+ void compaq_nvram_init (void __iomem *rom_start)
+ {
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if (rom_start) {
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+ }
++#endif
++
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
+index 6d2eea9..4bf3318 100644
+--- a/drivers/pci/hotplug/pci_hotplug_core.c
++++ b/drivers/pci/hotplug/pci_hotplug_core.c
+@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ return -EINVAL;
+ }
+
+- slot->ops->owner = owner;
+- slot->ops->mod_name = mod_name;
++ pax_open_kernel();
++ *(struct module **)&slot->ops->owner = owner;
++ *(const char **)&slot->ops->mod_name = mod_name;
++ pax_close_kernel();
+
+ mutex_lock(&pci_hp_mutex);
+ /*
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 9350af9..68623c4 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
+ struct slot *slot = ctrl->slot;
+ struct hotplug_slot *hotplug = NULL;
+ struct hotplug_slot_info *info = NULL;
+- struct hotplug_slot_ops *ops = NULL;
++ hotplug_slot_ops_no_const *ops = NULL;
+ char name[SLOT_NAME_SIZE];
+ int retval = -ENOMEM;
+
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 106be0d..4a79e67 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -505,6 +505,10 @@ pci_write_config(struct file* filp, struct kobject *kobj,
+ loff_t init_off = off;
+ u8 *data = (u8*) buf;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ if (off > dev->cfg_size)
+ return 0;
+ if (off + count > dev->cfg_size) {
+@@ -807,6 +811,10 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+ resource_size_t start, end;
+ int i;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ if (res == &pdev->resource[i])
+ break;
+@@ -914,6 +922,10 @@ pci_write_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ return pci_resource_io(filp, kobj, attr, buf, off, count, true);
+ }
+
+@@ -950,7 +962,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ {
+ /* allocate attribute structure, piggyback attribute name */
+ int name_len = write_combine ? 13 : 10;
+- struct bin_attribute *res_attr;
++ bin_attribute_no_const *res_attr;
+ int retval;
+
+ res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
+@@ -1135,7 +1147,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
+ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+ {
+ int retval;
+- struct bin_attribute *attr;
++ bin_attribute_no_const *attr;
+
+ /* If the device has VPD, try to expose it in sysfs. */
+ if (dev->vpd) {
+@@ -1182,7 +1194,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
+ {
+ int retval;
+ int rom_size = 0;
+- struct bin_attribute *attr;
++ bin_attribute_no_const *attr;
+
+ if (!sysfs_initialized)
+ return -EACCES;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index b74084e..a9c2922 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -101,7 +101,7 @@ struct pci_vpd_ops {
+ struct pci_vpd {
+ unsigned int len;
+ const struct pci_vpd_ops *ops;
+- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
++ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
+ };
+
+ extern int pci_vpd_pci22_init(struct pci_dev *dev);
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index c73ed00..cc3edec 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -27,9 +27,9 @@
+ #define MODULE_PARAM_PREFIX "pcie_aspm."
+
+ /* Note: those are not register definitions */
+-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+-#define ASPM_STATE_L1 (4) /* L1 state */
++#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
++#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
++#define ASPM_STATE_L1 (4U) /* L1 state */
+ #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+ #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 9005380..c497080 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ u32 l, sz, mask;
+ u16 orig_cmd;
+
+- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
++ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
+
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
+index 27911b5..2bf4653 100644
+--- a/drivers/pci/proc.c
++++ b/drivers/pci/proc.c
+@@ -135,6 +135,10 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
+ int size = dp->size;
+ int cnt;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ if (pos >= size)
+ return 0;
+ if (nbytes >= size)
+@@ -211,6 +215,10 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
+ #endif /* HAVE_PCI_MMAP */
+ int ret = 0;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ switch (cmd) {
+ case PCIIOC_CONTROLLER:
+ ret = pci_domain_nr(dev->bus);
+@@ -251,6 +259,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
+ struct pci_filp_private *fpriv = file->private_data;
+ int i, ret;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+@@ -476,7 +488,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
+ static int __init pci_proc_init(void)
+ {
+ struct pci_dev *dev = NULL;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
++#endif
+ proc_create("devices", 0, proc_bus_pci_dir,
+ &proc_bus_pci_dev_operations);
+ proc_initialized = 1;
+diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
+index e1c1ec5..bef4210 100644
+--- a/drivers/pci/syscall.c
++++ b/drivers/pci/syscall.c
+@@ -92,6 +92,10 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ u32 dword;
+ int err = 0;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 26fba2d..693b4d3 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -1463,6 +1463,10 @@ static int show_dsts(struct seq_file *m, void *data)
+ int err;
+ u32 retval = -1;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
+
+ if (err < 0)
+@@ -1479,6 +1483,10 @@ static int show_devs(struct seq_file *m, void *data)
+ int err;
+ u32 retval = -1;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
+ &retval);
+
+@@ -1503,6 +1511,10 @@ static int show_call(struct seq_file *m, void *data)
+ union acpi_object *obj;
+ acpi_status status;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ return -EPERM;
++#endif
++
+ status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
+ 1, asus->debug.method_id,
+ &input, &output);
+diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
+index f204643..9ccea74 100644
+--- a/drivers/platform/x86/msi-laptop.c
++++ b/drivers/platform/x86/msi-laptop.c
+@@ -815,12 +815,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
+ int result;
+
+ /* allow userland write sysfs file */
+- dev_attr_bluetooth.store = store_bluetooth;
+- dev_attr_wlan.store = store_wlan;
+- dev_attr_threeg.store = store_threeg;
+- dev_attr_bluetooth.attr.mode |= S_IWUSR;
+- dev_attr_wlan.attr.mode |= S_IWUSR;
+- dev_attr_threeg.attr.mode |= S_IWUSR;
++ pax_open_kernel();
++ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
++ *(void **)&dev_attr_wlan.store = store_wlan;
++ *(void **)&dev_attr_threeg.store = store_threeg;
++ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
++ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
++ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
++ pax_close_kernel();
+
+ /* disable hardware control by fn key */
+ result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
+diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
+index b96766b..909c5a0 100644
+--- a/drivers/platform/x86/msi-wmi.c
++++ b/drivers/platform/x86/msi-wmi.c
+@@ -147,7 +147,7 @@ static const struct backlight_ops msi_backlight_ops = {
+ static void msi_wmi_notify(u32 value, void *context)
+ {
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+- static struct key_entry *key;
++ struct key_entry *key;
+ union acpi_object *obj;
+ ktime_t cur;
+ acpi_status status;
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 8e6c4fa..a7539b3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
+ return 0;
+ }
+
+-void static hotkey_mask_warn_incomplete_mask(void)
++static void hotkey_mask_warn_incomplete_mask(void)
+ {
+ /* log only what the user can fix... */
+ const u32 wantedmask = hotkey_driver_mask &
+@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
+ }
+ }
+
+-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+- struct tp_nvram_state *newn,
+- const u32 event_mask)
+-{
+-
+ #define TPACPI_COMPARE_KEY(__scancode, __member) \
+ do { \
+ if ((event_mask & (1 << __scancode)) && \
+@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ tpacpi_hotkey_send_key(__scancode); \
+ } while (0)
+
+- void issue_volchange(const unsigned int oldvol,
+- const unsigned int newvol)
+- {
+- unsigned int i = oldvol;
++static void issue_volchange(const unsigned int oldvol,
++ const unsigned int newvol,
++ const u32 event_mask)
++{
++ unsigned int i = oldvol;
+
+- while (i > newvol) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+- i--;
+- }
+- while (i < newvol) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+- i++;
+- }
++ while (i > newvol) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
++ i--;
+ }
++ while (i < newvol) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
++ i++;
++ }
++}
+
+- void issue_brightnesschange(const unsigned int oldbrt,
+- const unsigned int newbrt)
+- {
+- unsigned int i = oldbrt;
++static void issue_brightnesschange(const unsigned int oldbrt,
++ const unsigned int newbrt,
++ const u32 event_mask)
++{
++ unsigned int i = oldbrt;
+
+- while (i > newbrt) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+- i--;
+- }
+- while (i < newbrt) {
+- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+- i++;
+- }
++ while (i > newbrt) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
++ i--;
++ }
++ while (i < newbrt) {
++ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
++ i++;
+ }
++}
+
++static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
++ struct tp_nvram_state *newn,
++ const u32 event_mask)
++{
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ oldn->volume_level != newn->volume_level) {
+ /* recently muted, or repeated mute keypress, or
+ * multiple presses ending in mute */
+- issue_volchange(oldn->volume_level, newn->volume_level);
++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ }
+ } else {
+@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ }
+ if (oldn->volume_level != newn->volume_level) {
+- issue_volchange(oldn->volume_level, newn->volume_level);
++ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
+ } else if (oldn->volume_toggle != newn->volume_toggle) {
+ /* repeated vol up/down keypress at end of scale ? */
+ if (newn->volume_level == 0)
+@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ /* handle brightness */
+ if (oldn->brightness_level != newn->brightness_level) {
+ issue_brightnesschange(oldn->brightness_level,
+- newn->brightness_level);
++ newn->brightness_level,
++ event_mask);
+ } else if (oldn->brightness_toggle != newn->brightness_toggle) {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level == 0)
+@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ && !tp_features.bright_unkfw)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ }
++}
+
+ #undef TPACPI_COMPARE_KEY
+ #undef TPACPI_MAY_SEND_KEY
+-}
+
+ /*
+ * Polling driver
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index a134c26..d024437 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -743,7 +743,7 @@ static int wmi_create_device(const struct guid_block *gblock,
+ wblock->dev.class = &wmi_class;
+
+ wmi_gtoa(gblock->guid, guid_string);
+- dev_set_name(&wblock->dev, guid_string);
++ dev_set_name(&wblock->dev, "%s", guid_string);
+
+ dev_set_drvdata(&wblock->dev, wblock);
+
+diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
+index b859d16..5cc6b1a 100644
+--- a/drivers/pnp/pnpbios/bioscalls.c
++++ b/drivers/pnp/pnpbios/bioscalls.c
+@@ -59,7 +59,7 @@ do { \
+ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ /*
+@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+
+ cpu = get_cpu();
+ save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
++
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+ :"memory");
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
++ pax_open_kernel();
+ get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
+ return status;
+ }
+
+-void pnpbios_calls_init(union pnp_bios_install_struct *header)
++void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
+ {
+ int i;
+
+@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+ pnp_bios_callpoint.offset = header->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+
++ pax_open_kernel();
++
+ for_each_possible_cpu(i) {
+ struct desc_struct *gdt = get_cpu_gdt_table(i);
+ if (!gdt)
+@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+ (unsigned long)__va(header->fields.pm16dseg));
+ }
++
++ pax_close_kernel();
+ }
+diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
+index b0ecacb..7c9da2e 100644
+--- a/drivers/pnp/resource.c
++++ b/drivers/pnp/resource.c
+@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
+ return 1;
+
+ /* check if the resource is valid */
+- if (*irq < 0 || *irq > 15)
++ if (*irq > 15)
+ return 0;
+
+ /* check if the resource is reserved */
+@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
+ return 1;
+
+ /* check if the resource is valid */
+- if (*dma < 0 || *dma == 4 || *dma > 7)
++ if (*dma == 4 || *dma > 7)
+ return 0;
+
+ /* check if the resource is reserved */
+diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
+index 018de2b..bc8e317 100644
+--- a/drivers/power/power_supply.h
++++ b/drivers/power/power_supply.h
+@@ -12,12 +12,12 @@
+
+ #ifdef CONFIG_SYSFS
+
+-extern void power_supply_init_attrs(struct device_type *dev_type);
++extern void power_supply_init_attrs(void);
+ extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
+
+ #else
+
+-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
++static inline void power_supply_init_attrs(void) {}
+ #define power_supply_uevent NULL
+
+ #endif /* CONFIG_SYSFS */
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 329b46b..1b17633 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -23,7 +23,10 @@
+ struct class *power_supply_class;
+ EXPORT_SYMBOL_GPL(power_supply_class);
+
+-static struct device_type power_supply_dev_type;
++extern const struct attribute_group *power_supply_attr_groups[];
++static struct device_type power_supply_dev_type = {
++ .groups = power_supply_attr_groups,
++};
+
+ static int __power_supply_changed_work(struct device *dev, void *data)
+ {
+@@ -215,7 +218,7 @@ static int __init power_supply_class_init(void)
+ return PTR_ERR(power_supply_class);
+
+ power_supply_class->dev_uevent = power_supply_uevent;
+- power_supply_init_attrs(&power_supply_dev_type);
++ power_supply_init_attrs();
+
+ return 0;
+ }
+diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
+index e15d4c9..83cd617 100644
+--- a/drivers/power/power_supply_sysfs.c
++++ b/drivers/power/power_supply_sysfs.c
+@@ -208,17 +208,15 @@ static struct attribute_group power_supply_attr_group = {
+ .is_visible = power_supply_attr_is_visible,
+ };
+
+-static const struct attribute_group *power_supply_attr_groups[] = {
++const struct attribute_group *power_supply_attr_groups[] = {
+ &power_supply_attr_group,
+ NULL,
+ };
+
+-void power_supply_init_attrs(struct device_type *dev_type)
++void power_supply_init_attrs(void)
+ {
+ int i;
+
+- dev_type->groups = power_supply_attr_groups;
+-
+ for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
+ __power_supply_attrs[i] = &power_supply_attrs[i].attr;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 6ec610c..078eaf3 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2639,7 +2639,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
+ struct device *dev, const struct regulator_init_data *init_data,
+ void *driver_data)
+ {
+- static atomic_t regulator_no = ATOMIC_INIT(0);
++ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
+ struct regulator_dev *rdev;
+ int ret, i;
+
+@@ -2698,7 +2698,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
+ rdev->dev.class = &regulator_class;
+ rdev->dev.parent = dev;
+ dev_set_name(&rdev->dev, "regulator.%d",
+- atomic_inc_return(&regulator_no) - 1);
++ atomic_inc_return_unchecked(&regulator_no) - 1);
+ ret = device_register(&rdev->dev);
+ if (ret != 0) {
+ put_device(&rdev->dev);
+diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
+index 33f5d9a..d957d3f 100644
+--- a/drivers/regulator/max8660.c
++++ b/drivers/regulator/max8660.c
+@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
+ max8660->shadow_regs[MAX8660_OVER1] = 5;
+ } else {
+ /* Otherwise devices can be toggled via software */
+- max8660_dcdc_ops.enable = max8660_dcdc_enable;
+- max8660_dcdc_ops.disable = max8660_dcdc_disable;
++ pax_open_kernel();
++ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
++ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
++ pax_close_kernel();
+ }
+
+ /*
+diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
+index 023d17d..74ef35b 100644
+--- a/drivers/regulator/mc13892-regulator.c
++++ b/drivers/regulator/mc13892-regulator.c
+@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+ }
+ mc13xxx_unlock(mc13892);
+
+- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
++ pax_open_kernel();
++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+ = mc13892_vcam_set_mode;
+- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
++ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+ = mc13892_vcam_get_mode;
++ pax_close_kernel();
+ for (i = 0; i < pdata->num_regulators; i++) {
+ init_data = &pdata->regulators[i];
+ priv->regulators[i] = regulator_register(
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index e3eed18..155946b 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ hpet_rtc_timer_init();
+
+ /* export at least the first block of NVRAM */
+- nvram.size = address_space - NVRAM_OFFSET;
++ pax_open_kernel();
++ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
++ pax_close_kernel();
+ retval = sysfs_create_bin_file(&dev->kobj, &nvram);
+ if (retval < 0) {
+ dev_dbg(dev, "can't create nvram file? %d\n", retval);
+diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
+index cace6d3..f623fda 100644
+--- a/drivers/rtc/rtc-dev.c
++++ b/drivers/rtc/rtc-dev.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include "rtc-core.h"
+
+ static dev_t rtc_devt;
+@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
+ if (copy_from_user(&tm, uarg, sizeof(tm)))
+ return -EFAULT;
+
++ gr_log_timechange();
++
+ return rtc_set_time(rtc, &tm);
+
+ case RTC_PIE_ON:
+diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
+index 2836538..30edf9d 100644
+--- a/drivers/rtc/rtc-m48t59.c
++++ b/drivers/rtc/rtc-m48t59.c
+@@ -482,7 +482,9 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
+ goto out;
+ }
+
+- m48t59_nvram_attr.size = pdata->offset;
++ pax_open_kernel();
++ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
++ pax_close_kernel();
+
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+ if (ret) {
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 2e658d2..46f4afb 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
+ #elif defined(__devinitconst)
+ static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
+ #else
+-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
++static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
+ #endif
+ { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
+diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
+index 14b5f8d..cc9bd26 100644
+--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
++++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
+@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
+ for (bit = 0; bit < 8; bit++) {
+
+ if ((pci_status[i] & (0x1 << bit)) != 0) {
+- static const char *s;
++ const char *s;
+
+ s = pci_status_strings[bit];
+ if (i == 7/*TARG*/ && bit == 3)
+@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
+
+ for (bit = 0; bit < 8; bit++) {
+
+- if ((split_status[i] & (0x1 << bit)) != 0) {
+- static const char *s;
+-
+- s = split_status_strings[bit];
+- printk(s, ahd_name(ahd),
++ if ((split_status[i] & (0x1 << bit)) != 0)
++ printk(split_status_strings[bit], ahd_name(ahd),
+ split_status_source[i]);
+- }
+
+ if (i > 1)
+ continue;
+
+- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
+- static const char *s;
+-
+- s = split_status_strings[bit];
+- printk(s, ahd_name(ahd), "SG");
+- }
++ if ((sg_split_status[i] & (0x1 << bit)) != 0)
++ printk(split_status_strings[bit], ahd_name(ahd), "SG");
+ }
+ }
+ /*
+diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
+index d5ff142..49c0ebb 100644
+--- a/drivers/scsi/aic94xx/aic94xx_init.c
++++ b/drivers/scsi/aic94xx/aic94xx_init.c
+@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
+ .lldd_control_phy = asd_control_phy,
+ };
+
+-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
++static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
+diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
+index 1080bcb..4a8ddd9 100644
+--- a/drivers/scsi/bfa/bfa_fcpim.h
++++ b/drivers/scsi/bfa/bfa_fcpim.h
+@@ -36,7 +36,7 @@ struct bfa_iotag_s {
+
+ struct bfa_itn_s {
+ bfa_isr_func_t isr;
+-};
++} __no_const;
+
+ void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 546d46b..642fa5b 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+-};
++} __no_const;
+
+ /*
+ * IOC event notification mechanism.
+@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
+ void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
+-};
++} __no_const;
+
+ /*
+ * Queue element to wait for room in request queue. FIFO order is
+diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
+index dee1a09..24adab6 100644
+--- a/drivers/scsi/bfa/bfad_debugfs.c
++++ b/drivers/scsi/bfa/bfad_debugfs.c
+@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+ file->f_pos += offset;
+ break;
+ case 2:
+- file->f_pos = debug->buffer_len - offset;
++ file->f_pos = debug->buffer_len + offset;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index e7522dc..f585e84 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -2030,7 +2030,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
+ */
+ port_id = fip->port_id;
+ if (fip->probe_tries)
+- port_id = prandom32(&fip->rnd_state) & 0xffff;
++ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
+ else if (!port_id)
+ port_id = fip->lp->wwpn & 0xffff;
+ if (!port_id || port_id == 0xffff)
+@@ -2055,7 +2055,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
+ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
+ {
+ fip->probe_tries = 0;
+- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
++ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
+ fcoe_ctlr_vn_restart(fip);
+ }
+
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index ee77a58..af9d518 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -42,7 +42,7 @@
+ #include "scsi_logging.h"
+
+
+-static atomic_t scsi_host_next_hn; /* host_no for next new host */
++static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
+
+
+ static void scsi_host_cls_release(struct device *dev)
+@@ -358,7 +358,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
+ * subtract one because we increment first then return, but we need to
+ * know what the next host number was before increment
+ */
+- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
++ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
+ shost->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 5b7e1bf..6e5521a 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -523,7 +523,7 @@ static inline u32 next_command(struct ctlr_info *h)
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+@@ -3034,7 +3034,7 @@ static void start_io(struct ctlr_info *h)
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
+ /* can't do anything if fifo is full */
+- if ((h->access.fifo_full(h))) {
++ if ((h->access->fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+@@ -3044,7 +3044,7 @@ static void start_io(struct ctlr_info *h)
+ h->Qdepth--;
+
+ /* Tell the controller execute command */
+- h->access.submit_command(h, c);
++ h->access->submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+@@ -3053,17 +3053,17 @@ static void start_io(struct ctlr_info *h)
+
+ static inline unsigned long get_next_completion(struct ctlr_info *h)
+ {
+- return h->access.command_completed(h);
++ return h->access->command_completed(h);
+ }
+
+ static inline bool interrupt_pending(struct ctlr_info *h)
+ {
+- return h->access.intr_pending(h);
++ return h->access->intr_pending(h);
+ }
+
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+- return (h->access.intr_pending(h) == 0) ||
++ return (h->access->intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0);
+ }
+
+@@ -3963,7 +3963,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ if (prod_index < 0)
+ return -ENODEV;
+ h->product_name = products[prod_index].product_name;
+- h->access = *(products[prod_index].access);
++ h->access = products[prod_index].access;
+
+ if (hpsa_board_disabled(h->pdev)) {
+ dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+@@ -4208,7 +4208,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+
+ assert_spin_locked(&lockup_detector_lock);
+ remove_ctlr_from_lockup_detector_list(h);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ spin_lock_irqsave(&h->lock, flags);
+ h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ spin_unlock_irqrestore(&h->lock, flags);
+@@ -4384,7 +4384,7 @@ reinit_after_soft_reset:
+ }
+
+ /* make sure the board interrupts are off */
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+
+ if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
+ goto clean2;
+@@ -4418,7 +4418,7 @@ reinit_after_soft_reset:
+ * fake ones to scoop up any residual completions.
+ */
+ spin_lock_irqsave(&h->lock, flags);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ spin_unlock_irqrestore(&h->lock, flags);
+ free_irq(h->intr[h->intr_mode], h);
+ rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+@@ -4437,9 +4437,9 @@ reinit_after_soft_reset:
+ dev_info(&h->pdev->dev, "Board READY.\n");
+ dev_info(&h->pdev->dev,
+ "Waiting for stale completions to drain.\n");
+- h->access.set_intr_mask(h, HPSA_INTR_ON);
++ h->access->set_intr_mask(h, HPSA_INTR_ON);
+ msleep(10000);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+
+ rc = controller_reset_failed(h->cfgtable);
+ if (rc)
+@@ -4460,7 +4460,7 @@ reinit_after_soft_reset:
+ }
+
+ /* Turn the interrupts on so we can service requests */
+- h->access.set_intr_mask(h, HPSA_INTR_ON);
++ h->access->set_intr_mask(h, HPSA_INTR_ON);
+
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+@@ -4512,7 +4512,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+ * To write all data in the battery backed cache to disks
+ */
+ hpsa_flush_cache(h);
+- h->access.set_intr_mask(h, HPSA_INTR_OFF);
++ h->access->set_intr_mask(h, HPSA_INTR_OFF);
+ free_irq(h->intr[h->intr_mode], h);
+ #ifdef CONFIG_PCI_MSI
+ if (h->msix_vector)
+@@ -4676,7 +4676,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+ return;
+ }
+ /* Change the access methods to the performant access methods */
+- h->access = SA5_performant_access;
++ h->access = &SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+ }
+
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index c721509..8be5717 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -73,7 +73,7 @@ struct ctlr_info {
+ unsigned int msix_vector;
+ unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
+- struct access_method access;
++ struct access_method *access;
+
+ /* queue and queue Info */
+ struct list_head reqQ;
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 9de9db2..1e09660 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -105,12 +105,12 @@ struct fc_exch_mgr {
+ * all together if not used XXX
+ */
+ struct {
+- atomic_t no_free_exch;
+- atomic_t no_free_exch_xid;
+- atomic_t xid_not_found;
+- atomic_t xid_busy;
+- atomic_t seq_not_found;
+- atomic_t non_bls_resp;
++ atomic_unchecked_t no_free_exch;
++ atomic_unchecked_t no_free_exch_xid;
++ atomic_unchecked_t xid_not_found;
++ atomic_unchecked_t xid_busy;
++ atomic_unchecked_t seq_not_found;
++ atomic_unchecked_t non_bls_resp;
+ } stats;
+ };
+
+@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+- atomic_inc(&mp->stats.no_free_exch);
++ atomic_inc_unchecked(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+@@ -780,7 +780,7 @@ out:
+ return ep;
+ err:
+ spin_unlock_bh(&pool->lock);
+- atomic_inc(&mp->stats.no_free_exch_xid);
++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+ }
+@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+- atomic_inc(&mp->stats.xid_busy);
++ atomic_inc_unchecked(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ if (f_ctl & FC_FC_END_SEQ) {
+ /*
+ * Update sequence_id based on incoming last
+@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+- atomic_inc(&mp->stats.seq_not_found);
++ atomic_inc_unchecked(&mp->stats.seq_not_found);
+ goto rel;
+ }
+
+@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+
+ if (!sp)
+- atomic_inc(&mp->stats.xid_not_found);
++ atomic_inc_unchecked(&mp->stats.xid_not_found);
+ else
+- atomic_inc(&mp->stats.non_bls_resp);
++ atomic_inc_unchecked(&mp->stats.non_bls_resp);
+
+ fc_frame_free(fp);
+ }
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 5e170e3..1e87efc 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+ .post_internal_cmd = sas_ata_post_internal,
+- .qc_defer = ata_std_qc_defer,
++ .qc_defer = ata_std_qc_defer,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = sas_ata_qc_issue,
+ .qc_fill_rtf = sas_ata_qc_fill_rtf,
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index bb4c8e0..f33d849 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -425,7 +425,7 @@ struct lpfc_vport {
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_debugfs_trc *disc_trc;
+- atomic_t disc_trc_cnt;
++ atomic_unchecked_t disc_trc_cnt;
+ #endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
+@@ -835,8 +835,8 @@ struct lpfc_hba {
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+ #define FABRIC_COMANDS_BLOCKED 0
+- atomic_t num_rsrc_err;
+- atomic_t num_cmd_success;
++ atomic_unchecked_t num_rsrc_err;
++ atomic_unchecked_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ unsigned long last_ramp_up_time;
+@@ -866,7 +866,7 @@ struct lpfc_hba {
+
+ struct dentry *debug_slow_ring_trc;
+ struct lpfc_debugfs_trc *slow_ring_trc;
+- atomic_t slow_ring_trc_cnt;
++ atomic_unchecked_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 2838259..35b747a 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+
+ #include <linux/debugfs.h>
+
+-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+ static unsigned long lpfc_debugfs_start_time = 0L;
+
+ /* iDiag */
+@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
++ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+ dtp = phba->slow_ring_trc + i;
+@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+ !vport || !vport->disc_trc)
+ return;
+
+- index = atomic_inc_return(&vport->disc_trc_cnt) &
++ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+ !phba || !phba->slow_ring_trc)
+ return;
+
+- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
++ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
+ (lpfc_debugfs_max_slow_ring_trc - 1);
+ dtp = phba->slow_ring_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
++ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
+ dtp->jif = jiffies;
+ #endif
+ return;
+@@ -1151,7 +1151,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+ pos = file->f_pos + off;
+ break;
+ case 2:
+- pos = debug->len - off;
++ pos = debug->len + off;
+ }
+ return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
+ }
+@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ "slow_ring buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&phba->slow_ring_trc_cnt, 0);
++ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
+ memset(phba->slow_ring_trc, 0,
+ (sizeof(struct lpfc_debugfs_trc) *
+ lpfc_debugfs_max_slow_ring_trc));
+@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
+ "buffer\n");
+ goto debug_failed;
+ }
+- atomic_set(&vport->disc_trc_cnt, 0);
++ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 55bc4fc..a2a109c 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -10027,8 +10027,10 @@ lpfc_init(void)
+ printk(LPFC_COPYRIGHT "\n");
+
+ if (lpfc_enable_npiv) {
+- lpfc_transport_functions.vport_create = lpfc_vport_create;
+- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++ pax_open_kernel();
++ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
++ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
++ pax_close_kernel();
+ }
+ lpfc_transport_template =
+ fc_attach_transport(&lpfc_transport_functions);
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 2e1e54e..1af0a0d 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+ uint32_t evt_posted;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+- atomic_inc(&phba->num_rsrc_err);
++ atomic_inc_unchecked(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
+ unsigned long flags;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t evt_posted;
+- atomic_inc(&phba->num_cmd_success);
++ atomic_inc_unchecked(&phba->num_cmd_success);
+
+ if (vport->cfg_lun_queue_depth <= queue_depth)
+ return;
+@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ unsigned long num_rsrc_err, num_cmd_success;
+ int i;
+
+- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+- num_cmd_success = atomic_read(&phba->num_cmd_success);
++ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
++ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+- atomic_set(&phba->num_rsrc_err, 0);
+- atomic_set(&phba->num_cmd_success, 0);
++ atomic_set_unchecked(&phba->num_rsrc_err, 0);
++ atomic_set_unchecked(&phba->num_cmd_success, 0);
+ }
+
+ /**
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 987c6d6..575985c 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -1532,7 +1532,7 @@ _scsih_get_resync(struct device *dev)
+ {
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+- static struct _raid_device *raid_device;
++ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+@@ -1571,7 +1571,7 @@ _scsih_get_state(struct device *dev)
+ {
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+- static struct _raid_device *raid_device;
++ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+@@ -6532,7 +6532,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+ {
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+- static struct _raid_device *raid_device;
++ struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+@@ -7005,7 +7005,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+- static struct _raid_device *raid_device;
++ struct _raid_device *raid_device;
+ u8 retry_count;
+
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
+diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
+index 5163edb..7b142bc 100644
+--- a/drivers/scsi/pmcraid.c
++++ b/drivers/scsi/pmcraid.c
+@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+ res->scsi_dev = scsi_dev;
+ scsi_dev->hostdata = res;
+ res->change_detected = 0;
+- atomic_set(&res->read_failures, 0);
+- atomic_set(&res->write_failures, 0);
++ atomic_set_unchecked(&res->read_failures, 0);
++ atomic_set_unchecked(&res->write_failures, 0);
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
+
+ /* If this was a SCSI read/write command keep count of errors */
+ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+- atomic_inc(&res->read_failures);
++ atomic_inc_unchecked(&res->read_failures);
+ else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+- atomic_inc(&res->write_failures);
++ atomic_inc_unchecked(&res->write_failures);
+
+ if (!RES_IS_GSCSI(res->cfg_entry) &&
+ masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+ cmd->cmd_done = pmcraid_io_done;
+
+@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
+ * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
+ * hrrq_id assigned here in queuecommand
+ */
+- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
++ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
+ pinstance->num_hrrq;
+
+ if (request_size) {
+@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
+
+ pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+ /* add resources only after host is added into system */
+- if (!atomic_read(&pinstance->expose_resources))
++ if (!atomic_read_unchecked(&pinstance->expose_resources))
+ return;
+
+ fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
+@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
+ init_waitqueue_head(&pinstance->reset_wait_q);
+
+ atomic_set(&pinstance->outstanding_cmds, 0);
+- atomic_set(&pinstance->last_message_id, 0);
+- atomic_set(&pinstance->expose_resources, 0);
++ atomic_set_unchecked(&pinstance->last_message_id, 0);
++ atomic_set_unchecked(&pinstance->expose_resources, 0);
+
+ INIT_LIST_HEAD(&pinstance->free_res_q);
+ INIT_LIST_HEAD(&pinstance->used_res_q);
+@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
+ /* Schedule worker thread to handle CCN and take care of adding and
+ * removing devices to OS
+ */
+- atomic_set(&pinstance->expose_resources, 1);
++ atomic_set_unchecked(&pinstance->expose_resources, 1);
+ schedule_work(&pinstance->worker_q);
+ return rc;
+
+diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
+index ca496c7..9c791d5 100644
+--- a/drivers/scsi/pmcraid.h
++++ b/drivers/scsi/pmcraid.h
+@@ -748,7 +748,7 @@ struct pmcraid_instance {
+ struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+
+ /* Message id as filled in last fired IOARCB, used to identify HRRQ */
+- atomic_t last_message_id;
++ atomic_unchecked_t last_message_id;
+
+ /* configuration table */
+ struct pmcraid_config_table *cfg_table;
+@@ -777,7 +777,7 @@ struct pmcraid_instance {
+ atomic_t outstanding_cmds;
+
+ /* should add/delete resources to mid-layer now ?*/
+- atomic_t expose_resources;
++ atomic_unchecked_t expose_resources;
+
+
+
+@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
+ struct pmcraid_config_table_entry_ext cfg_entry_ext;
+ };
+ struct scsi_device *scsi_dev; /* Link scsi_device structure */
+- atomic_t read_failures; /* count of failed READ commands */
+- atomic_t write_failures; /* count of failed WRITE commands */
++ atomic_unchecked_t read_failures; /* count of failed READ commands */
++ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
+
+ /* To indicate add/delete/modify during CCN */
+ u8 change_detected;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 82a5ca6..97ace97 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1429,8 +1429,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+ !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ /* Ok, a 64bit DMA mask is applicable. */
+ ha->flags.enable_64bit_addressing = 1;
+- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++ pax_open_kernel();
++ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
++ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++ pax_close_kernel();
+ return;
+ }
+ }
+diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
+index fd5edc6..4906148 100644
+--- a/drivers/scsi/qla4xxx/ql4_def.h
++++ b/drivers/scsi/qla4xxx/ql4_def.h
+@@ -258,7 +258,7 @@ struct ddb_entry {
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+- atomic_t relogin_retry_count; /* Num of times relogin has been
++ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 4169c8b..a8b896b 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+- atomic_inc(&ddb_entry->relogin_retry_count);
++ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+- atomic_read(&ddb_entry->relogin_retry_count),
++ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+- atomic_set(&ddb_entry->relogin_retry_count, 0);
++ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 831db24..aef1598 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+ unsigned long timeout;
+ int rtn = 0;
+
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /* check if the device is still usable */
+ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 6c4b620..78feefb 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1426,7 +1426,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
+ shost = sdev->host;
+ scsi_init_cmd_errh(cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+- atomic_inc(&cmd->device->iorequest_cnt);
++ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
+
+ /*
+ * SCSI request completion path will do scsi_device_unbusy(),
+@@ -1452,9 +1452,9 @@ static void scsi_softirq_done(struct request *rq)
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+- atomic_inc(&cmd->device->iodone_cnt);
++ atomic_inc_unchecked(&cmd->device->iodone_cnt);
+ if (cmd->result)
+- atomic_inc(&cmd->device->ioerr_cnt);
++ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 72ca515..708d627 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -657,7 +657,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct scsi_device *sdev = to_scsi_device(dev); \
+- unsigned long long count = atomic_read(&sdev->field); \
++ unsigned long long count = atomic_read_unchecked(&sdev->field); \
+ return snprintf(buf, 20, "0x%llx\n", count); \
+ } \
+ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
+diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
+index 84a1fdf..693b0d6 100644
+--- a/drivers/scsi/scsi_tgt_lib.c
++++ b/drivers/scsi/scsi_tgt_lib.c
+@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+ int err;
+
+ dprintk("%lx %u\n", uaddr, len);
+- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
++ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
+ if (err) {
+ /*
+ * TODO: need to fixup sg_tablesize, max_segment_size,
+diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
+index 1b21491..1b7f60e 100644
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+ * Netlink Infrastructure
+ */
+
+-static atomic_t fc_event_seq;
++static atomic_unchecked_t fc_event_seq;
+
+ /**
+ * fc_get_event_number - Obtain the next sequential FC event number
+@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
+ u32
+ fc_get_event_number(void)
+ {
+- return atomic_add_return(1, &fc_event_seq);
++ return atomic_add_return_unchecked(1, &fc_event_seq);
+ }
+ EXPORT_SYMBOL(fc_get_event_number);
+
+@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
+ {
+ int error;
+
+- atomic_set(&fc_event_seq, 0);
++ atomic_set_unchecked(&fc_event_seq, 0);
+
+ error = transport_class_register(&fc_host_class);
+ if (error)
+@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
+ char *cp;
+
+ *val = simple_strtoul(buf, &cp, 0);
+- if ((*cp && (*cp != '\n')) || (*val < 0))
++ if (*cp && (*cp != '\n'))
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index c874458..568a977 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -79,7 +79,7 @@ struct iscsi_internal {
+ struct transport_container session_cont;
+ };
+
+-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
++static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
+ static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ static DEFINE_IDA(iscsi_sess_ida);
+@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ int err;
+
+ ihost = shost->shost_data;
+- session->sid = atomic_add_return(1, &iscsi_session_nr);
++ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
+
+ if (target_id == ISCSI_MAX_TARGET) {
+ id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
+ printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
+ ISCSI_TRANSPORT_VERSION);
+
+- atomic_set(&iscsi_session_nr, 0);
++ atomic_set_unchecked(&iscsi_session_nr, 0);
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index 21a045e..ec89e03 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -33,7 +33,7 @@
+ #include "scsi_transport_srp_internal.h"
+
+ struct srp_host_attrs {
+- atomic_t next_port_id;
++ atomic_unchecked_t next_port_id;
+ };
+ #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
+
+@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
+
+- atomic_set(&srp_host->next_port_id, 0);
++ atomic_set_unchecked(&srp_host->next_port_id, 0);
+ return 0;
+ }
+
+@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
+ memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
+ rport->roles = ids->roles;
+
+- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
++ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
+ dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
+
+ transport_setup_device(&rport->dev);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f6d2b62..d9aa1a4 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2632,7 +2632,7 @@ static int sd_probe(struct device *dev)
+ device_initialize(&sdkp->dev);
+ sdkp->dev.parent = dev;
+ sdkp->dev.class = &sd_disk_class;
+- dev_set_name(&sdkp->dev, dev_name(dev));
++ dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+
+ if (device_add(&sdkp->dev))
+ goto out_free_index;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 441a1c5..07cece7 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ sdp->disk->disk_name,
+ MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
+ NULL,
+- (char *)arg);
++ (char __user *)arg);
+ case BLKTRACESTART:
+ return blk_trace_startstop(sdp->device->request_queue, 1);
+ case BLKTRACESTOP:
+@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
+ const struct file_operations * fops;
+ };
+
+-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
++static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
+ {"allow_dio", &adio_fops},
+ {"debug", &debug_fops},
+ {"def_reserved_size", &dressz_fops},
+@@ -2327,7 +2327,7 @@ sg_proc_init(void)
+ {
+ int k, mask;
+ int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
+- struct sg_proc_leaf * leaf;
++ const struct sg_proc_leaf * leaf;
+
+ sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
+ if (!sg_proc_sgp)
+diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
+index f64250e..1ee3049 100644
+--- a/drivers/spi/spi-dw-pci.c
++++ b/drivers/spi/spi-dw-pci.c
+@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
+ #define spi_resume NULL
+ #endif
+
+-static const struct pci_device_id pci_ids[] __devinitdata = {
++static const struct pci_device_id pci_ids[] __devinitconst = {
+ /* Intel MID platform SPI controller 0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+ {},
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index b2ccdea..84cde75 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
+ EXPORT_SYMBOL_GPL(spi_bus_unlock);
+
+ /* portable code must never pass more than 32 bytes */
+-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
++#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
+
+ static u8 *buf;
+
+diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
+index 436fe97..4082570 100644
+--- a/drivers/staging/gma500/power.c
++++ b/drivers/staging/gma500/power.c
+@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
+ ret = gma_resume_pci(dev->pdev);
+ if (ret == 0) {
+ /* FIXME: we want to defer this for Medfield/Oaktrail */
+- gma_resume_display(dev);
++ gma_resume_display(dev->pdev);
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ pm_runtime_get(&dev->pdev->dev);
+diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
+index bafccb3..e3ac78d 100644
+--- a/drivers/staging/hv/rndis_filter.c
++++ b/drivers/staging/hv/rndis_filter.c
+@@ -42,7 +42,7 @@ struct rndis_device {
+
+ enum rndis_device_state state;
+ bool link_state;
+- atomic_t new_req_id;
++ atomic_unchecked_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+ * template
+ */
+ set = &rndis_msg->msg.set_req;
+- set->req_id = atomic_inc_return(&dev->new_req_id);
++ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Add to the request list */
+ spin_lock_irqsave(&dev->request_lock, flags);
+@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
+
+ /* Setup the rndis set */
+ halt = &request->request_msg.msg.halt_req;
+- halt->req_id = atomic_inc_return(&dev->new_req_id);
++ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
+
+ /* Ignore return since this msg is optional. */
+ rndis_filter_send_request(dev, request);
+diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
+index 9e8f010..af9efb56 100644
+--- a/drivers/staging/iio/buffer_generic.h
++++ b/drivers/staging/iio/buffer_generic.h
+@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
+
+ int (*is_enabled)(struct iio_buffer *buffer);
+ int (*enable)(struct iio_buffer *buffer);
+-};
++} __no_const;
+
+ /**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
+index 72d0f3f..ba3ff3c 100644
+--- a/drivers/staging/iio/dac/ad5360.c
++++ b/drivers/staging/iio/dac/ad5360.c
+@@ -439,8 +439,8 @@ static int __devinit ad5360_alloc_channels(struct iio_dev *indio_dev)
+ struct iio_chan_spec *channels;
+ unsigned int i;
+
+- channels = kcalloc(sizeof(struct iio_chan_spec),
+- st->chip_info->num_channels, GFP_KERNEL);
++ channels = kcalloc(st->chip_info->num_channels,
++ sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (!channels)
+ return -ENOMEM;
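
The ad5360 hunk swaps the kcalloc() arguments into the documented (count, element_size) order. The product is overflow-checked either way, but the conventional order is what readers and static checkers expect. A sketch of the guard kcalloc applies, assuming user-space calloc semantics as the allocator:

#include <stdlib.h>
#include <stdint.h>

static void *kcalloc_sketch(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* zeroed, like kcalloc    */
}
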
+diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
+index aec9311..ddc3103 100644
+--- a/drivers/staging/iio/industrialio-core.c
++++ b/drivers/staging/iio/industrialio-core.c
+@@ -398,7 +398,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
+ }
+
+ static
+-int __iio_device_attr_init(struct device_attribute *dev_attr,
++int __iio_device_attr_init(device_attribute_no_const *dev_attr,
+ const char *postfix,
+ struct iio_chan_spec const *chan,
+ ssize_t (*readfunc)(struct device *dev,
+diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
+index 66a34ad..65f6aea 100644
+--- a/drivers/staging/iio/ring_sw.c
++++ b/drivers/staging/iio/ring_sw.c
+@@ -173,7 +173,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
+
+ u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
+ u8 *data;
+- int ret, max_copied, bytes_to_rip, dead_offset;
++ long ret, max_copied, bytes_to_rip, dead_offset;
+
+ /* A userspace program has probably made an error if it tries to
+ * read something that is not a whole number of bpds.
+diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
+index 8b307b4..a97ac91 100644
+--- a/drivers/staging/octeon/ethernet-rx.c
++++ b/drivers/staging/octeon/ethernet-rx.c
+@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+ /* Increment RX stats for virtual ports */
+ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
+- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
++ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
++ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
+- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
++ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
+ #endif
+ }
+ netif_receive_skb(skb);
+@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
+ dev->name);
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ dev_kfree_skb_irq(skb);
+ }
+diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
+index 076f866..2308070 100644
+--- a/drivers/staging/octeon/ethernet.c
++++ b/drivers/staging/octeon/ethernet.c
+@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+ * since the RX tasklet also increments it.
+ */
+ #ifdef CONFIG_64BIT
+- atomic64_add(rx_status.dropped_packets,
+- (atomic64_t *)&priv->stats.rx_dropped);
++ atomic64_add_unchecked(rx_status.dropped_packets,
++ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
+ #else
+- atomic_add(rx_status.dropped_packets,
+- (atomic_t *)&priv->stats.rx_dropped);
++ atomic_add_unchecked(rx_status.dropped_packets,
++ (atomic_unchecked_t *)&priv->stats.rx_dropped);
+ #endif
+ }
+
+diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
+index 7a19555..466456d 100644
+--- a/drivers/staging/pohmelfs/inode.c
++++ b/drivers/staging/pohmelfs/inode.c
+@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
+ mutex_init(&psb->mcache_lock);
+ psb->mcache_root = RB_ROOT;
+ psb->mcache_timeout = msecs_to_jiffies(5000);
+- atomic_long_set(&psb->mcache_gen, 0);
++ atomic_long_set_unchecked(&psb->mcache_gen, 0);
+
+ psb->trans_max_pages = 100;
+
+@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
+ INIT_LIST_HEAD(&psb->crypto_ready_list);
+ INIT_LIST_HEAD(&psb->crypto_active_list);
+
+- atomic_set(&psb->trans_gen, 1);
++ atomic_set_unchecked(&psb->trans_gen, 1);
+ atomic_long_set(&psb->total_inodes, 0);
+
+ mutex_init(&psb->state_lock);
+diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
+index e22665c..a2a9390 100644
+--- a/drivers/staging/pohmelfs/mcache.c
++++ b/drivers/staging/pohmelfs/mcache.c
+@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
+ m->data = data;
+ m->start = start;
+ m->size = size;
+- m->gen = atomic_long_inc_return(&psb->mcache_gen);
++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
+
+ mutex_lock(&psb->mcache_lock);
+ err = pohmelfs_mcache_insert(psb, m);
+diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
+index 985b6b7..7699e05 100644
+--- a/drivers/staging/pohmelfs/netfs.h
++++ b/drivers/staging/pohmelfs/netfs.h
+@@ -571,14 +571,14 @@ struct pohmelfs_config;
+ struct pohmelfs_sb {
+ struct rb_root mcache_root;
+ struct mutex mcache_lock;
+- atomic_long_t mcache_gen;
++ atomic_long_unchecked_t mcache_gen;
+ unsigned long mcache_timeout;
+
+ unsigned int idx;
+
+ unsigned int trans_retries;
+
+- atomic_t trans_gen;
++ atomic_unchecked_t trans_gen;
+
+ unsigned int crypto_attached_size;
+ unsigned int crypto_align_size;
+diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
+index 06c1a74..866eebc 100644
+--- a/drivers/staging/pohmelfs/trans.c
++++ b/drivers/staging/pohmelfs/trans.c
+@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
+ int err;
+ struct netfs_cmd *cmd = t->iovec.iov_base;
+
+- t->gen = atomic_inc_return(&psb->trans_gen);
++ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
+
+ cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
+ t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
+diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
+index 86308a0..feaa925 100644
+--- a/drivers/staging/rtl8712/rtl871x_io.h
++++ b/drivers/staging/rtl8712/rtl871x_io.h
+@@ -108,7 +108,7 @@ struct _io_ops {
+ u8 *pmem);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+-};
++} __no_const;
+
+ struct io_req {
+ struct list_head list;
+diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
+index c7b5e8b..783d6cbe 100644
+--- a/drivers/staging/sbe-2t3e3/netdev.c
++++ b/drivers/staging/sbe-2t3e3/netdev.c
+@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
+
+ if (rlen)
+- if (copy_to_user(data, &resp, rlen))
++ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
+ return -EFAULT;
+
+ return 0;
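
The sbe-2t3e3 hunk adds a length clamp before copy_to_user(): rlen comes back from t3e3_if_config(), and copying more than sizeof(resp) would read past the on-stack response buffer. Shape of the guard, with memcpy standing in for copy_to_user() (an assumption of this sketch):

#include <string.h>

#define EFAULT 14

struct resp { unsigned char data[128]; };

static int reply(void *user_buf, const struct resp *resp, size_t rlen)
{
	if (rlen > sizeof(*resp))
		return -EFAULT;		/* refuse oversized copies */
	memcpy(user_buf, resp, rlen);	/* copy_to_user() in-tree  */
	return 0;
}
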
+diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
+index 88b3298..3783eee 100644
+--- a/drivers/staging/usbip/vhci.h
++++ b/drivers/staging/usbip/vhci.h
+@@ -88,7 +88,7 @@ struct vhci_hcd {
+ unsigned resuming:1;
+ unsigned long re_timeout;
+
+- atomic_t seqnum;
++ atomic_unchecked_t seqnum;
+
+ /*
+ * NOTE:
+diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
+index 2ee97e2..0420b86 100644
+--- a/drivers/staging/usbip/vhci_hcd.c
++++ b/drivers/staging/usbip/vhci_hcd.c
+@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
+ return;
+ }
+
+- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
++ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (priv->seqnum == 0xffff)
+ dev_info(&urb->dev->dev, "seqnum max\n");
+
+@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ return -ENOMEM;
+ }
+
+- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
++ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
+ if (unlink->seqnum == 0xffff)
+ pr_info("seqnum max\n");
+
+@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
+ vdev->rhport = rhport;
+ }
+
+- atomic_set(&vhci->seqnum, 0);
++ atomic_set_unchecked(&vhci->seqnum, 0);
+ spin_lock_init(&vhci->lock);
+
+ hcd->power_budget = 0; /* no limit */
+diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
+index 1a7afaa..e7dafbb 100644
+--- a/drivers/staging/usbip/vhci_rx.c
++++ b/drivers/staging/usbip/vhci_rx.c
+@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ if (!urb) {
+ pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+ pr_info("max seqnum %d\n",
+- atomic_read(&the_controller->seqnum));
++ atomic_read_unchecked(&the_controller->seqnum));
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return;
+ }
+diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
+index 7735027..30eed13 100644
+--- a/drivers/staging/vt6655/hostap.c
++++ b/drivers/staging/vt6655/hostap.c
+@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
+ *
+ */
+
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ {
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
+- .ndo_start_xmit = pDevice->tx_80211,
+- };
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+
+@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ *apdev_priv = *pDevice;
+ memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+
++ /* only half broken now */
++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+ pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+
+ pDevice->apdev->type = ARPHRD_IEEE80211;
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index df8ea25..47dd9c6 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
+ *
+ */
+
++static net_device_ops_no_const apdev_netdev_ops;
++
+ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ {
+ PSDevice apdev_priv;
+ struct net_device *dev = pDevice->dev;
+ int ret;
+- const struct net_device_ops apdev_netdev_ops = {
+- .ndo_start_xmit = pDevice->tx_80211,
+- };
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
+
+@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
+ *apdev_priv = *pDevice;
+ memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+
++ /* only half broken now */
++ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
+ pDevice->apdev->netdev_ops = &apdev_netdev_ops;
+
+ pDevice->apdev->type = ARPHRD_IEEE80211;
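
Both vt6655 and vt6656 above had the same defect: a const struct net_device_ops built on the stack of hostap_enable_hostapd(), whose address was stored in the long-lived apdev, leaving a dangling pointer after return (hence the patch's "only half broken now" comment). Moving the ops to static storage, via the writable net_device_ops_no_const typedef, fixes the lifetime. A minimal reproduction of the bug and the fix, with hypothetical local types:

struct ops { int (*start_xmit)(void); };
struct device { const struct ops *netdev_ops; };

static struct ops apdev_netdev_ops;	/* patched: static lifetime */

static void enable_hostapd(struct device *apdev, int (*xmit)(void))
{
	/* pre-patch shape, where the frame dies but the pointer
	 * lives on:
	 *   const struct ops local = { .start_xmit = xmit };
	 *   apdev->netdev_ops = &local;
	 */
	apdev_netdev_ops.start_xmit = xmit;
	apdev->netdev_ops = &apdev_netdev_ops;
}
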
+diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
+index 1ca66ea..76f1343 100644
+--- a/drivers/staging/zcache/tmem.c
++++ b/drivers/staging/zcache/tmem.c
+@@ -39,7 +39,7 @@
+ * A tmem host implementation must use this function to register callbacks
+ * for memory allocation.
+ */
+-static struct tmem_hostops tmem_hostops;
++static tmem_hostops_no_const tmem_hostops;
+
+ static void tmem_objnode_tree_init(void);
+
+@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
+ * A tmem host implementation must use this function to register
+ * callbacks for a page-accessible memory (PAM) implementation
+ */
+-static struct tmem_pamops tmem_pamops;
++static tmem_pamops_no_const tmem_pamops;
+
+ void tmem_register_pamops(struct tmem_pamops *m)
+ {
+diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
+index ed147c4..94fc3c6 100644
+--- a/drivers/staging/zcache/tmem.h
++++ b/drivers/staging/zcache/tmem.h
+@@ -180,6 +180,7 @@ struct tmem_pamops {
+ void (*new_obj)(struct tmem_obj *);
+ int (*replace_in_obj)(void *, struct tmem_obj *);
+ };
++typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+ extern void tmem_register_pamops(struct tmem_pamops *m);
+
+ /* memory allocation methods provided by the host implementation */
+@@ -189,6 +190,7 @@ struct tmem_hostops {
+ struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
+ void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
+ };
++typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+ extern void tmem_register_hostops(struct tmem_hostops *m);
+
+ /* core tmem accessor functions */
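
tmem_hostops and tmem_pamops are filled in at registration time, so they cannot be made read-only by grsecurity's constify plugin, which otherwise const-ifies structs consisting of function pointers. The __no_const typedef is the opt-out. A sketch, with the attribute definition assumed (it compiles away without the plugin):

#ifdef CONSTIFY_PLUGIN		/* assumption: plugin build flag */
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct tmem_hostops {
	void *(*obj_alloc)(void *pool);
	void (*obj_free)(void *obj, void *pool);
};
typedef struct tmem_hostops __no_const tmem_hostops_no_const;

static tmem_hostops_no_const tmem_hostops;	/* stays writable */

void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_hostops = *m;	/* would fault on a constified object */
}
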
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 3effde2..dda7d46 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1351,7 +1351,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+ * outstanding_r2ts reaches zero, go ahead and send the delayed
+ * TASK_ABORTED status.
+ */
+- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
++ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ if (--cmd->outstanding_r2ts < 1) {
+ iscsit_stop_dataout_timer(cmd);
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 6845228..df77141 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
+ cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+- atomic_read(&cmd->t_task_cdbs_sent),
++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
+@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
+ pr_debug("LUN_RESET: got t_transport_active = 1 for"
+ " task: %p, t_fe_count: %d dev: %p\n", task,
+ fe_count, dev);
+- atomic_set(&cmd->t_transport_aborted, 1);
++ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
+ }
+ pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ " t_fe_count: %d dev: %p\n", task, fe_count, dev);
+- atomic_set(&cmd->t_transport_aborted, 1);
++ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 898c1de..b2ca488 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
+
+ dev->queue_depth = dev_limits->queue_depth;
+ atomic_set(&dev->depth_left, dev->queue_depth);
+- atomic_set(&dev->dev_ordered_id, 0);
++ atomic_set_unchecked(&dev->dev_ordered_id, 0);
+
+ se_dev_set_default_attribs(dev, dev_limits);
+
+@@ -1531,7 +1531,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+ * Used to determine when ORDERED commands should go from
+ * Dormant to Active status.
+ */
+- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
++ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
+ smp_mb__after_atomic_inc();
+ pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ cmd->se_ordered_id, cmd->sam_task_attr,
+@@ -1801,7 +1801,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
+ " t_transport_active: %d t_transport_stop: %d"
+ " t_transport_sent: %d\n", cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+- atomic_read(&cmd->t_task_cdbs_sent),
++ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_task_cdbs_ex_left),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+@@ -2091,9 +2091,9 @@ check_depth:
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags |= (TF_ACTIVE | TF_SENT);
+- atomic_inc(&cmd->t_task_cdbs_sent);
++ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
+
+- if (atomic_read(&cmd->t_task_cdbs_sent) ==
++ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
+ cmd->t_task_list_num)
+ atomic_set(&cmd->t_transport_sent, 1);
+
+@@ -4304,7 +4304,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
+ atomic_set(&cmd->transport_lun_stop, 0);
+ }
+ if (!atomic_read(&cmd->t_transport_active) ||
+- atomic_read(&cmd->t_transport_aborted)) {
++ atomic_read_unchecked(&cmd->t_transport_aborted)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return false;
+ }
+@@ -4562,7 +4562,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ {
+ int ret = 0;
+
+- if (atomic_read(&cmd->t_transport_aborted) != 0) {
++ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
+ if (!send_status ||
+ (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+ return 1;
+@@ -4599,7 +4599,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+- atomic_inc(&cmd->t_transport_aborted);
++ atomic_inc_unchecked(&cmd->t_transport_aborted);
+ smp_mb__after_atomic_inc();
+ }
+ }
+diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
+index b9040be..e3f5aab 100644
+--- a/drivers/tty/hvc/hvcs.c
++++ b/drivers/tty/hvc/hvcs.c
+@@ -83,6 +83,7 @@
+ #include <asm/hvcserver.h>
+ #include <asm/uaccess.h>
+ #include <asm/vio.h>
++#include <asm/local.h>
+
+ /*
+ * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
+@@ -270,7 +271,7 @@ struct hvcs_struct {
+ unsigned int index;
+
+ struct tty_struct *tty;
+- int open_count;
++ local_t open_count;
+
+ /*
+ * Used to tell the driver kernel_thread what operations need to take
+@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+
+- if (hvcsd->open_count > 0) {
++ if (local_read(&hvcsd->open_count) > 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ printk(KERN_INFO "HVCS: vterm state unchanged. "
+ "The hvcs device node is still in use.\n");
+@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
+ if ((retval = hvcs_partner_connect(hvcsd)))
+ goto error_release;
+
+- hvcsd->open_count = 1;
++ local_set(&hvcsd->open_count, 1);
+ hvcsd->tty = tty;
+ tty->driver_data = hvcsd;
+
+@@ -1179,7 +1180,7 @@ fast_open:
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ kref_get(&hvcsd->kref);
+- hvcsd->open_count++;
++ local_inc(&hvcsd->open_count);
+ hvcsd->todo_mask |= HVCS_SCHED_READ;
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+
+@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ hvcsd = tty->driver_data;
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+- if (--hvcsd->open_count == 0) {
++ if (local_dec_and_test(&hvcsd->open_count)) {
+
+ vio_disable_interrupts(hvcsd->vdev);
+
+@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ free_irq(irq, hvcsd);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
+ return;
+- } else if (hvcsd->open_count < 0) {
++ } else if (local_read(&hvcsd->open_count) < 0) {
+ printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
+ " is missmanaged.\n",
+- hvcsd->vdev->unit_address, hvcsd->open_count);
++ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
+ }
+
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+
+ spin_lock_irqsave(&hvcsd->lock, flags);
+ /* Preserve this so that we know how many kref refs to put */
+- temp_open_count = hvcsd->open_count;
++ temp_open_count = local_read(&hvcsd->open_count);
+
+ /*
+ * Don't kref put inside the spinlock because the destruction
+@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
+ hvcsd->tty->driver_data = NULL;
+ hvcsd->tty = NULL;
+
+- hvcsd->open_count = 0;
++ local_set(&hvcsd->open_count, 0);
+
+ /* This will drop any buffered data on the floor which is OK in a hangup
+ * scenario. */
+@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
+ * the middle of a write operation? This is a crummy place to do this
+ * but we want to keep it all in the spinlock.
+ */
+- if (hvcsd->open_count <= 0) {
++ if (local_read(&hvcsd->open_count) <= 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return -ENODEV;
+ }
+@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
+ {
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
+- if (!hvcsd || hvcsd->open_count <= 0)
++ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
+ return 0;
+
+ return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
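
hvcs converts its plain int open count to local_t (the ipwireless tty driver below gets the same treatment), giving atomic read-modify-write primitives; local_dec_and_test() in particular folds the close path's decrement-and-compare into one step. A user-space model built on GCC sync builtins, which is an assumption; the kernel's local_t is per-CPU-optimized:

typedef struct { long v; } local_t;

static void local_set(local_t *l, long i) { l->v = i; }
static long local_read(const local_t *l)  { return l->v; }
static void local_inc(local_t *l)         { __sync_add_and_fetch(&l->v, 1); }

static int local_dec_and_test(local_t *l)
{
	/* true exactly when the count hits zero */
	return __sync_sub_and_fetch(&l->v, 1) == 0;
}
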
+diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
+index cdfa3e0..37fa165 100644
+--- a/drivers/tty/hvc/hvsi.c
++++ b/drivers/tty/hvc/hvsi.c
+@@ -86,7 +86,7 @@ struct hvsi_struct {
+ int n_outbuf;
+ uint32_t vtermno;
+ uint32_t virq;
+- atomic_t seqno; /* HVSI packet sequence number */
++ atomic_unchecked_t seqno; /* HVSI packet sequence number */
+ uint16_t mctrl;
+ uint8_t state; /* HVSI protocol state */
+ uint8_t flags;
+@@ -297,7 +297,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
+
+ packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query_response);
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.verb = VSV_SEND_VERSION_NUMBER;
+ packet.u.version = HVSI_VERSION;
+ packet.query_seqno = query_seqno+1;
+@@ -581,7 +581,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
+
+ packet.hdr.type = VS_QUERY_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query);
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.verb = verb;
+
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+@@ -623,7 +623,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
+ int wrote;
+
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER,
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = sizeof(struct hvsi_control);
+ packet.verb = VSV_SET_MODEM_CTL;
+ packet.mask = HVSI_TSDTR;
+@@ -706,7 +706,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
+ BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
+
+ packet.hdr.type = VS_DATA_PACKET_HEADER;
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = count + sizeof(struct hvsi_header);
+ memcpy(&packet.data, buf, count);
+
+@@ -723,7 +723,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
+ struct hvsi_control packet __ALIGNED__;
+
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
++ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
+ packet.hdr.len = 6;
+ packet.verb = VSV_CLOSE_PROTOCOL;
+
+@@ -755,7 +755,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
+ spin_lock_irqsave(&hp->lock, flags);
+ hp->tty = tty;
+ hp->count++;
+- atomic_set(&hp->seqno, 0);
++ atomic_set_unchecked(&hp->seqno, 0);
+ h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
+index 3749688..82c91dc 100644
+--- a/drivers/tty/hvc/hvsi_lib.c
++++ b/drivers/tty/hvc/hvsi_lib.c
+@@ -9,7 +9,7 @@
+
+ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
+ {
+- packet->seqno = atomic_inc_return(&pv->seqno);
++ packet->seqno = atomic_inc_return_unchecked(&pv->seqno);
+
+ /* Assumes that always succeeds, works in practice */
+ return pv->put_chars(pv->termno, (char *)packet, packet->len);
+@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
+
+ /* Reset state */
+ pv->established = 0;
+- atomic_set(&pv->seqno, 0);
++ atomic_set_unchecked(&pv->seqno, 0);
+
+ pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
+
+@@ -265,7 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
+ pv->mctrl_update = 0;
+ q.hdr.type = VS_QUERY_PACKET_HEADER;
+ q.hdr.len = sizeof(struct hvsi_query);
+- q.hdr.seqno = atomic_inc_return(&pv->seqno);
++ q.hdr.seqno = atomic_inc_return_unchecked(&pv->seqno);
+ q.verb = VSV_SEND_MODEM_CTL_STATUS;
+ rc = hvsi_send_packet(pv, &q.hdr);
+ if (rc <= 0) {
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index ef92869..f4ebd88 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -29,6 +29,7 @@
+ #include <linux/tty_driver.h>
+ #include <linux/tty_flip.h>
+ #include <linux/uaccess.h>
++#include <asm/local.h>
+
+ #include "tty.h"
+ #include "network.h"
+@@ -51,7 +52,7 @@ struct ipw_tty {
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+- int open_count;
++ local_t open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+- if (tty->open_count == 0)
++ if (local_read(&tty->open_count) == 0)
+ tty->tx_bytes_queued = 0;
+
+- tty->open_count++;
++ local_inc(&tty->open_count);
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+
+ static void do_ipw_close(struct ipw_tty *tty)
+ {
+- tty->open_count--;
+-
+- if (tty->open_count == 0) {
++ if (local_dec_return(&tty->open_count) == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (tty->open_count == 0) {
++ if (local_read(&tty->open_count) == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ return;
+ }
+
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+- if (!tty->open_count) {
++ if (!local_read(&tty->open_count)) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+ if (!tty)
+ return 0;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return 0;
+
+ return tty->tx_bytes_queued;
+@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return get_control_lines(tty);
+@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
+ if (!tty)
+ return -ENODEV;
+
+- if (!tty->open_count)
++ if (!local_read(&tty->open_count))
+ return -EINVAL;
+
+ /* FIXME: Exactly how is the tty object locked here .. */
+@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
+ against a parallel ioctl etc */
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+- while (ttyj->open_count)
++ while (local_read(&ttyj->open_count))
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index d190269..f59727e 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+ kref_init(&dlci->ref);
+ mutex_init(&dlci->mutex);
+ dlci->fifo = &dlci->_fifo;
+- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
++ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
+ kfree(dlci);
+ return NULL;
+ }
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 0f8a785..64c35dd 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2132,6 +2132,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+ {
+ *ops = tty_ldisc_N_TTY;
+ ops->owner = NULL;
+- ops->refcount = ops->flags = 0;
++ atomic_set(&ops->refcount, 0);
++ ops->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 4735928..e80860a 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -775,8 +775,10 @@ static void __init unix98_pty_init(void)
+ register_sysctl_table(pty_root_table);
+
+ /* Now create the /dev/ptmx special device */
++ pax_open_kernel();
+ tty_default_fops(&ptmx_fops);
+- ptmx_fops.open = ptmx_open;
++ *(void **)&ptmx_fops.open = ptmx_open;
++ pax_close_kernel();
+
+ cdev_init(&ptmx_cdev, &ptmx_fops);
+ if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
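
With the constify plugin, file_operations live in read-only memory, so the one-time assignment of ptmx_open has to be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP). The shape of the pattern, with the protection calls stubbed out as an assumption of this sketch:

struct file_operations { int (*open)(void); };

static void pax_open_kernel(void)  { /* drop write protection  */ }
static void pax_close_kernel(void) { /* restore it immediately */ }

static struct file_operations ptmx_fops;	/* const in-tree  */
static int ptmx_open(void) { return 0; }

static void unix98_pty_init_sketch(void)
{
	pax_open_kernel();
	*(void **)&ptmx_fops.open = ptmx_open;	/* cast past const */
	pax_close_kernel();
}
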
+diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
+index 6b36c15..335a4a2 100644
+--- a/drivers/tty/serial/ioc4_serial.c
++++ b/drivers/tty/serial/ioc4_serial.c
+@@ -438,7 +438,7 @@ struct ioc4_soft {
+ } is_intr_info[MAX_IOC4_INTR_ENTS];
+
+ /* Number of entries active in the above array */
+- atomic_t is_num_intrs;
++ atomic_unchecked_t is_num_intrs;
+ } is_intr_type[IOC4_NUM_INTR_TYPES];
+
+ /* is_ir_lock must be held while
+@@ -975,7 +975,7 @@ intr_connect(struct ioc4_soft *soft, int type,
+ BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
+ || (type == IOC4_OTHER_INTR_TYPE)));
+
+- i = atomic_inc(&soft-> is_intr_type[type].is_num_intrs) - 1;
++ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
+ BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
+
+ /* Save off the lower level interrupt handler */
+@@ -1002,7 +1002,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
+
+ soft = arg;
+ for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
+- num_intrs = (int)atomic_read(
++ num_intrs = (int)atomic_read_unchecked(
+ &soft->is_intr_type[intr_type].is_num_intrs);
+
+ this_mir = this_ir = pending_intrs(soft, intr_type);
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 2b42a01..32a2ed3 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -24,8 +24,9 @@
+ #define MAX_CONFIG_LEN 40
+
+ static struct kgdb_io kgdboc_io_ops;
++static struct kgdb_io kgdboc_io_ops_console;
+
+-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
++/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
+ static int configured = -1;
+
+ static char config[MAX_CONFIG_LEN];
+@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
+ kgdboc_unregister_kbd();
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
++ else if (configured == 2)
++ kgdb_unregister_io_module(&kgdboc_io_ops_console);
+ }
+
+ static int configure_kgdboc(void)
+@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
+ int err;
+ char *cptr = config;
+ struct console *cons;
++ int is_console = 0;
+
+ err = kgdboc_option_setup(config);
+ if (err || !strlen(config) || isspace(config[0]))
+ goto noconfig;
+
+ err = -ENODEV;
+- kgdboc_io_ops.is_console = 0;
+ kgdb_tty_driver = NULL;
+
+ kgdboc_use_kms = 0;
+@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+- kgdboc_io_ops.is_console = 1;
++ is_console = 1;
+ break;
+ }
+ cons = cons->next;
+@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
+ kgdb_tty_line = tty_line;
+
+ do_register:
+- err = kgdb_register_io_module(&kgdboc_io_ops);
++ if (is_console) {
++ err = kgdb_register_io_module(&kgdboc_io_ops_console);
++ configured = 2;
++ } else {
++ err = kgdb_register_io_module(&kgdboc_io_ops);
++ configured = 1;
++ }
+ if (err)
+ goto noconfig;
+
+- configured = 1;
+-
+ return 0;
+
+ noconfig:
+@@ -213,7 +220,7 @@ noconfig:
+ static int __init init_kgdboc(void)
+ {
+ /* Already configured? */
+- if (configured == 1)
++ if (configured >= 1)
+ return 0;
+
+ return configure_kgdboc();
+@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+ if (config[len - 1] == '\n')
+ config[len - 1] = '\0';
+
+- if (configured == 1)
++ if (configured >= 1)
+ cleanup_kgdboc();
+
+ /* Go and configure with the new params. */
+@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
+ .post_exception = kgdboc_post_exp_handler,
+ };
+
++static struct kgdb_io kgdboc_io_ops_console = {
++ .name = "kgdboc",
++ .read_char = kgdboc_get_char,
++ .write_char = kgdboc_put_char,
++ .pre_exception = kgdboc_pre_exp_handler,
++ .post_exception = kgdboc_post_exp_handler,
++ .is_console = 1
++};
++
+ #ifdef CONFIG_KGDB_SERIAL_CONSOLE
+ /* This is only available if kgdboc is a built in for early debugging */
+ static int __init kgdboc_early_init(char *opt)
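
Rather than flipping a writable is_console field at runtime, which would keep struct kgdb_io mutable, kgdboc now carries two fully initialized ops structures and registers whichever applies, tracking the choice in configured (1 vs 2) so cleanup_kgdboc() unregisters the right one. The selection logic, reduced to its essentials:

struct kgdb_io { const char *name; int is_console; };

static const struct kgdb_io io_ops         = { "kgdboc", 0 };
static const struct kgdb_io io_ops_console = { "kgdboc", 1 };

static const struct kgdb_io *pick_ops(int is_console)
{
	/* both objects can now live in read-only memory */
	return is_console ? &io_ops_console : &io_ops;
}
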
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 8131e2c..b48928a 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -857,7 +857,7 @@ static struct uart_driver msm_uart_driver = {
+ .cons = MSM_CONSOLE,
+ };
+
+-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
++static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
+
+ static int __init msm_serial_probe(struct platform_device *pdev)
+ {
+@@ -867,7 +867,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
+ int irq;
+
+ if (pdev->id == -1)
+- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
++ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index b31f1c3..1b6b8c4 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -440,11 +440,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
+ }
+ }
+
++static int s3c64xx_serial_startup(struct uart_port *port);
+ static int s3c24xx_serial_startup(struct uart_port *port)
+ {
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ int ret;
+
++ /* Startup sequence is different for s3c64xx and higher SoC's */
++ if (s3c24xx_serial_has_interrupt_mask(port))
++ return s3c64xx_serial_startup(port);
++
+ dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
+ port->mapbase, port->membase);
+
+@@ -1149,10 +1154,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ port->dev = &platdev->dev;
+ ourport->info = info;
+
+- /* Startup sequence is different for s3c64xx and higher SoC's */
+- if (s3c24xx_serial_has_interrupt_mask(port))
+- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
+-
+ /* copy the info in from provided structure */
+ ourport->port.fifosize = info->fifosize;
+
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 43db715..82134aa 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
+ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+- if (count) {
++ if (count && capable(CAP_SYS_ADMIN)) {
+ char c;
+
+ if (get_user(c, buf))
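
The sysrq hunk gates /proc/sysrq-trigger writes on CAP_SYS_ADMIN; without the capability the write falls through and reports success without invoking the handler. Reduced shape of the check, with a plain flag standing in for capable():

static long write_sysrq_trigger(const char *buf, unsigned long count,
				int has_cap_sys_admin)
{
	if (count && has_cap_sys_admin) {
		char c = buf[0];	/* get_user() in the kernel */
		/* __handle_sysrq(c, ...) would run here */
		(void)c;
	}
	return count;			/* return path unchanged   */
}
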
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3f35e42..9fed166 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1089,7 +1089,7 @@ static inline ssize_t do_tty_write(
+ cond_resched();
+ }
+ if (written) {
+- struct inode *inode = file->f_path.dentry->d_inode;
++ struct inode *inode = file->f_path.dentry->d_inode;
+ tty_update_time(&inode->i_mtime);
+ ret = written;
+ }
+@@ -3250,7 +3250,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+
+ void tty_default_fops(struct file_operations *fops)
+ {
+- *fops = tty_fops;
++ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
+ }
+
+ /*
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 8e0924f..4204eb4 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+- ldo->refcount--;
++ atomic_dec(&ldo->refcount);
+ module_put(ldo->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+ tty_ldiscs[disc] = new_ldisc;
+ new_ldisc->num = disc;
+- new_ldisc->refcount = 0;
++ atomic_set(&new_ldisc->refcount, 0);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+
+ return ret;
+@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- if (tty_ldiscs[disc]->refcount)
++ if (atomic_read(&tty_ldiscs[disc]->refcount))
+ ret = -EBUSY;
+ else
+ tty_ldiscs[disc] = NULL;
+@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
+ if (ldops) {
+ ret = ERR_PTR(-EAGAIN);
+ if (try_module_get(ldops->owner)) {
+- ldops->refcount++;
++ atomic_inc(&ldops->refcount);
+ ret = ldops;
+ }
+ }
+@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty_ldisc_lock, flags);
+- ldops->refcount--;
++ atomic_dec(&ldops->refcount);
+ module_put(ldops->owner);
+ spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ }
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index a605549..6bd3c96 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
+ kbd->kbdmode == VC_OFF) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc);
+ }
+
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 65447c5..0526f0a 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
+ if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
+ return -EFAULT;
+
+- if (!capable(CAP_SYS_TTY_CONFIG))
+- perm = 0;
+-
+ switch (cmd) {
+ case KDGKBENT:
+ key_map = key_maps[s];
+@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
+ val = (i ? K_HOLE : K_NOSUCHMAP);
+ return put_user(val, &user_kbe->kb_value);
+ case KDSKBENT:
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ perm = 0;
++
+ if (!perm)
+ return -EPERM;
+ if (!i && v == K_NOSUCHMAP) {
+@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ int i, j, k;
+ int ret;
+
+- if (!capable(CAP_SYS_TTY_CONFIG))
+- perm = 0;
+-
+ kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
+ if (!kbs) {
+ ret = -ENOMEM;
+@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ kfree(kbs);
+ return ((p && *p) ? -EOVERFLOW : 0);
+ case KDSKBSENT:
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ perm = 0;
++
+ if (!perm) {
+ ret = -EPERM;
+ goto reterr;
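
The vt_ioctl change narrows the CAP_SYS_TTY_CONFIG requirement from all keymap ioctls to the mutating ones: KDGKBENT/KDGKBSENT reads stay unprivileged, while KDSKBENT/KDSKBSENT writes zero perm without the capability. Dispatch shape, with hypothetical local names:

enum { KDGKBENT_SK, KDSKBENT_SK };	/* sketch-only constants */

static int do_kdsk_ioctl_sketch(int cmd, int perm, int has_tty_config)
{
	switch (cmd) {
	case KDGKBENT_SK:
		return 0;		/* read: no capability check */
	case KDSKBENT_SK:
		if (!has_tty_config)
			perm = 0;	/* write: gate applies here  */
		return perm ? 0 : -1;	/* -EPERM when denied        */
	}
	return -1;
}
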
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index af57648..2a8a122 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -25,6 +25,7 @@
+ #include <linux/kobject.h>
+ #include <linux/cdev.h>
+ #include <linux/uio_driver.h>
++#include <asm/local.h>
+
+ #define UIO_MAX_DEVICES (1U << MINORBITS)
+
+@@ -32,10 +33,10 @@ struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+- atomic_t event;
++ atomic_unchecked_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+- int vma_count;
++ local_t vma_count;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct uio_device *idev = dev_get_drvdata(dev);
+- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
++ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
+ }
+
+ static struct device_attribute uio_class_attributes[] = {
+@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
+ {
+ struct uio_device *idev = info->uio_dev;
+
+- atomic_inc(&idev->event);
++ atomic_inc_unchecked(&idev->event);
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+ }
+@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
+ }
+
+ listener->dev = idev;
+- listener->event_count = atomic_read(&idev->event);
++ listener->event_count = atomic_read_unchecked(&idev->event);
+ filep->private_data = listener;
+
+ if (idev->info->open) {
+@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
+ return -EIO;
+
+ poll_wait(filep, &idev->wait, wait);
+- if (listener->event_count != atomic_read(&idev->event))
++ if (listener->event_count != atomic_read_unchecked(&idev->event))
+ return POLLIN | POLLRDNORM;
+ return 0;
+ }
+@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- event_count = atomic_read(&idev->event);
++ event_count = atomic_read_unchecked(&idev->event);
+ if (event_count != listener->event_count) {
+ if (copy_to_user(buf, &event_count, count))
+ retval = -EFAULT;
+@@ -594,9 +595,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
+ static int uio_find_mem_index(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
++ unsigned long size;
+
+ if (vma->vm_pgoff < MAX_UIO_MAPS) {
+- if (idev->info->mem[vma->vm_pgoff].size == 0)
++ size = idev->info->mem[vma->vm_pgoff].size;
++ if (size == 0)
++ return -1;
++ if (vma->vm_end - vma->vm_start > size)
+ return -1;
+ return (int)vma->vm_pgoff;
+ }
+@@ -606,13 +611,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
+ static void uio_vma_open(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count++;
++ local_inc(&idev->vma_count);
+ }
+
+ static void uio_vma_close(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+- idev->vma_count--;
++ local_dec(&idev->vma_count);
+ }
+
+ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -655,6 +660,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
+ return -EINVAL;
+ mem = idev->info->mem + mi;
+
++ if (mem->addr & ~PAGE_MASK)
++ return -ENODEV;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+
+@@ -833,7 +840,7 @@ int __uio_register_device(struct module *owner,
+ idev->owner = owner;
+ idev->info = info;
+ init_waitqueue_head(&idev->wait);
+- atomic_set(&idev->event, 0);
++ atomic_set_unchecked(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+ if (ret)
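
The uio hunks add two mmap sanity checks: a mapping may not extend past the backing memory region (enforced in both uio_find_mem_index() and uio_mmap_physical()), and a physical mapping must start on a page boundary. The checks, sketched with plain integers:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int uio_map_ok(uintptr_t addr, size_t region_size, size_t map_len)
{
	if (addr & ~PAGE_MASK)
		return -1;	/* -ENODEV: region not page-aligned */
	if (map_len > region_size)
		return -1;	/* -EINVAL: would map past the end  */
	return 0;
}
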
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index 9497171..bfeecaf 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+- if (index < 0 || index > 0x7f)
++ if (index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
+index d3448ca..d2864ca 100644
+--- a/drivers/usb/atm/usbatm.c
++++ b/drivers/usb/atm/usbatm.c
+@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (printk_ratelimit())
+ atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
+ __func__, vpi, vci);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ return;
+ }
+
+@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (length > ATM_MAX_AAL5_PDU) {
+ atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
+ __func__, length, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (sarb->len < pdu_length) {
+ atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
+ __func__, pdu_length, sarb->len, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+ if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
+ atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
+ __func__, vcc);
+- atomic_inc(&vcc->stats->rx_err);
++ atomic_inc_unchecked(&vcc->stats->rx_err);
+ goto out;
+ }
+
+@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+ if (printk_ratelimit())
+ atm_err(instance, "%s: no memory for skb (length: %u)!\n",
+ __func__, length);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ goto out;
+ }
+
+@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
+
+ vcc->push(vcc, skb);
+
+- atomic_inc(&vcc->stats->rx);
++ atomic_inc_unchecked(&vcc->stats->rx);
+ out:
+ skb_trim(sarb, 0);
+ }
+@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
+ struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
+
+ usbatm_pop(vcc, skb);
+- atomic_inc(&vcc->stats->tx);
++ atomic_inc_unchecked(&vcc->stats->tx);
+
+ skb = skb_dequeue(&instance->sndqueue);
+ }
+@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
+ if (!left--)
+ return sprintf(page,
+ "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
+- atomic_read(&atm_dev->stats.aal5.tx),
+- atomic_read(&atm_dev->stats.aal5.tx_err),
+- atomic_read(&atm_dev->stats.aal5.rx),
+- atomic_read(&atm_dev->stats.aal5.rx_err),
+- atomic_read(&atm_dev->stats.aal5.rx_drop));
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
+
+ if (!left--) {
+ if (instance->disconnected)
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index 3440812..2a4ef1f 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -126,7 +126,7 @@ static const char format_endpt[] =
+ * time it gets called.
+ */
+ static struct device_connect_event {
+- atomic_t count;
++ atomic_unchecked_t count;
+ wait_queue_head_t wait;
+ } device_event = {
+ .count = ATOMIC_INIT(1),
+@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
+
+ void usbfs_conn_disc_event(void)
+ {
+- atomic_add(2, &device_event.count);
++ atomic_add_unchecked(2, &device_event.count);
+ wake_up(&device_event.wait);
+ }
+
+@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
+
+ poll_wait(file, &device_event.wait, wait);
+
+- event_count = atomic_read(&device_event.count);
++ event_count = atomic_read_unchecked(&device_event.count);
+ if (file->f_version != event_count) {
+ file->f_version = event_count;
+ return POLLIN | POLLRDNORM;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 49257b3..de27d93 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -147,7 +147,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+ struct dev_state *ps = file->private_data;
+ struct usb_device *dev = ps->dev;
+ ssize_t ret = 0;
+- unsigned len;
++ size_t len;
+ loff_t pos;
+ int i;
+
+@@ -189,16 +189,16 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+ for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
+ struct usb_config_descriptor *config =
+ (struct usb_config_descriptor *)dev->rawdescriptors[i];
+- unsigned int length = le16_to_cpu(config->wTotalLength);
++ size_t length = le16_to_cpu(config->wTotalLength);
+
+ if (*ppos < pos + length) {
+
+ /* The descriptor may claim to be longer than it
+ * really is. Here is the actual allocated length. */
+- unsigned alloclen =
++ size_t alloclen =
+ le16_to_cpu(dev->config[i].desc.wTotalLength);
+
+- len = length - (*ppos - pos);
++ len = length + pos - *ppos;
+ if (len > nbytes)
+ len = nbytes;
+
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 032e5a6..bc422e4 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1475,7 +1475,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+ */
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+- atomic_inc(&urb->dev->urbnum);
++ atomic_inc_unchecked(&urb->dev->urbnum);
+ usbmon_urb_submit(&hcd->self, urb);
+
+ /* NOTE requirements on root-hub callers (usbfs and the hub
+@@ -1502,7 +1502,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+ urb->hcpriv = NULL;
+ INIT_LIST_HEAD(&urb->urb_list);
+ atomic_dec(&urb->use_count);
+- atomic_dec(&urb->dev->urbnum);
++ atomic_dec_unchecked(&urb->dev->urbnum);
+ if (atomic_read(&urb->reject))
+ wake_up(&usb_kill_urb_queue);
+ usb_put_urb(urb);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 7013165..608be1a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -25,6 +25,7 @@
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
+ #include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -3410,6 +3411,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+ return;
+ }
+
++ if (gr_handle_new_usb())
++ goto done;
++
+ for (i = 0; i < SET_CONFIG_TRIES; i++) {
+
+ /* reallocate for each attempt, since references
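
gr_handle_new_usb() is the hook behind grsecurity's deny-new-USB hardening: once the toggle is armed (exposed as a sysctl when the feature is compiled in, as far as this sketch assumes), hub_port_connect_change() bails out before any newly plugged device is configured. A reduced model:

static int deny_new_usb;	/* assumed toggle wired to a sysctl */

static int gr_handle_new_usb_sketch(void)
{
	if (deny_new_usb)
		return 1;	/* caller: goto done, device ignored */
	return 0;
}
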
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index ab11ca3c..2df783d 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
+ * method can wait for it to complete. Since you don't have a handle on the
+ * URB used, you can't cancel the request.
+ */
+-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
++int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, int timeout)
+ {
+@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
+ * complete. Since you don't have a handle on the URB used, you can't cancel
+ * the request.
+ */
+-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
++int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+ {
+ return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
+@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
+ * interrupt endpoints. We will take the liberty of creating an interrupt URB
+ * (with the default interval) if the target is an interrupt endpoint.
+ */
+-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
++int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+ {
+ struct urb *urb;
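
__intentional_overflow(-1) annotates usb_control_msg() and friends for the size-overflow GCC plugin: arithmetic flowing through the marked functions is exempt from the plugin's inserted overflow traps, with -1 covering all parameters as the patch's usage suggests. The macro is empty without the plugin; the fallback definition below is an assumption for illustration:

#ifdef SIZE_OVERFLOW_PLUGIN	/* assumption: plugin build flag */
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* hypothetical declaration shaped like the annotated kernel ones */
int __intentional_overflow(-1) control_msg_sketch(unsigned int pipe,
						  int size, int timeout);
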
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 662c0cf..6880fbb 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
+ struct usb_device *udev;
+
+ udev = to_usb_device(dev);
+- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
++ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
+ }
+ static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
+
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 73cd900..40502a4 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -396,7 +396,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ dev->dev.dma_mask = bus->controller->dma_mask;
+ set_dev_node(&dev->dev, dev_to_node(bus->controller));
+ dev->state = USB_STATE_ATTACHED;
+- atomic_set(&dev->urbnum, 0);
++ atomic_set_unchecked(&dev->urbnum, 0);
+
+ INIT_LIST_HEAD(&dev->ep0.urb_list);
+ dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 347bb05..63e1b73 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
+
+ #ifdef CONFIG_KGDB
+ static struct kgdb_io kgdbdbgp_io_ops;
+-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
++static struct kgdb_io kgdbdbgp_io_ops_console;
++#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
+ #else
+ #define dbgp_kgdb_mode (0)
+ #endif
+@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
+ .write_char = kgdbdbgp_write_char,
+ };
+
++static struct kgdb_io kgdbdbgp_io_ops_console = {
++ .name = "kgdbdbgp",
++ .read_char = kgdbdbgp_read_char,
++ .write_char = kgdbdbgp_write_char,
++ .is_console = 1
++};
++
+ static int kgdbdbgp_wait_time;
+
+ static int __init kgdbdbgp_parse_config(char *str)
+@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
+ ptr++;
+ kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+ }
+- kgdb_register_io_module(&kgdbdbgp_io_ops);
+- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
++ if (early_dbgp_console.index != -1)
++ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
++ else
++ kgdb_register_io_module(&kgdbdbgp_io_ops);
+
+ return 0;
+ }
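
The ehci-dbgp change avoids writing kgdbdbgp_io_ops.is_console after registration: with other PaX changes constifying such ops structures, the field can no longer be patched at runtime, so the code keeps two fully initialized variants (and the dbgp_kgdb_mode macro checks against both). A sketch of the select-then-register pattern:

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

static const struct io_ops ops_plain   = { "kgdbdbgp", 0 };
static const struct io_ops ops_console = { "kgdbdbgp", 1 };

static void register_io(const struct io_ops *ops)
{
	printf("registered %s (console=%d)\n", ops->name, ops->is_console);
}

int main(void)
{
	int have_early_console = 1;  /* early_dbgp_console.index != -1 */

	/* pick the right pre-built object instead of mutating a field
	 * of an already-registered (possibly read-only) one          */
	register_io(have_early_console ? &ops_console : &ops_plain);
	return 0;
}
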
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index 0e641a1..49e6ac7 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -1212,6 +1212,7 @@ static struct file_system_type ffs_fs_type = {
+ .mount = ffs_fs_mount,
+ .kill_sb = ffs_fs_kill_sb,
+ };
++MODULE_ALIAS_FS("functionfs");
+
+
+ /* Driver's main init/cleanup functions *************************************/
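
The MODULE_ALIAS_FS("functionfs") line (and its twin for gadgetfs below) mirrors the mainline series that made sys_mount request only "fs-<name>" module aliases, so mounting a filesystem type can no longer be abused to load an arbitrary module by name. A sketch of what the macro expands to, with a hypothetical userspace stand-in for the kernel's .modinfo machinery:

/* In the kernel, MODULE_ALIAS_FS(NAME) is MODULE_ALIAS("fs-" NAME);
 * outside it, approximate the .modinfo string it would emit.        */
#define MODULE_ALIAS(x) \
	static const char __alias[] __attribute__((used)) = "alias=" x
#define MODULE_ALIAS_FS(name) MODULE_ALIAS("fs-" name)

MODULE_ALIAS_FS("functionfs");
/* modprobe now resolves "mount -t functionfs" via the
 * "fs-functionfs" alias instead of loading whatever module happens
 * to be called "functionfs".                                        */
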
+diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
+index db2d607..3a25028 100644
+--- a/drivers/usb/gadget/file_storage.c
++++ b/drivers/usb/gadget/file_storage.c
+@@ -3329,18 +3329,20 @@ static int __init fsg_bind(struct usb_gadget *gadget)
+ if ((rc = check_parameters(fsg)) != 0)
+ goto out;
+
++ pax_open_kernel();
+ if (mod_data.removable) { // Enable the store_xxx attributes
+- dev_attr_file.attr.mode = 0644;
+- dev_attr_file.store = fsg_store_file;
++ *(mode_t *)&dev_attr_file.attr.mode = 0644;
++ *(void **)&dev_attr_file.store = fsg_store_file;
+ if (!mod_data.cdrom) {
+- dev_attr_ro.attr.mode = 0644;
+- dev_attr_ro.store = fsg_store_ro;
++ *(mode_t *)&dev_attr_ro.attr.mode = 0644;
++ *(void **)&dev_attr_ro.store = fsg_store_ro;
+ }
+ }
+
+ /* Only for removable media? */
+- dev_attr_nofua.attr.mode = 0644;
+- dev_attr_nofua.store = fsg_store_nofua;
++ *(mode_t *)&dev_attr_nofua.attr.mode = 0644;
++ *(void **)&dev_attr_nofua.store = fsg_store_nofua;
++ pax_close_kernel();
+
+ /* Find out how many LUNs there should be */
+ i = mod_data.nluns;
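
The pax_open_kernel()/pax_close_kernel() bracket above is the standard idiom for the few places that must legitimately write to structures the constify plugin has moved into read-only memory: write protection is lifted (on x86 by clearing CR0.WP), the fields are patched through casts that strip the const, and protection is restored. A userspace analogy using mprotect() in place of the CR0 toggle:

#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void wp_set(void *page, int writable)
{
	mprotect(page, (size_t)getpagesize(),
		 writable ? PROT_READ | PROT_WRITE : PROT_READ);
}

int main(void)
{
	char *page = mmap(NULL, (size_t)getpagesize(),
			  PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(page, "ro data");
	wp_set(page, 0);   /* normal state: read-only      */

	wp_set(page, 1);   /* pax_open_kernel() analogue   */
	page[0] = 'R';     /* the one sanctioned write     */
	wp_set(page, 0);   /* pax_close_kernel() analogue  */

	printf("%s\n", page);
	return 0;
}

The *(mode_t *)& and *(void **)& casts in the hunk exist for the same reason: the plugin has const-qualified the lvalues, and the cast is the sanctioned escape hatch for this one-time setup write.
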
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index 7138540..2370195 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -2121,6 +2121,7 @@ static struct file_system_type gadgetfs_type = {
+ .mount = gadgetfs_mount,
+ .kill_sb = gadgetfs_kill_sb,
+ };
++MODULE_ALIAS_FS("gadgetfs");
+
+ /*----------------------------------------------------------------------*/
+
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index 9f7003e..b1db1b6 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -83,7 +83,7 @@ struct appledisplay {
+ spinlock_t lock;
+ };
+
+-static atomic_t count_displays = ATOMIC_INIT(0);
++static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
+ static struct workqueue_struct *wq;
+
+ static void appledisplay_complete(struct urb *urb)
+@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
+
+ /* Register backlight device */
+ snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
+- atomic_inc_return(&count_displays) - 1);
++ atomic_inc_return_unchecked(&count_displays) - 1);
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = 0xff;
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 1ee6b2a..523c0ae 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -200,7 +200,7 @@ static int usb_console_setup(struct console *co, char *options)
+ static void usb_console_write(struct console *co,
+ const char *buf, unsigned count)
+ {
+- static struct usbcons_info *info = &usbcons_info;
++ struct usbcons_info *info = &usbcons_info;
+ struct usb_serial_port *port = info->port;
+ struct usb_serial *serial;
+ int retval = -ENODEV;
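
Dropping the needless static above turns what was a writable, program-lifetime pointer in .data into an ordinary stack temporary; presumably the point is to remove one more fixed-address pointer that a kernel write primitive could redirect, since the value is trivially recomputed on every call. A sketch of the difference:

#include <stdio.h>

static int global_target = 42;

static void with_static(void)
{
	static int *info = &global_target;  /* lives in .data forever */
	printf("static   local at %p\n", (void *)&info);
}

static void without_static(void)
{
	int *info = &global_target;         /* dies with the frame */
	printf("ordinary local at %p\n", (void *)&info);
}

int main(void)
{
	with_static();
	without_static();
	return 0;
}
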
+diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
+index e39b188..1fffda8 100644
+--- a/drivers/usb/storage/realtek_cr.c
++++ b/drivers/usb/storage/realtek_cr.c
+@@ -430,7 +430,7 @@ static int rts51x_read_status(struct us_data *us,
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (buf == NULL)
+- return USB_STOR_TRANSPORT_ERROR;
++ return -ENOMEM;
+
+ US_DEBUGP("%s, lun = %d\n", __func__, lun);
+
+diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
+index 75f70f0..d467e1a 100644
+--- a/drivers/usb/storage/usb.h
++++ b/drivers/usb/storage/usb.h
+@@ -63,7 +63,7 @@ struct us_unusual_dev {
+ __u8 useProtocol;
+ __u8 useTransport;
+ int (*initFunction)(struct us_data *);
+-};
++} __do_const;
+
+
+ /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
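
__do_const marks us_unusual_dev for the constify plugin: structures made up of function pointers and setup constants are forced into read-only memory so the initFunction slot cannot be hijacked. A plugin-free approximation is simply declaring instances const:

struct ops_like {
	int (*init)(void *);
};

static int init_fn(void *p) { (void)p; return 0; }

/* const instance -> .rodata: a runtime overwrite of ->init faults,
 * which is what the plugin enforces for every __do_const struct.   */
static const struct ops_like entry = { .init = init_fn };

int use_entry(void)
{
	return entry.init(0);
}
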
+diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
+index d6bea3e..60b250e 100644
+--- a/drivers/usb/wusbcore/wa-hc.h
++++ b/drivers/usb/wusbcore/wa-hc.h
+@@ -192,7 +192,7 @@ struct wahc {
+ struct list_head xfer_delayed_list;
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_work;
+- atomic_t xfer_id_count;
++ atomic_unchecked_t xfer_id_count;
+ };
+
+
+@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+- atomic_set(&wa->xfer_id_count, 1);
++ atomic_set_unchecked(&wa->xfer_id_count, 1);
+ }
+
+ /**
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 5f6df6e..0a16602 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -297,7 +297,7 @@ out:
+ */
+ static void wa_xfer_id_init(struct wa_xfer *xfer)
+ {
+- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
++ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
+ }
+
+ /*
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index be32b1b..b5f6c08 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -631,7 +631,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
+ return 0;
+ }
+
+-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ {
+ struct file *eventfp, *filep = NULL,
+ *pollstart = NULL, *pollstop = NULL;
+diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
+index c22e8d3..12c48b0 100644
+--- a/drivers/video/arcfb.c
++++ b/drivers/video/arcfb.c
+@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
+ return -ENOSPC;
+
+ err = 0;
+- if ((count + p) > fbmemlength) {
++ if (count > (fbmemlength - p)) {
+ count = fbmemlength - p;
+ err = -ENOSPC;
+ }
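
The arcfb_write() rewrite is the canonical overflow-safe bounds check: "count + p > fbmemlength" can wrap past zero when the caller controls count, while "count > fbmemlength - p" cannot wrap once p is known to lie within the buffer. A demo with 32-bit types (the driver's own types differ, but the pattern is the same):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fbmemlength = 0x1000;
	uint32_t p = 0x10;            /* offset, already <= fbmemlength */
	uint32_t count = UINT32_MAX;  /* hostile write length           */

	if (count + p > fbmemlength)  /* sum wraps to 0xF: test fails  */
		puts("old check: caught");
	else
		puts("old check: missed the overflow");

	if (count > fbmemlength - p)  /* cannot wrap, since p <= len   */
		puts("new check: caught");
	return 0;
}
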
+diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
+index b0b2ac3..89a4399 100644
+--- a/drivers/video/aty/aty128fb.c
++++ b/drivers/video/aty/aty128fb.c
+@@ -148,7 +148,7 @@ enum {
+ };
+
+ /* Must match above enum */
+-static const char *r128_family[] __devinitdata = {
++static const char *r128_family[] __devinitconst = {
+ "AGP",
+ "PCI",
+ "PRO AGP",
+diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
+index 44bdce4..a79c55f 100644
+--- a/drivers/video/aty/atyfb_base.c
++++ b/drivers/video/aty/atyfb_base.c
+@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
+ par->accel_flags = var->accel_flags; /* hack */
+
+ if (var->accel_flags) {
+- info->fbops->fb_sync = atyfb_sync;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_sync = atyfb_sync;
++ pax_close_kernel();
+ info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ } else {
+- info->fbops->fb_sync = NULL;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_sync = NULL;
++ pax_close_kernel();
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
+index 46f72ed..107788d 100644
+--- a/drivers/video/aty/mach64_cursor.c
++++ b/drivers/video/aty/mach64_cursor.c
+@@ -7,6 +7,7 @@
+ #include <linux/string.h>
+
+ #include <asm/io.h>
++#include <asm/pgtable.h>
+
+ #ifdef __sparc__
+ #include <asm/fbio.h>
+@@ -208,7 +209,9 @@ int __devinit aty_init_cursor(struct fb_info *info)
+ info->sprite.buf_align = 16; /* and 64 lines tall. */
+ info->sprite.flags = FB_PIXMAP_IO;
+
+- info->fbops->fb_cursor = atyfb_cursor;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
++ pax_close_kernel();
+
+ return 0;
+ }
+diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
+index 7363c1b..b69ad66 100644
+--- a/drivers/video/backlight/backlight.c
++++ b/drivers/video/backlight/backlight.c
+@@ -303,7 +303,7 @@ struct backlight_device *backlight_device_register(const char *name,
+ new_bd->dev.class = backlight_class;
+ new_bd->dev.parent = parent;
+ new_bd->dev.release = bl_device_release;
+- dev_set_name(&new_bd->dev, name);
++ dev_set_name(&new_bd->dev, "%s", name);
+ dev_set_drvdata(&new_bd->dev, devdata);
+
+ /* Set default properties */
+diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
+index 72dd555..5f9bfbe 100644
+--- a/drivers/video/backlight/kb3886_bl.c
++++ b/drivers/video/backlight/kb3886_bl.c
+@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
+ static unsigned long kb3886bl_flags;
+ #define KB3886BL_SUSPENDED 0x01
+
+-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
++static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
+ {
+ .ident = "Sahara Touch-iT",
+ .matches = {
+diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
+index 71a11ca..86afe4b 100644
+--- a/drivers/video/backlight/lcd.c
++++ b/drivers/video/backlight/lcd.c
+@@ -209,7 +209,7 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+ new_ld->dev.class = lcd_class;
+ new_ld->dev.parent = parent;
+ new_ld->dev.release = lcd_device_release;
+- dev_set_name(&new_ld->dev, name);
++ dev_set_name(&new_ld->dev, "%s", name);
+ dev_set_drvdata(&new_ld->dev, devdata);
+
+ rc = device_register(&new_ld->dev);
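
Both dev_set_name() fixes (backlight.c above and lcd.c here) pass the caller-controlled name as an argument to a fixed "%s" format instead of as the format itself; a registered name containing conversion specifiers could otherwise read stack garbage or, via %n, write through bogus pointers. A userspace demo, with the unsafe call fenced off:

#include <stdio.h>

int main(void)
{
	const char *name = "lcd%s%s%n";  /* hostile device name */

	printf("%s\n", name);  /* fixed form: name is data */
#ifdef DEMONSTRATE_BUG
	printf(name);          /* old form: name is parsed as a format,
				* and %n writes through a bogus pointer */
#endif
	return 0;
}
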
+diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
+index e132157..516db70 100644
+--- a/drivers/video/backlight/s6e63m0.c
++++ b/drivers/video/backlight/s6e63m0.c
+@@ -690,7 +690,7 @@ static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
+ struct backlight_device *bd = NULL;
+ int brightness, rc;
+
+- rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode);
++ rc = kstrtouint(buf, 0, &lcd->gamma_mode);
+ if (rc < 0)
+ return rc;
+
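
The s6e63m0 change replaces strict_strtoul() through a cast with kstrtouint(): gamma_mode is an unsigned int, and parsing into it through an (unsigned long *) cast makes the helper store a full long, clobbering whatever follows the field on 64-bit. A demo of the old pattern's failure on a typical LP64 build (the demo is itself undefined behavior, which is rather the point):

#include <stdio.h>
#include <stdlib.h>

struct lcd_like {
	unsigned int gamma_mode;  /* 4 bytes          */
	unsigned int neighbour;   /* gets overwritten */
};

int main(void)
{
	struct lcd_like lcd = { 0, 0xDEADBEEF };

	/* old pattern: an 8-byte store into a 4-byte field */
	*(unsigned long *)&lcd.gamma_mode = strtoul("2", NULL, 0);

	printf("gamma=%u neighbour=0x%X\n",
	       lcd.gamma_mode, lcd.neighbour);
	return 0;
}
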
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 6b4fb5c..385e560 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -450,7 +450,7 @@ static int __init fb_console_setup(char *this_opt)
+
+ while ((options = strsep(&this_opt, ",")) != NULL) {
+ if (!strncmp(options, "font:", 5))
+- strcpy(fontname, options + 5);
++ strlcpy(fontname, options + 5, sizeof(fontname));
+
+ if (!strncmp(options, "scrollback:", 11)) {
+ options += 11;
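
fontname in fb_console_setup() is a fixed-size buffer filled from the kernel command line, so the unbounded strcpy() was an overflow waiting for a long font= option; strlcpy() truncates to the destination size and always NUL-terminates. A demo with a local fallback, since strlcpy is a kernel/BSD API rather than ISO C:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;  /* full length it tried to create */
}

int main(void)
{
	char fontname[8];
	const char *opt = "font:averylongfontnamefromthecmdline";

	my_strlcpy(fontname, opt + 5, sizeof(fontname));
	printf("%s\n", fontname);  /* safely truncated */
	return 0;
}
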
+diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
+index c27e153..5beb687 100644
+--- a/drivers/video/fb_defio.c
++++ b/drivers/video/fb_defio.c
+@@ -200,7 +200,9 @@ void fb_deferred_io_init(struct fb_info *info)
+
+ BUG_ON(!fbdefio);
+ mutex_init(&fbdefio->lock);
+- info->fbops->fb_mmap = fb_deferred_io_mmap;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
++ pax_close_kernel();
+ INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+ INIT_LIST_HEAD(&fbdefio->pagelist);
+ if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -231,7 +233,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
+ page->mapping = NULL;
+ }
+
+- info->fbops->fb_mmap = NULL;
++ *(void **)&info->fbops->fb_mmap = NULL;
+ mutex_destroy(&fbdefio->lock);
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index 5c3960d..15cf8fc 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
+ rc = -ENODEV;
+ goto out;
+ }
+- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
+- !info->fbops->fb_setcmap)) {
++ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
+ rc = -EINVAL;
+ goto out1;
+ }
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 0a22808..130eafe 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num && image->dx >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num && image->dy >= 0; x++) {
++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ return -EFAULT;
+ if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
+ return -EINVAL;
+- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
++ if (con2fb.framebuffer >= FB_MAX)
+ return -EINVAL;
+ if (!registered_fb[con2fb.framebuffer])
+ request_module("fb%d", con2fb.framebuffer);
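
All three fbmem.c fixes are signedness repairs: image->dx/dy and con2fb.framebuffer are unsigned, so "dx >= 0" is vacuously true and "framebuffer < 0" is vacuously false (the same reason the fbcmap.c hunk above drops its cmap->start < 0 test). The casts reinterpret the bits as signed so that wrap-around below zero actually terminates the blit loop:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dx = 20;          /* mimics struct fb_image.dx */
	const uint32_t step = 16;

	/* "dx >= 0" would never fail here; the (int32_t) cast turns
	 * the post-wrap value negative and stops the walk.          */
	for (int x = 0; x < 4 && (int32_t)dx >= 0; x++) {
		printf("blit at dx=%u\n", dx);
		dx -= step;  /* 20, 4, then wraps to 0xFFFFFFF4 */
	}
	return 0;
}
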
+diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
+index 5a5d092..265c5ed 100644
+--- a/drivers/video/geode/gx1fb_core.c
++++ b/drivers/video/geode/gx1fb_core.c
+@@ -29,7 +29,7 @@ static int crt_option = 1;
+ static char panel_option[32] = "";
+
+ /* Modes relevant to the GX1 (taken from modedb.c) */
+-static const struct fb_videomode __devinitdata gx1_modedb[] = {
++static const struct fb_videomode __devinitconst gx1_modedb[] = {
+ /* 640x480-60 VESA */
+ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
+index 0fad23f..0e9afa4 100644
+--- a/drivers/video/gxt4500.c
++++ b/drivers/video/gxt4500.c
+@@ -156,7 +156,7 @@ struct gxt4500_par {
+ static char *mode_option;
+
+ /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
+-static const struct fb_videomode defaultmode __devinitdata = {
++static const struct fb_videomode defaultmode __devinitconst = {
+ .refresh = 60,
+ .xres = 1280,
+ .yres = 1024,
+@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
+ return 0;
+ }
+
+-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
++static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
+ .id = "IBM GXT4500P",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_PSEUDOCOLOR,
+diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
+index 7672d2e..b56437f 100644
+--- a/drivers/video/i810/i810_accel.c
++++ b/drivers/video/i810/i810_accel.c
+@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
+ }
+ }
+ printk("ringbuffer lockup!!!\n");
++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
+ i810_report_error(mmio);
+ par->dev_flags |= LOCKUP;
+ info->pixmap.scan_align = 1;
+diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
+index 318f6fb..9a389c1 100644
+--- a/drivers/video/i810/i810_main.c
++++ b/drivers/video/i810/i810_main.c
+@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
+ static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
+
+ /* PCI */
+-static const char *i810_pci_list[] __devinitdata = {
++static const char *i810_pci_list[] __devinitconst = {
+ "Intel(R) 810 Framebuffer Device" ,
+ "Intel(R) 810-DC100 Framebuffer Device" ,
+ "Intel(R) 810E Framebuffer Device" ,
+diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
+index de36693..3c63fc2 100644
+--- a/drivers/video/jz4740_fb.c
++++ b/drivers/video/jz4740_fb.c
+@@ -136,7 +136,7 @@ struct jzfb {
+ uint32_t pseudo_palette[16];
+ };
+
+-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
++static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
+ .id = "JZ4740 FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
+index 3c14e43..eafa544 100644
+--- a/drivers/video/logo/logo_linux_clut224.ppm
++++ b/drivers/video/logo/logo_linux_clut224.ppm
+@@ -1,1604 +1,1123 @@
+ P3
+-# Standard 224-color Linux logo
+ 80 80
+ 255
+ [ ~1,250 rows of raw PPM pixel triplets elided: per the hunk header
+   above, the remainder of this hunk deletes the standard 224-color
+   80x80 Linux logo bitmap (maxval 255) and inserts a replacement
+   80x80 image in its place ]
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-250 250 250 214 214 214 198 198 198 190 150 46
+-219 162 10 236 178 12 234 174 13 224 166 10
+-216 158 10 213 154 11 213 154 11 216 158 10
+-226 170 11 239 182 13 246 190 14 246 190 14
+-246 190 14 246 190 14 242 186 14 206 162 42
+-101 101 101 58 58 58 30 30 30 14 14 14
+- 6 6 6 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 10 10 10
+- 30 30 30 74 74 74 174 135 50 216 158 10
+-236 178 12 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 241 196 14 226 184 13
+- 61 42 6 2 2 6 2 2 6 2 2 6
+- 22 22 22 238 238 238 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 226 226 226 187 187 187 180 133 36
+-216 158 10 236 178 12 239 182 13 236 178 12
+-230 174 11 226 170 11 226 170 11 230 174 11
+-236 178 12 242 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 186 14 239 182 13
+-206 162 42 106 106 106 66 66 66 34 34 34
+- 14 14 14 6 6 6 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 6 6 6
+- 26 26 26 70 70 70 163 133 67 213 154 11
+-236 178 12 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 241 196 14
+-190 146 13 18 14 6 2 2 6 2 2 6
+- 46 46 46 246 246 246 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 221 221 221 86 86 86 156 107 11
+-216 158 10 236 178 12 242 186 14 246 186 14
+-242 186 14 239 182 13 239 182 13 242 186 14
+-242 186 14 246 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-242 186 14 225 175 15 142 122 72 66 66 66
+- 30 30 30 10 10 10 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 6 6 6
+- 26 26 26 70 70 70 163 133 67 210 150 10
+-236 178 12 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-232 195 16 121 92 8 34 34 34 106 106 106
+-221 221 221 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-242 242 242 82 82 82 18 14 6 163 110 8
+-216 158 10 236 178 12 242 186 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 242 186 14 163 133 67
+- 46 46 46 18 18 18 6 6 6 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 10 10 10
+- 30 30 30 78 78 78 163 133 67 210 150 10
+-236 178 12 246 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-241 196 14 215 174 15 190 178 144 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 218 218 218
+- 58 58 58 2 2 6 22 18 6 167 114 7
+-216 158 10 236 178 12 246 186 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 186 14 242 186 14 190 150 46
+- 54 54 54 22 22 22 6 6 6 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 14 14 14
+- 38 38 38 86 86 86 180 133 36 213 154 11
+-236 178 12 246 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 232 195 16 190 146 13 214 214 214
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 250 250 250 170 170 170 26 26 26
+- 2 2 6 2 2 6 37 26 9 163 110 8
+-219 162 10 239 182 13 246 186 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 186 14 236 178 12 224 166 10 142 122 72
+- 46 46 46 18 18 18 6 6 6 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 6 6 6 18 18 18
+- 50 50 50 109 106 95 192 133 9 224 166 10
+-242 186 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-242 186 14 226 184 13 210 162 10 142 110 46
+-226 226 226 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-253 253 253 253 253 253 253 253 253 253 253 253
+-198 198 198 66 66 66 2 2 6 2 2 6
+- 2 2 6 2 2 6 50 34 6 156 107 11
+-219 162 10 239 182 13 246 186 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 242 186 14
+-234 174 13 213 154 11 154 122 46 66 66 66
+- 30 30 30 10 10 10 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 6 6 6 22 22 22
+- 58 58 58 154 121 60 206 145 10 234 174 13
+-242 186 14 246 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 186 14 236 178 12 210 162 10 163 110 8
+- 61 42 6 138 138 138 218 218 218 250 250 250
+-253 253 253 253 253 253 253 253 253 250 250 250
+-242 242 242 210 210 210 144 144 144 66 66 66
+- 6 6 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 61 42 6 163 110 8
+-216 158 10 236 178 12 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 239 182 13 230 174 11 216 158 10
+-190 142 34 124 112 88 70 70 70 38 38 38
+- 18 18 18 6 6 6 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 6 6 6 22 22 22
+- 62 62 62 168 124 44 206 145 10 224 166 10
+-236 178 12 239 182 13 242 186 14 242 186 14
+-246 186 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 236 178 12 216 158 10 175 118 6
+- 80 54 7 2 2 6 6 6 6 30 30 30
+- 54 54 54 62 62 62 50 50 50 38 38 38
+- 14 14 14 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 6 6 6 80 54 7 167 114 7
+-213 154 11 236 178 12 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 190 14 242 186 14 239 182 13 239 182 13
+-230 174 11 210 150 10 174 135 50 124 112 88
+- 82 82 82 54 54 54 34 34 34 18 18 18
+- 6 6 6 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 6 6 6 18 18 18
+- 50 50 50 158 118 36 192 133 9 200 144 11
+-216 158 10 219 162 10 224 166 10 226 170 11
+-230 174 11 236 178 12 239 182 13 239 182 13
+-242 186 14 246 186 14 246 190 14 246 190 14
+-246 190 14 246 190 14 246 190 14 246 190 14
+-246 186 14 230 174 11 210 150 10 163 110 8
+-104 69 6 10 10 10 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 6 6 6 91 60 6 167 114 7
+-206 145 10 230 174 11 242 186 14 246 190 14
+-246 190 14 246 190 14 246 186 14 242 186 14
+-239 182 13 230 174 11 224 166 10 213 154 11
+-180 133 36 124 112 88 86 86 86 58 58 58
+- 38 38 38 22 22 22 10 10 10 6 6 6
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 14 14 14
+- 34 34 34 70 70 70 138 110 50 158 118 36
+-167 114 7 180 123 7 192 133 9 197 138 11
+-200 144 11 206 145 10 213 154 11 219 162 10
+-224 166 10 230 174 11 239 182 13 242 186 14
+-246 186 14 246 186 14 246 186 14 246 186 14
+-239 182 13 216 158 10 185 133 11 152 99 6
+-104 69 6 18 14 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 2 2 6 2 2 6 2 2 6
+- 2 2 6 6 6 6 80 54 7 152 99 6
+-192 133 9 219 162 10 236 178 12 239 182 13
+-246 186 14 242 186 14 239 182 13 236 178 12
+-224 166 10 206 145 10 192 133 9 154 121 60
+- 94 94 94 62 62 62 42 42 42 22 22 22
+- 14 14 14 6 6 6 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 6 6 6
+- 18 18 18 34 34 34 58 58 58 78 78 78
+-101 98 89 124 112 88 142 110 46 156 107 11
+-163 110 8 167 114 7 175 118 6 180 123 7
+-185 133 11 197 138 11 210 150 10 219 162 10
+-226 170 11 236 178 12 236 178 12 234 174 13
+-219 162 10 197 138 11 163 110 8 130 83 6
+- 91 60 6 10 10 10 2 2 6 2 2 6
+- 18 18 18 38 38 38 38 38 38 38 38 38
+- 38 38 38 38 38 38 38 38 38 38 38 38
+- 38 38 38 38 38 38 26 26 26 2 2 6
+- 2 2 6 6 6 6 70 47 6 137 92 6
+-175 118 6 200 144 11 219 162 10 230 174 11
+-234 174 13 230 174 11 219 162 10 210 150 10
+-192 133 9 163 110 8 124 112 88 82 82 82
+- 50 50 50 30 30 30 14 14 14 6 6 6
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 6 6 6 14 14 14 22 22 22 34 34 34
+- 42 42 42 58 58 58 74 74 74 86 86 86
+-101 98 89 122 102 70 130 98 46 121 87 25
+-137 92 6 152 99 6 163 110 8 180 123 7
+-185 133 11 197 138 11 206 145 10 200 144 11
+-180 123 7 156 107 11 130 83 6 104 69 6
+- 50 34 6 54 54 54 110 110 110 101 98 89
+- 86 86 86 82 82 82 78 78 78 78 78 78
+- 78 78 78 78 78 78 78 78 78 78 78 78
+- 78 78 78 82 82 82 86 86 86 94 94 94
+-106 106 106 101 101 101 86 66 34 124 80 6
+-156 107 11 180 123 7 192 133 9 200 144 11
+-206 145 10 200 144 11 192 133 9 175 118 6
+-139 102 15 109 106 95 70 70 70 42 42 42
+- 22 22 22 10 10 10 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 6 6 6 10 10 10
+- 14 14 14 22 22 22 30 30 30 38 38 38
+- 50 50 50 62 62 62 74 74 74 90 90 90
+-101 98 89 112 100 78 121 87 25 124 80 6
+-137 92 6 152 99 6 152 99 6 152 99 6
+-138 86 6 124 80 6 98 70 6 86 66 30
+-101 98 89 82 82 82 58 58 58 46 46 46
+- 38 38 38 34 34 34 34 34 34 34 34 34
+- 34 34 34 34 34 34 34 34 34 34 34 34
+- 34 34 34 34 34 34 38 38 38 42 42 42
+- 54 54 54 82 82 82 94 86 76 91 60 6
+-134 86 6 156 107 11 167 114 7 175 118 6
+-175 118 6 167 114 7 152 99 6 121 87 25
+-101 98 89 62 62 62 34 34 34 18 18 18
+- 6 6 6 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 6 6 6 6 6 6 10 10 10
+- 18 18 18 22 22 22 30 30 30 42 42 42
+- 50 50 50 66 66 66 86 86 86 101 98 89
+-106 86 58 98 70 6 104 69 6 104 69 6
+-104 69 6 91 60 6 82 62 34 90 90 90
+- 62 62 62 38 38 38 22 22 22 14 14 14
+- 10 10 10 10 10 10 10 10 10 10 10 10
+- 10 10 10 10 10 10 6 6 6 10 10 10
+- 10 10 10 10 10 10 10 10 10 14 14 14
+- 22 22 22 42 42 42 70 70 70 89 81 66
+- 80 54 7 104 69 6 124 80 6 137 92 6
+-134 86 6 116 81 8 100 82 52 86 86 86
+- 58 58 58 30 30 30 14 14 14 6 6 6
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 6 6 6 10 10 10 14 14 14
+- 18 18 18 26 26 26 38 38 38 54 54 54
+- 70 70 70 86 86 86 94 86 76 89 81 66
+- 89 81 66 86 86 86 74 74 74 50 50 50
+- 30 30 30 14 14 14 6 6 6 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 6 6 6 18 18 18 34 34 34 58 58 58
+- 82 82 82 89 81 66 89 81 66 89 81 66
+- 94 86 66 94 86 76 74 74 74 50 50 50
+- 26 26 26 14 14 14 6 6 6 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 6 6 6 6 6 6 14 14 14 18 18 18
+- 30 30 30 38 38 38 46 46 46 54 54 54
+- 50 50 50 42 42 42 30 30 30 18 18 18
+- 10 10 10 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 6 6 6 14 14 14 26 26 26
+- 38 38 38 50 50 50 58 58 58 58 58 58
+- 54 54 54 42 42 42 30 30 30 18 18 18
+- 10 10 10 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 6 6 6
+- 6 6 6 10 10 10 14 14 14 18 18 18
+- 18 18 18 14 14 14 10 10 10 6 6 6
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 6 6 6
+- 14 14 14 18 18 18 22 22 22 22 22 22
+- 18 18 18 14 14 14 10 10 10 6 6 6
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
+- 0 0 0 0 0 0 0 0 0 0 0 0
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
++0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
++37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
++2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
++4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
++1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
++153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
++0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
++60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
++4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
++2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
++4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
++165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
++1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
++3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
++163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
++0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
++37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
++37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
++156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
++125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
++5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
++0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
++174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
++0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
++64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
++5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
++156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
++156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
++174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
++1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
++13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
++174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
++22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
++90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
++0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
++174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
++156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
++163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
++4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
++5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
++131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
++190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
++90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
++31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
++4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
++155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
++167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
++153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
++41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
++1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
++177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
++125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
++136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
++7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
++125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
++156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
++137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
++156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
++167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
++0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
++166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
++6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
++90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
++1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
++167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
++157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
++26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
++158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
++165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
++60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
++137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
++52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
++13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
++4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
++0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
++158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
++167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
++4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
++174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
++155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
++137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
++16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
++136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
++2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
++4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
++4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
++37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
++157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
++153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
++4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
++125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
++156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
++174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
++4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
++136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
++1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
++2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
++0 0 0 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
++4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
++158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
++153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
++37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
++4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
++4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
++154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
++174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
++32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
++28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
++50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
++0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
++2 0 0 0 0 0
++4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
++0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
++174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
++165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
++4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
++4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
++4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
++174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
++60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
++136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
++22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
++136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
++26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
++37 38 37 0 0 0
++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
++13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
++153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
++177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
++4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
++5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
++6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
++166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
++4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
++146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
++71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
++90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
++125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
++85 115 134 4 0 0
++4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
++125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
++155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
++125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
++5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
++0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
++5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
++37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
++4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
++90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
++2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
++13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
++166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
++60 73 81 4 0 0
++4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
++174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
++156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
++4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
++4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
++10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
++4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
++4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
++80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
++28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
++50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
++1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
++167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
++16 19 21 4 0 0
++4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
++158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
++167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
++4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
++4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
++80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
++4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
++3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
++146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
++68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
++136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
++24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
++163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
++4 0 0 4 3 3
++3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
++156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
++155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
++2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
++136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
++0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
++0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
++136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
++28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
++22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
++137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
++60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
++3 2 2 4 4 4
++3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
++157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
++37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
++4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
++0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
++101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
++14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
++22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
++136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
++17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
++2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
++166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
++13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
++4 4 4 4 4 4
++1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
++163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
++4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
++4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
++40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
++101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
++136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
++136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
++136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
++3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
++174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
++4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
++155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
++4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
++4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
++101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
++136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
++136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
++136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
++90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
++85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
++167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
++6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
++5 5 5 5 5 5
++1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
++131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
++6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
++0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
++101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
++101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
++136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
++101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
++7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
++174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
++24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
++4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
++5 5 5 4 4 4
++4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
++131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
++6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
++13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
++101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
++136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
++136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
++2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
++174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
++4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
++137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
++4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
++64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
++136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
++101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
++37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
++167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
++3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
++153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
++4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
++90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
++101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
++101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
++35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
++154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
++60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
++153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
++4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
++64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
++136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
++13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
++174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
++6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
++156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
++4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
++90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
++90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
++101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
++2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
++174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
++158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
++4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
++37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
++90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
++90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
++90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
++5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
++167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
++6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
++163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
++4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
++18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
++64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
++90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
++101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
++13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
++3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
++174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
++4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
++167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
++4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
++26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
++90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
++101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
++7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
++4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
++174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
++174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
++5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
++101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
++2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
++3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
++153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
++4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
++174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
++18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
++35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
++2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
++3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
++131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
++4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
++174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
++18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
++7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
++4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
++174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
++5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
++18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
++18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
++26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
++28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
++3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
++4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
++174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
++10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
++90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
++3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
++177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
++10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
++26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
++6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
++10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
++2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
++177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
++10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
++7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
++3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
++21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
++190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
++5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
++10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
++24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
++18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
++28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
++26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
++190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
++10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
++0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
++26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
++37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
++90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
++4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
++193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
++5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
++10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
++1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
++22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
++26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
++190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
++5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
++10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
++2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
++26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
++10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
++193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
++5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
++13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
++10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
++4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
++190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
++5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
++28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
++10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
++28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
++26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
++26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
++4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
++193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
++5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
++4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
++10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
++18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
++22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
++4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
++190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
++6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
++1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
++18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
++10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
++26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
++1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
++5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
++137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
++0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
++4 4 4 4 4 4
++4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
++193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
++2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
++4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
++10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
++10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
++26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
++2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
++[... several hundred further rows of ASCII PPM pixel data (RGB triplets) for the embedded logo image omitted for readability; the full bitmap is carried by the patch itself ...]
+diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
+index fe92eed..106e085 100644
+--- a/drivers/video/mb862xx/mb862xxfb_accel.c
++++ b/drivers/video/mb862xx/mb862xxfb_accel.c
+@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
+ struct mb862xxfb_par *par = info->par;
+
+ if (info->var.bits_per_pixel == 32) {
+- info->fbops->fb_fillrect = cfb_fillrect;
+- info->fbops->fb_copyarea = cfb_copyarea;
+- info->fbops->fb_imageblit = cfb_imageblit;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
++ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
++ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
++ pax_close_kernel();
+ } else {
+ outreg(disp, GC_L0EM, 3);
+- info->fbops->fb_fillrect = mb86290fb_fillrect;
+- info->fbops->fb_copyarea = mb86290fb_copyarea;
+- info->fbops->fb_imageblit = mb86290fb_imageblit;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
++ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
++ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
++ pax_close_kernel();
+ }
+ outreg(draw, GDC_REG_DRAW_BASE, 0);
+ outreg(draw, GDC_REG_MODE_MISC, 0x8000);
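The hunk above shows the constification write pattern that recurs throughout this patch: under CONFIG_PAX_KERNEXEC the constify plugin makes ops structures such as fb_ops read-only, so a plain assignment to info->fbops->fb_fillrect would fault. A minimal sketch of the idiom, using the PaX helpers from this patch (set_fillrect_op and fn are illustrative names, not part of the patch):

/* Sketch only: writes a new handler into a constified fb_ops slot.
 * pax_open_kernel()/pax_close_kernel() come from the PaX patch itself;
 * the (void **) cast defeats the const qualifier the plugin adds. */
static void set_fillrect_op(struct fb_info *info,
			    void (*fn)(struct fb_info *,
				       const struct fb_fillrect *))
{
	pax_open_kernel();			/* temporarily allow kernel writes */
	*(void **)&info->fbops->fb_fillrect = fn;
	pax_close_kernel();			/* restore read-only protection */
}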
+diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
+index 081dc47..6e20d0b 100644
+--- a/drivers/video/nvidia/nvidia.c
++++ b/drivers/video/nvidia/nvidia.c
+@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
+ info->fix.line_length = (info->var.xres_virtual *
+ info->var.bits_per_pixel) >> 3;
+ if (info->var.accel_flags) {
+- info->fbops->fb_imageblit = nvidiafb_imageblit;
+- info->fbops->fb_fillrect = nvidiafb_fillrect;
+- info->fbops->fb_copyarea = nvidiafb_copyarea;
+- info->fbops->fb_sync = nvidiafb_sync;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
++ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
++ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
++ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
++ pax_close_kernel();
+ info->pixmap.scan_align = 4;
+ info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ info->flags |= FBINFO_READS_FAST;
+ NVResetGraphics(info);
+ } else {
+- info->fbops->fb_imageblit = cfb_imageblit;
+- info->fbops->fb_fillrect = cfb_fillrect;
+- info->fbops->fb_copyarea = cfb_copyarea;
+- info->fbops->fb_sync = NULL;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
++ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
++ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
++ *(void **)&info->fbops->fb_sync = NULL;
++ pax_close_kernel();
+ info->pixmap.scan_align = 1;
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->flags &= ~FBINFO_READS_FAST;
+@@ -1173,8 +1177,11 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
+ info->pixmap.size = 8 * 1024;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+- if (!hwcur)
+- info->fbops->fb_cursor = NULL;
++ if (!hwcur) {
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_cursor = NULL;
++ pax_close_kernel();
++ }
+
+ info->var.accel_flags = (!noaccel);
+
+diff --git a/drivers/video/output.c b/drivers/video/output.c
+index 0d6f2cd..6285b97 100644
+--- a/drivers/video/output.c
++++ b/drivers/video/output.c
+@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
+ new_dev->props = op;
+ new_dev->dev.class = &video_output_class;
+ new_dev->dev.parent = dev;
+- dev_set_name(&new_dev->dev, name);
++ dev_set_name(&new_dev->dev, "%s", name);
+ dev_set_drvdata(&new_dev->dev, devdata);
+ ret_code = device_register(&new_dev->dev);
+ if (ret_code) {
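The one-line output.c change above is a standard format-string hardening fix rather than a PaX-specific one: name is caller-supplied and may contain % conversions, so it must be passed as an argument, never as the format. In sketch form:

/* Unsafe: a name such as "%s%n" would be interpreted as a format string. */
dev_set_name(&new_dev->dev, name);

/* Safe: the string is treated purely as data. */
dev_set_name(&new_dev->dev, "%s", name);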
+diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
+index 28b1c6c..b9939d9 100644
+--- a/drivers/video/s1d13xxxfb.c
++++ b/drivers/video/s1d13xxxfb.c
+@@ -883,8 +883,10 @@ s1d13xxxfb_probe(struct platform_device *pdev)
+
+ switch(prod_id) {
+ case S1D13506_PROD_ID: /* activate acceleration */
+- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++ pax_open_kernel();
++ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
++ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
++ pax_close_kernel();
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+ FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+ break;
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index dd9533a..aff3199e 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -1172,7 +1172,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+ info->fbdefio = NULL;
+- info->fbops->fb_mmap = ufx_ops_mmap;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
++ pax_close_kernel();
+ }
+
+ pr_debug("released /dev/fb%d user=%d count=%d",
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index cb5988f..439ecb0 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(width*height*2, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+
+@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
+ dlfb_urb_completion(urb);
+
+ error:
+- atomic_add(bytes_sent, &dev->bytes_sent);
+- atomic_add(bytes_identical, &dev->bytes_identical);
+- atomic_add(bytes_rendered, &dev->bytes_rendered);
++ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
++ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
++ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
+ end_cycles = get_cycles();
+- atomic_add(((unsigned int) ((end_cycles - start_cycles)
++ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
+ >> 10)), /* Kcycles */
+ &dev->cpu_kcycles_used);
+ }
+@@ -986,7 +986,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+ info->fbdefio = NULL;
+- info->fbops->fb_mmap = dlfb_ops_mmap;
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
++ pax_close_kernel();
+ }
+
+ pr_warn("released /dev/fb%d user=%d count=%d\n",
+@@ -1368,7 +1370,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_rendered));
++ atomic_read_unchecked(&dev->bytes_rendered));
+ }
+
+ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_identical));
++ atomic_read_unchecked(&dev->bytes_identical));
+ }
+
+ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->bytes_sent));
++ atomic_read_unchecked(&dev->bytes_sent));
+ }
+
+ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+@@ -1392,7 +1394,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+- atomic_read(&dev->cpu_kcycles_used));
++ atomic_read_unchecked(&dev->cpu_kcycles_used));
+ }
+
+ static ssize_t edid_show(
+@@ -1449,10 +1451,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
+ struct fb_info *fb_info = dev_get_drvdata(fbdev);
+ struct dlfb_data *dev = fb_info->par;
+
+- atomic_set(&dev->bytes_rendered, 0);
+- atomic_set(&dev->bytes_identical, 0);
+- atomic_set(&dev->bytes_sent, 0);
+- atomic_set(&dev->cpu_kcycles_used, 0);
++ atomic_set_unchecked(&dev->bytes_rendered, 0);
++ atomic_set_unchecked(&dev->bytes_identical, 0);
++ atomic_set_unchecked(&dev->bytes_sent, 0);
++ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
+
+ return count;
+ }
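The udlfb hunks above convert pure statistics counters to the PaX atomic_unchecked_t API. Under PAX_REFCOUNT, ordinary atomic_t arithmetic is instrumented to trap on overflow in order to catch reference-count bugs; counters that may legitimately wrap, such as byte and cycle statistics, are moved to the *_unchecked variants so they stay outside that check. A minimal sketch, assuming the PaX types from this patch:

/* Sketch only: a wrap-tolerant statistics counter under PAX_REFCOUNT. */
static atomic_unchecked_t bytes_rendered = ATOMIC_INIT(0);

static void account_rendered(unsigned int nbytes)
{
	/* wraparound here is harmless, so the unchecked variant is used */
	atomic_add_unchecked(nbytes, &bytes_rendered);
}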
+diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
+index 8813588..65454ec 100644
+--- a/drivers/video/uvesafb.c
++++ b/drivers/video/uvesafb.c
+@@ -19,6 +19,7 @@
+ #include <linux/io.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/moduleloader.h>
+ #include <video/edid.h>
+ #include <video/uvesafb.h>
+ #ifdef CONFIG_X86
+@@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
+ struct uvesafb_task *utask;
+ struct uvesafb_ktask *task;
+
+- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++ if (!capable(CAP_SYS_ADMIN))
+ return;
+
+ if (msg->seq >= UVESAFB_TASKS_MAX)
+@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
+ NULL,
+ };
+
+- return call_usermodehelper(v86d_path, argv, envp, 1);
++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
+ }
+
+ /*
+@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
+ if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
+ par->pmi_setpal = par->ypan = 0;
+ } else {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_MODULES
++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
++#endif
++ if (!par->pmi_code) {
++ par->pmi_setpal = par->ypan = 0;
++ return 0;
++ }
++#endif
++
+ par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
+ + task->t.regs.edi);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
++ pax_close_kernel();
++
++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
++#else
+ par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
+ par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
++#endif
++
+ printk(KERN_INFO "uvesafb: protected mode interface info at "
+ "%04x:%04x\n",
+ (u16)task->t.regs.es, (u16)task->t.regs.edi);
+@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
+ par->ypan = ypan;
+
+ if (par->pmi_setpal || par->ypan) {
++#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
+ if (__supported_pte_mask & _PAGE_NX) {
+ par->pmi_setpal = par->ypan = 0;
+ 			printk(KERN_WARNING "uvesafb: NX protection is active. "
+ 				"Better not to use the PMI.\n");
+- } else {
++ } else
++#endif
+ uvesafb_vbe_getpmi(task, par);
+- }
+ }
+ #else
+ /* The protected mode interface is not available on non-x86. */
+@@ -1449,8 +1473,11 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
+ info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
+
+ /* Disable blanking if the user requested so. */
+- if (!blank)
+- info->fbops->fb_blank = NULL;
++ if (!blank) {
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_blank = NULL;
++ pax_close_kernel();
++ }
+
+ /*
+ * Find out how much IO memory is required for the mode with
+@@ -1526,8 +1553,11 @@ static void __devinit uvesafb_init_info(struct fb_info *info,
+ info->flags = FBINFO_FLAG_DEFAULT |
+ (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
+
+- if (!par->ypan)
+- info->fbops->fb_pan_display = NULL;
++ if (!par->ypan) {
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_pan_display = NULL;
++ pax_close_kernel();
++ }
+ }
+
+ static void __devinit uvesafb_init_mtrr(struct fb_info *info)
+@@ -1828,6 +1858,11 @@ out:
+ if (par->vbe_modes)
+ kfree(par->vbe_modes);
+
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ framebuffer_release(info);
+ return err;
+ }
+@@ -1854,6 +1889,12 @@ static int uvesafb_remove(struct platform_device *dev)
+ kfree(par->vbe_state_orig);
+ if (par->vbe_state_saved)
+ kfree(par->vbe_state_saved);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ if (par->pmi_code)
++ module_free_exec(NULL, par->pmi_code);
++#endif
++
+ }
+
+ framebuffer_release(info);
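The uvesafb changes above deal with CONFIG_PAX_KERNEXEC: the VBE protected-mode interface (PMI) code located via phys_to_virt() ends up in a mapping that is no longer executable, so the patch copies it into an executable allocation and fixes up the entry points. A condensed sketch of the pattern (pmi_size stands in for the length the VBE call reports in ecx):

#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
	/* obtain executable kernel memory and copy the BIOS PMI code into it */
	par->pmi_code = module_alloc_exec(pmi_size);
	pax_open_kernel();
	memcpy(par->pmi_code, par->pmi_base, pmi_size);
	pax_close_kernel();
	/* translate the aliases back into the executable mapping */
	par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
	par->pmi_pal   = ktva_ktla(par->pmi_code + par->pmi_base[2]);
#endif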
+diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
+index 501b340..d80aa17 100644
+--- a/drivers/video/vesafb.c
++++ b/drivers/video/vesafb.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
+ static int vram_total __initdata; /* Set total amount of memory */
+ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
+ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
+-static void (*pmi_start)(void) __read_mostly;
+-static void (*pmi_pal) (void) __read_mostly;
++static void (*pmi_start)(void) __read_only;
++static void (*pmi_pal) (void) __read_only;
+ static int depth __read_mostly;
+ static int vga_compat __read_mostly;
+ /* --------------------------------------------------------------------- */
+@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
+ unsigned int size_vmode;
+ unsigned int size_remap;
+ unsigned int size_total;
++ void *pmi_code = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
+ return -ENODEV;
+@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
+ size_remap = size_total;
+ vesafb_fix.smem_len = size_remap;
+
+-#ifndef __i386__
+- screen_info.vesapm_seg = 0;
+-#endif
+-
+ if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
+ printk(KERN_WARNING
+ "vesafb: cannot reserve video memory at 0x%lx\n",
+@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
+ printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
+
++#ifdef __i386__
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_code = module_alloc_exec(screen_info.vesapm_size);
++ if (!pmi_code)
++#elif !defined(CONFIG_PAX_KERNEXEC)
++ if (0)
++#endif
++
++#endif
++ screen_info.vesapm_seg = 0;
++
+ if (screen_info.vesapm_seg) {
+- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
+- screen_info.vesapm_seg,screen_info.vesapm_off);
++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
+ }
+
+ if (screen_info.vesapm_seg < 0xc000)
+@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
+
+ if (ypan || pmi_setpal) {
+ unsigned short *pmi_base;
++
+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
+- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
+- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pax_open_kernel();
++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
++#else
++ pmi_code = pmi_base;
++#endif
++
++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
++
++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ pmi_start = ktva_ktla(pmi_start);
++ pmi_pal = ktva_ktla(pmi_pal);
++ pax_close_kernel();
++#endif
++
+ printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
+ if (pmi_base[3]) {
+ printk(KERN_INFO "vesafb: pmi: ports = ");
+@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
+ (ypan ? FBINFO_HWACCEL_YPAN : 0);
+
+- if (!ypan)
+- info->fbops->fb_pan_display = NULL;
++ if (!ypan) {
++ pax_open_kernel();
++ *(void **)&info->fbops->fb_pan_display = NULL;
++ pax_close_kernel();
++ }
+
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ err = -ENOMEM;
+@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
+ info->node, info->fix.id);
+ return 0;
+ err:
++
++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
++ module_free_exec(NULL, pmi_code);
++#endif
++
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
+index 88714ae..16c2e11 100644
+--- a/drivers/video/via/via_clock.h
++++ b/drivers/video/via/via_clock.h
+@@ -56,7 +56,7 @@ struct via_clock {
+
+ void (*set_engine_pll_state)(u8 state);
+ void (*set_engine_pll)(struct via_pll_config config);
+-};
++} __no_const;
+
+
+ static inline u32 get_pll_internal_frequency(u32 ref_freq,
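__no_const, added to via_clock above, is the opt-out attribute for the PaX constify plugin: structures consisting only of function pointers are normally made const automatically, but via_clock is populated at runtime and so must stay writable. Sketch:

/* Without __no_const the constify plugin would make this struct const,
 * and the driver's runtime assignments to its members would not build. */
struct via_clock {
	void (*set_engine_pll_state)(u8 state);
	void (*set_engine_pll)(struct via_pll_config config);
	/* ... */
} __no_const;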
+diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
+index 1aa3897..3a6b3f1 100644
+--- a/drivers/xen/xenfs/super.c
++++ b/drivers/xen/xenfs/super.c
+@@ -116,6 +116,7 @@ static struct file_system_type xenfs_type = {
+ .mount = xenfs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("xenfs");
+
+ static int __init xenfs_init(void)
+ {
+diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
+index fef20db..d28b1ab 100644
+--- a/drivers/xen/xenfs/xenstored.c
++++ b/drivers/xen/xenfs/xenstored.c
+@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
+ static int xsd_kva_open(struct inode *inode, struct file *file)
+ {
+ file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL);
++#else
+ xen_store_interface);
++#endif
++
+ if (!file->private_data)
+ return -ENOMEM;
+ return 0;
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index 2524e4c..2962cc6a 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -185,7 +185,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
+
+ retval = v9fs_file_write_internal(inode,
+ v9inode->writeback_fid,
+- (__force const char __user *)buffer,
++ (const char __force_user *)buffer,
+ len, &offset, 0);
+ if (retval > 0)
+ retval = 0;
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 879ed88..bc03a01 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ void
+ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
+ IS_ERR(s) ? "<error>" : s);
+diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
+index c70251d..fe305fd 100644
+--- a/fs/9p/vfs_super.c
++++ b/fs/9p/vfs_super.c
+@@ -366,3 +366,4 @@ struct file_system_type v9fs_fs_type = {
+ .owner = THIS_MODULE,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
+ };
++MODULE_ALIAS_FS("9p");
+diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
+index 79e2ca7..5828ad1 100644
+--- a/fs/Kconfig.binfmt
++++ b/fs/Kconfig.binfmt
+@@ -86,7 +86,7 @@ config HAVE_AOUT
+
+ config BINFMT_AOUT
+ tristate "Kernel support for a.out and ECOFF binaries"
+- depends on HAVE_AOUT
++ depends on HAVE_AOUT && BROKEN
+ ---help---
+ A.out (Assembler.OUTput) is a set of formats for libraries and
+ executables used in the earliest versions of UNIX. Linux used
+diff --git a/fs/adfs/super.c b/fs/adfs/super.c
+index c8bf36a..d7b2b33 100644
+--- a/fs/adfs/super.c
++++ b/fs/adfs/super.c
+@@ -516,6 +516,7 @@ static struct file_system_type adfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("adfs");
+
+ static int __init init_adfs_fs(void)
+ {
+diff --git a/fs/affs/super.c b/fs/affs/super.c
+index b31507d..5b42a3b 100644
+--- a/fs/affs/super.c
++++ b/fs/affs/super.c
+@@ -597,6 +597,7 @@ static struct file_system_type affs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("affs");
+
+ static int __init init_affs_fs(void)
+ {
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index d890ae3..5733a4b 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ struct afs_vnode *vnode;
+ struct super_block *sb;
+ struct inode *inode;
+- static atomic_t afs_autocell_ino;
++ static atomic_unchecked_t afs_autocell_ino;
+
+ _enter("{%x:%u},%*.*s,",
+ AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ data.fid.unique = 0;
+ data.fid.vnode = 0;
+
+- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
++ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
+ afs_iget5_autocell_test, afs_iget5_set,
+ &data);
+ if (!inode) {
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 356dcf0..c0046cd 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -43,6 +43,7 @@ struct file_system_type afs_fs_type = {
+ .kill_sb = afs_kill_super,
+ .fs_flags = 0,
+ };
++MODULE_ALIAS_FS("afs");
+
+ static const struct super_operations afs_super_ops = {
+ .statfs = afs_statfs,
+diff --git a/fs/aio.c b/fs/aio.c
+index 8cdd8ea..64197b4 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+ size += sizeof(struct io_event) * nr_events;
+ nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+- if (nr_pages < 0)
++ if (nr_pages <= 0)
+ return -EINVAL;
+
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
+@@ -1461,18 +1461,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
+ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ {
+ ssize_t ret;
++ struct iovec iovstack;
+
+ #ifdef CONFIG_COMPAT
+ if (compat)
+ ret = compat_rw_copy_check_uvector(type,
+ (struct compat_iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec, 1);
+ else
+ #endif
+ ret = rw_copy_check_uvector(type,
+ (struct iovec __user *)kiocb->ki_buf,
+- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
++ kiocb->ki_nbytes, 1, &iovstack,
+ &kiocb->ki_iovec, 1);
+ if (ret < 0)
+ goto out;
+@@ -1481,6 +1482,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ if (ret < 0)
+ goto out;
+
++ if (kiocb->ki_iovec == &iovstack) {
++ kiocb->ki_inline_vec = iovstack;
++ kiocb->ki_iovec = &kiocb->ki_inline_vec;
++ }
+ kiocb->ki_nr_segs = kiocb->ki_nbytes;
+ kiocb->ki_cur_seg = 0;
+ /* ki_nbytes/left now reflect bytes instead of segs */
+diff --git a/fs/attr.c b/fs/attr.c
+index b8f55c4..4c2b80c 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
+ unsigned long limit;
+
+ limit = rlimit(RLIMIT_FSIZE);
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c
+index c038727..4ba2927 100644
+--- a/fs/autofs4/init.c
++++ b/fs/autofs4/init.c
+@@ -26,6 +26,7 @@ static struct file_system_type autofs_fs_type = {
+ .mount = autofs_mount,
+ .kill_sb = autofs4_kill_sb,
+ };
++MODULE_ALIAS_FS("autofs");
+
+ static int __init init_autofs4_fs(void)
+ {
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index e1fbdee..69291a4 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
+ {
+ unsigned long sigpipe, flags;
+ mm_segment_t fs;
+- const char *data = (const char *)addr;
++ const char __user *data = (const char __force_user *)addr;
+ ssize_t wr = 0;
+
+ /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
+@@ -338,6 +338,10 @@ static int validate_request(struct autofs_wait_queue **wait,
+ return 1;
+ }
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
++#endif
++
+ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
+ enum autofs_notify notify)
+ {
+@@ -371,7 +375,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
+
+ /* If this is a direct mount request create a dummy name */
+ if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ /* this name does get written to userland via autofs4_write() */
++ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
++#else
+ qstr.len = sprintf(name, "%p", dentry);
++#endif
+ else {
+ qstr.len = autofs4_getpath(sbi, dentry, &name);
+ if (!qstr.len) {
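The autofs4 hunk above closes a kernel-address leak: the dummy mount-trigger name was built with %p on a dentry pointer and is written out to userspace through the autofs pipe. Under CONFIG_GRKERNSEC_HIDESYM the patch substitutes a monotonically increasing id instead, as sketched:

#ifdef CONFIG_GRKERNSEC_HIDESYM
	/* an opaque counter: reveals nothing about kernel memory layout */
	qstr.len = sprintf(name, "%08x",
			   atomic_inc_return_unchecked(&autofs_dummy_name_id));
#else
	qstr.len = sprintf(name, "%p", dentry);	/* leaks a kernel pointer */
#endif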
+diff --git a/fs/befs/endian.h b/fs/befs/endian.h
+index 2722387..c8dd2a7 100644
+--- a/fs/befs/endian.h
++++ b/fs/befs/endian.h
+@@ -11,7 +11,7 @@
+
+ #include <asm/byteorder.h>
+
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(const struct super_block *sb, fs64 n)
+ {
+ if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
+@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
+ return (__force fs64)cpu_to_be64(n);
+ }
+
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(const struct super_block *sb, fs32 n)
+ {
+ if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
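__intentional_overflow(-1) in the befs hunk above is, as far as the patch shows, an annotation consumed by the PaX size_overflow gcc plugin: arithmetic flowing through the annotated function's return value is expected to wrap and must not be instrumented with overflow traps. In sketch form (declaration only):

/* The size_overflow plugin skips instrumentation on this function's
 * result; an endianness swap is not meaningful integer arithmetic. */
static inline u64 __intentional_overflow(-1)
fs64_to_cpu(const struct super_block *sb, fs64 n);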
+diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
+index 8342ca6..a9dca40 100644
+--- a/fs/befs/linuxvfs.c
++++ b/fs/befs/linuxvfs.c
+@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+ befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+ if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+@@ -937,6 +937,7 @@ static struct file_system_type befs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("befs");
+
+ static int __init
+ init_befs_fs(void)
+diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
+index 697af5b..ab6db36 100644
+--- a/fs/bfs/inode.c
++++ b/fs/bfs/inode.c
+@@ -470,6 +470,7 @@ static struct file_system_type bfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("bfs");
+
+ static int __init init_bfs_fs(void)
+ {
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index a6395bd..f1e376a 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -16,6 +16,7 @@
+ #include <linux/string.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+ #include <linux/ptrace.h>
+@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+ #endif
+ # define START_STACK(u) ((void __user *)u.start_stack)
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
+ dump.u_dsize = 0;
+
+ /* Make sure we have enough room to write the stack and data areas. */
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
+ dump.u_ssize = 0;
+
+@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ rlim = rlimit(RLIMIT_DATA);
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+ current->mm->free_area_cache = current->mm->mmap_base;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->mm->pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+ loff_t pos;
+@@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+@@ -352,13 +387,6 @@ beyond_if:
+ return retval;
+ }
+
+- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
+ #ifdef __alpha__
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 8dd615c..cb7cd01 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -32,6 +32,7 @@
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
+ #include <linux/coredump.h>
++#include <linux/xattr.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+@@ -51,6 +52,14 @@ static int elf_core_dump(struct coredump_params *cprm);
+ #define elf_core_dump NULL
+ #endif
+
++#ifdef CONFIG_PAX_MPROTECT
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++static void elf_handle_mmap(struct file *file);
++#endif
++
+ #if ELF_EXEC_PAGESIZE > PAGE_SIZE
+ #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+ #else
+@@ -70,6 +79,15 @@ static struct linux_binfmt elf_format = {
+ .load_binary = load_elf_binary,
+ .load_shlib = load_elf_library,
+ .core_dump = elf_core_dump,
++
++#ifdef CONFIG_PAX_MPROTECT
++ .handle_mprotect= elf_handle_mprotect,
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ .handle_mmap = elf_handle_mmap,
++#endif
++
+ .min_coredump = ELF_EXEC_PAGESIZE,
+ };
+
+@@ -77,6 +95,8 @@ static struct linux_binfmt elf_format = {
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ unsigned long e = end;
++
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+@@ -87,7 +107,7 @@ static int set_brk(unsigned long start, unsigned long end)
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+- current->mm->start_brk = current->mm->brk = end;
++ current->mm->start_brk = current->mm->brk = e;
+ return 0;
+ }
+
+@@ -148,12 +168,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ elf_addr_t __user *u_rand_bytes;
+ const char *k_platform = ELF_PLATFORM;
+ const char *k_base_platform = ELF_BASE_PLATFORM;
+- unsigned char k_rand_bytes[16];
++ u32 k_rand_bytes[4];
+ int items;
+ elf_addr_t *elf_info;
+ int ei_index = 0;
+ const struct cred *cred = current_cred();
+ struct vm_area_struct *vma;
++ unsigned long saved_auxv[AT_VECTOR_SIZE];
+
+ /*
+ * In some cases (e.g. Hyper-Threading), we want to avoid L1
+@@ -195,8 +216,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ * Generate 16 random bytes for userspace PRNG seeding.
+ */
+ get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+- u_rand_bytes = (elf_addr_t __user *)
+- STACK_ALLOC(p, sizeof(k_rand_bytes));
++ srandom32(k_rand_bytes[0] ^ random32());
++ srandom32(k_rand_bytes[1] ^ random32());
++ srandom32(k_rand_bytes[2] ^ random32());
++ srandom32(k_rand_bytes[3] ^ random32());
++ p = STACK_ROUND(p, sizeof(k_rand_bytes));
++ u_rand_bytes = (elf_addr_t __user *) p;
+ if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ return -EFAULT;
+
+@@ -308,9 +333,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+ return -EFAULT;
+ current->mm->env_end = p;
+
++ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
++
+ /* Put the elf_info on the stack in the right place. */
+ sp = (elf_addr_t __user *)envp + 1;
+- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
++ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -376,15 +403,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+ an ELF header */
+
+ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+- struct file *interpreter, unsigned long *interp_map_addr,
+- unsigned long no_base)
++ struct file *interpreter, unsigned long no_base)
+ {
+ struct elf_phdr *elf_phdata;
+ struct elf_phdr *eppnt;
+- unsigned long load_addr = 0;
++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
+ int load_addr_set = 0;
+ unsigned long last_bss = 0, elf_bss = 0;
+- unsigned long error = ~0UL;
++ unsigned long error = -EINVAL;
+ unsigned long total_size;
+ int retval, i, size;
+
+@@ -430,6 +456,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ goto out_close;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -453,8 +484,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ map_addr = elf_map(interpreter, load_addr + vaddr,
+ eppnt, elf_prot, elf_type, total_size);
+ total_size = 0;
+- if (!*interp_map_addr)
+- *interp_map_addr = map_addr;
+ error = map_addr;
+ if (BAD_ADDR(map_addr))
+ goto out_close;
+@@ -473,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ k = load_addr + eppnt->p_vaddr;
+ if (BAD_ADDR(k) ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - eppnt->p_memsz < k) {
++ eppnt->p_memsz > pax_task_size ||
++ pax_task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -513,11 +542,13 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+
+ /* Map the last of the bss segment */
+- down_write(&current->mm->mmap_sem);
+- error = do_brk(elf_bss, last_bss - elf_bss);
+- up_write(&current->mm->mmap_sem);
+- if (BAD_ADDR(error))
+- goto out_close;
++ if (last_bss > elf_bss) {
++ down_write(&current->mm->mmap_sem);
++ error = do_brk(elf_bss, last_bss - elf_bss);
++ up_write(&current->mm->mmap_sem);
++ if (BAD_ADDR(error))
++ goto out_close;
++ }
+ }
+
+ error = load_addr;
+@@ -528,6 +559,315 @@ out:
+ return error;
+ }
+
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++#ifdef CONFIG_PAX_SOFTMODE
++static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (pax_flags_softmode & MF_PAX_MPROTECT)
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_EI_PAX
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= MF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ pax_flags |= MF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_flags |= MF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ pax_flags |= MF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (randomize_va_space)
++ pax_flags |= MF_PAX_RANDMMAP;
++#endif
++
++#endif
++
++ return pax_flags;
++}
++
++static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ return ~0UL;
++}
++
++static unsigned long pax_parse_xattr_pax(struct file * const file)
++{
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ ssize_t xattr_size, i;
++ unsigned char xattr_value[sizeof("pemrs") - 1];
++ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
++
++ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
++ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
++ return ~0UL;
++
++ for (i = 0; i < xattr_size; i++)
++ switch (xattr_value[i]) {
++ default:
++ return ~0UL;
++
++#define parse_flag(option1, option2, flag) \
++ case option1: \
++ if (pax_flags_hardmode & MF_PAX_##flag) \
++ return ~0UL; \
++ pax_flags_hardmode |= MF_PAX_##flag; \
++ break; \
++ case option2: \
++ if (pax_flags_softmode & MF_PAX_##flag) \
++ return ~0UL; \
++ pax_flags_softmode |= MF_PAX_##flag; \
++ break;
++
++ parse_flag('p', 'P', PAGEEXEC);
++ parse_flag('e', 'E', EMUTRAMP);
++ parse_flag('m', 'M', MPROTECT);
++ parse_flag('r', 'R', RANDMMAP);
++ parse_flag('s', 'S', SEGMEXEC);
++
++#undef parse_flag
++ }
++
++ if (pax_flags_hardmode & pax_flags_softmode)
++ return ~0UL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
++ else
++#endif
++
++ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
++#else
++ return ~0UL;
++#endif
++
++}
++
++static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
++{
++ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
++
++ pax_flags = pax_parse_ei_pax(elf_ex);
++ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
++ xattr_pax_flags = pax_parse_xattr_pax(file);
++
++ if (pt_pax_flags == ~0UL)
++ pt_pax_flags = xattr_pax_flags;
++ else if (xattr_pax_flags == ~0UL)
++ xattr_pax_flags = pt_pax_flags;
++ if (pt_pax_flags != xattr_pax_flags)
++ return -EINVAL;
++ if (pt_pax_flags != ~0UL)
++ pax_flags = pt_pax_flags;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ if ((__supported_pte_mask & _PAGE_NX))
++ pax_flags &= ~MF_PAX_SEGMEXEC;
++ else
++ pax_flags &= ~MF_PAX_PAGEEXEC;
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->mm->pax_flags = pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -544,6 +884,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+ unsigned int random_variable = 0;
+
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++ return stack_top - current->mm->delta_stack;
++#endif
++
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+ random_variable = get_random_int() & STACK_RND_MASK;
+@@ -562,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ unsigned long load_addr = 0, load_bias = 0;
+ int load_addr_set = 0;
+ char * elf_interpreter = NULL;
+- unsigned long error;
++ unsigned long error = 0;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
+ unsigned long elf_bss, elf_brk;
+ int retval, i;
+@@ -572,11 +917,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long reloc_func_desc __maybe_unused = 0;
+ int executable_stack = EXSTACK_DEFAULT;
+- unsigned long def_flags = 0;
+ struct {
+ struct elfhdr elf_ex;
+ struct elfhdr interp_elf_ex;
+ } *loc;
++ unsigned long pax_task_size;
+
+ loc = kmalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+@@ -713,11 +1058,82 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+
+ /* OK, This is the point of no return */
+ current->flags &= ~PF_FORKNOEXEC;
+- current->mm->def_flags = def_flags;
++ current->mm->def_flags = 0;
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ current->mm->pax_flags = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_initial_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_initial_flags_func)
++ (pax_set_initial_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
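++	/* PAGEEXEC without hardware NX: approximate non-executable pages by
++	 * tracking the userland code segment limit (32-bit x86) */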
++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
++ }
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
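++	/* SEGMEXEC: restrict the task to the lower half of the address space;
++	 * the upper half serves as the execute-only mirror */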
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++ current->mm->def_flags |= VM_NOHUGEPAGE;
++ } else
++#endif
++
++ pax_task_size = TASK_SIZE;
++
++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
++ put_cpu();
++ }
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ executable_stack = EXSTACK_DISABLE_X;
++ current->personality &= ~READ_IMPLIES_EXEC;
++ } else
++#endif
++
+ if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+@@ -808,6 +1224,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
++#ifdef CONFIG_SPARC64
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
++#else
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
++#endif
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+@@ -840,9 +1270,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > pax_task_size ||
++ pax_task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
+@@ -881,17 +1311,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto out_free_dentry;
+ }
+ if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
+- send_sig(SIGSEGV, current, 0);
+- retval = -EFAULT; /* Nobody gets to see this, but.. */
+- goto out_free_dentry;
++ /*
++ * This bss-zeroing can fail if the ELF
++ * file specifies odd protections. So
++ * we don't check the return value
++ */
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
++ unsigned long start, size, flags, vm_flags;
++
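++		/* PaX: reserve a randomly sized (at most ~64MB) mapping right
++		 * after the brk so the heap starts at a randomized distance
++		 * from the executable */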
++ start = ELF_PAGEALIGN(elf_brk);
++ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
++ flags = MAP_FIXED | MAP_PRIVATE;
++ vm_flags = VM_DONTEXPAND | VM_RESERVED;
++
++ down_write(&current->mm->mmap_sem);
++ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
++ retval = -ENOMEM;
++ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
++// if (current->personality & ADDR_NO_RANDOMIZE)
++// vm_flags |= VM_READ | VM_MAYREAD;
++ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
++ retval = IS_ERR_VALUE(start) ? start : 0;
++ }
++ up_write(&current->mm->mmap_sem);
++ if (retval == 0)
++ retval = set_brk(start + size, start + size + PAGE_SIZE);
++ if (retval < 0) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++ }
++#endif
++
+ if (elf_interpreter) {
+- unsigned long uninitialized_var(interp_map_addr);
+-
+ elf_entry = load_elf_interp(&loc->interp_elf_ex,
+ interpreter,
+- &interp_map_addr,
+ load_bias);
+ if (!IS_ERR((void *)elf_entry)) {
+ /*
+@@ -1098,7 +1555,7 @@ out:
+ * Decide what to dump of a segment, part, all or none.
+ */
+ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+- unsigned long mm_flags)
++ unsigned long mm_flags, long signr)
+ {
+ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
+
+@@ -1132,7 +1589,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+ if (vma->vm_file == NULL)
+ return 0;
+
+- if (FILTER(MAPPED_PRIVATE))
++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
+ goto whole;
+
+ /*
+@@ -1354,9 +1811,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+ {
+ elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
+ int i = 0;
+- do
++ do {
+ i += 2;
+- while (auxv[i - 2] != AT_NULL);
++ } while (auxv[i - 2] != AT_NULL);
+ fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ }
+
+@@ -1851,14 +2308,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+ }
+
+ static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
+- unsigned long mm_flags)
++ struct coredump_params *cprm)
+ {
+ struct vm_area_struct *vma;
+ size_t size = 0;
+
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+ vma = next_vma(vma, gate_vma))
+- size += vma_dump_size(vma, mm_flags);
++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ return size;
+ }
+
+@@ -1952,7 +2409,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+
+ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+
+- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
++ offset += elf_core_vma_data_size(gate_vma, cprm);
+ offset += elf_core_extra_data_size();
+ e_shoff = offset;
+
+@@ -1966,10 +2423,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+ offset = dataoff;
+
+ size += sizeof(*elf);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
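++	/* grsec: log the projected core file size against RLIMIT_CORE for the
++	 * RBAC resource learning mode */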
+ if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+ goto end_coredump;
+
+ size += sizeof(*phdr4note);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+ goto end_coredump;
+@@ -1983,7 +2442,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+ phdr.p_memsz = vma->vm_end - vma->vm_start;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+@@ -1994,6 +2453,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ phdr.p_align = ELF_EXEC_PAGESIZE;
+
+ size += sizeof(phdr);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+ goto end_coredump;
+@@ -2018,7 +2478,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ unsigned long addr;
+ unsigned long end;
+
+- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
+
+ for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
+ struct page *page;
+@@ -2027,6 +2487,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ page = get_dump_page(addr);
+ if (page) {
+ void *kaddr = kmap(page);
++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
+ stop = ((size += PAGE_SIZE) > cprm->limit) ||
+ !dump_write(cprm->file, kaddr,
+ PAGE_SIZE);
+@@ -2044,6 +2505,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+
+ if (e_phnum == PN_XNUM) {
+ size += sizeof(*shdr4extnum);
++ gr_learn_resource(current, RLIMIT_CORE, size, 1);
+ if (size > cprm->limit
+ || !dump_write(cprm->file, shdr4extnum,
+ sizeof(*shdr4extnum)))
+@@ -2064,6 +2526,167 @@ out:
+
+ #endif /* CONFIG_ELF_CORE */
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
++ * we'll remove VM_MAYWRITE for good on RELRO segments.
++ *
++ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++ unsigned long oldflags;
++ bool is_textrel_rw, is_textrel_rx, is_relro;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++ return;
++
++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
++
++#ifdef CONFIG_PAX_ELFRELOCS
++ /* possible TEXTREL */
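++	/* is_textrel_rw: a clean r-x file mapping asking to become writable,
++	 * i.e. relocations are about to be applied; is_textrel_rx: an already
++	 * dirtied rw- file mapping asking to become executable again, i.e.
++	 * relocations are done */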
++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++#else
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++#endif
++
++ /* possible RELRO */
++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++
++ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++#else
++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
++#endif
++
++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ switch (elf_p.p_type) {
++ case PT_DYNAMIC:
++ if (!is_textrel_rw && !is_textrel_rx)
++ continue;
++ i = 0UL;
++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
++ elf_dyn dyn;
++
++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
++ break;
++ if (dyn.d_tag == DT_NULL)
++ break;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ gr_log_textrel(vma);
++ if (is_textrel_rw)
++ vma->vm_flags |= VM_MAYWRITE;
++ else
++						/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++ vma->vm_flags &= ~VM_MAYWRITE;
++ break;
++ }
++ i++;
++ }
++ is_textrel_rw = false;
++ is_textrel_rx = false;
++ continue;
++
++ case PT_GNU_RELRO:
++ if (!is_relro)
++ continue;
++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
++ vma->vm_flags &= ~VM_MAYWRITE;
++ is_relro = false;
++ continue;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ case PT_PAX_FLAGS: {
++ const char *msg_mprotect = "", *msg_emutramp = "";
++ char *buffer_lib, *buffer_exe;
++
++ if (elf_p.p_flags & PF_NOMPROTECT)
++ msg_mprotect = "MPROTECT disabled";
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
++ msg_emutramp = "EMUTRAMP enabled";
++#endif
++
++ if (!msg_mprotect[0] && !msg_emutramp[0])
++ continue;
++
++ if (!printk_ratelimit())
++ continue;
++
++ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
++ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
++ if (buffer_lib && buffer_exe) {
++ char *path_lib, *path_exe;
++
++ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
++ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
++
++ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
++ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
++
++ }
++ free_page((unsigned long)buffer_exe);
++ free_page((unsigned long)buffer_lib);
++ continue;
++ }
++#endif
++
++ }
++ }
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++
++extern int grsec_enable_log_rwxmaps;
++
++static void elf_handle_mmap(struct file *file)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p;
++ unsigned long i;
++
++ if (!grsec_enable_log_rwxmaps)
++ return;
++
++ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++ return;
++ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
++ gr_log_ptgnustack(file);
++ }
++}
++#endif
++
+ static int __init init_elf_binfmt(void)
+ {
+ return register_binfmt(&elf_format);
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 1bffbe0..c8c283e 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
+ realdatastart = (unsigned long) -ENOMEM;
+ printk("Unable to allocate RAM for process data, errno %d\n",
+ (int)-realdatastart);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
++ up_write(&current->mm->mmap_sem);
+ ret = realdatastart;
+ goto err;
+ }
+@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read data+bss, errno %d\n", (int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
+ do_munmap(current->mm, realdatastart, len);
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
+ }
+ if (IS_ERR_VALUE(result)) {
+ printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(unsigned long));
++ up_write(&current->mm->mmap_sem);
+ ret = result;
+ goto err;
+ }
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 7423cb9..9379ddd 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -719,6 +719,7 @@ static struct file_system_type bm_fs_type = {
+ .mount = bm_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("binfmt_misc");
+
+ static int __init init_misc_binfmt(void)
+ {
+diff --git a/fs/bio.c b/fs/bio.c
+index b84d851..0dd5077 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -848,7 +848,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ /*
+ * Overflow, abort
+ */
+- if (end < start)
++ if (end < start || end - start > INT_MAX - nr_pages)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages += end - start;
+@@ -982,7 +982,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
+ /*
+ * Overflow, abort
+ */
+- if (end < start)
++ if (end < start || end - start > INT_MAX - nr_pages)
+ return ERR_PTR(-EINVAL);
+
+ nr_pages += end - start;
+@@ -1244,7 +1244,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+ const int read = bio_data_dir(bio) == READ;
+ struct bio_map_data *bmd = bio->bi_private;
+ int i;
+- char *p = bmd->sgvecs[0].iov_base;
++ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index c103267..260cbd9 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -690,7 +690,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ else if (bdev->bd_contains == bdev)
+ return true; /* is a whole device which isn't held */
+
+- else if (whole->bd_holder == bd_may_claim)
++ else if (whole->bd_holder == (void *)bd_may_claim)
+ return true; /* is a partition of a device that is being partitioned */
+ else if (whole->bd_holder != NULL)
+ return false; /* is a partition of a held device */
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index dede441..f2a2507 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ free_extent_buffer(buf);
+ add_root_to_dirty_list(root);
+ } else {
+- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+- parent_start = parent->start;
+- else
++ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
++ if (parent)
++ parent_start = parent->start;
++ else
++ parent_start = 0;
++ } else
+ parent_start = 0;
+
+ WARN_ON(trans->transid != btrfs_header_generation(parent));
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 49eefdb..547693e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -5642,7 +5642,7 @@ again:
+
+ if (ret == -ENOSPC && num_bytes > min_alloc_size) {
+ num_bytes = num_bytes >> 1;
+- num_bytes = num_bytes & ~(root->sectorsize - 1);
++ num_bytes = num_bytes & ~((u64)root->sectorsize - 1);
+ num_bytes = max(num_bytes, min_alloc_size);
+ do_chunk_alloc(trans, root->fs_info->extent_root,
+ num_bytes, data, CHUNK_ALLOC_FORCE);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 618ae6f..118fe0c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+ for (i = 0; i < num_types; i++) {
+ struct btrfs_space_info *tmp;
+
++ /* Don't copy in more than we allocated */
+ if (!slot_count)
+ break;
+
++ slot_count--;
++
+ info = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+ memcpy(dest, &space, sizeof(space));
+ dest++;
+ space_args.total_spaces++;
+- slot_count--;
+ }
+- if (!slot_count)
+- break;
+ }
+ up_read(&info->groups_sem);
+ }
+
+- user_dest = (struct btrfs_ioctl_space_info *)
++ user_dest = (struct btrfs_ioctl_space_info __user *)
+ (arg + sizeof(struct btrfs_ioctl_space_args));
+
+ if (copy_to_user(user_dest, dest_orig, alloc_size))
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index cfb5543..1ae7347 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+
+- BUG_ON((struct btrfs_root *)node->data != root);
++ BUG_ON(!node || (struct btrfs_root *)node->data != root);
+
+ if (!del) {
+ spin_lock(&rc->reloc_root_tree.lock);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index ddf2c90..37afd35 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -348,7 +348,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
+ ret < 0 ? -1 : ref_level,
+ ret < 0 ? -1 : ref_root);
+ } while (ret != 1);
++ btrfs_release_path(path);
+ } else {
++ btrfs_release_path(path);
+ swarn.path = path;
+ iterate_extent_inodes(fs_info, path, found_key.objectid,
+ extent_offset,
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 200f63b..490b833 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1227,6 +1227,7 @@ static struct file_system_type btrfs_fs_type = {
+ .kill_sb = kill_anon_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("btrfs");
+
+ /*
+ * used by btrfsctl to scan devices when no FS is mounted
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 19a4f0b..6638f5c 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -3314,7 +3314,7 @@ void __init buffer_init(void)
+ bh_cachep = kmem_cache_create("buffer_head",
+ sizeof(struct buffer_head), 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+- SLAB_MEM_SPREAD),
++ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
+ NULL);
+
+ /*
+diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
+index 622f469..e8d2d55 100644
+--- a/fs/cachefiles/bind.c
++++ b/fs/cachefiles/bind.c
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+ args);
+
+ /* start by checking things over */
+- ASSERT(cache->fstop_percent >= 0 &&
+- cache->fstop_percent < cache->fcull_percent &&
++ ASSERT(cache->fstop_percent < cache->fcull_percent &&
+ cache->fcull_percent < cache->frun_percent &&
+ cache->frun_percent < 100);
+
+- ASSERT(cache->bstop_percent >= 0 &&
+- cache->bstop_percent < cache->bcull_percent &&
++ ASSERT(cache->bstop_percent < cache->bcull_percent &&
+ cache->bcull_percent < cache->brun_percent &&
+ cache->brun_percent < 100);
+
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index 0a1467b..6a53245 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+ if (n > buflen)
+ return -EMSGSIZE;
+
+- if (copy_to_user(_buffer, buffer, n) != 0)
++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
+ return -EFAULT;
+
+ return n;
+@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
+ if (test_bit(CACHEFILES_DEAD, &cache->flags))
+ return -EIO;
+
+- if (datalen < 0 || datalen > PAGE_SIZE - 1)
++ if (datalen > PAGE_SIZE - 1)
+ return -EOPNOTSUPP;
+
+ /* drag the command string into the kernel so we can parse it */
+@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (fstop < 0 || fstop >= cache->fcull_percent)
++ if (fstop >= cache->fcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->fstop_percent = fstop;
+@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+- if (bstop < 0 || bstop >= cache->bcull_percent)
++ if (bstop >= cache->bcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->bstop_percent = bstop;
+diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
+index bd6bc1b..b627b53 100644
+--- a/fs/cachefiles/internal.h
++++ b/fs/cachefiles/internal.h
+@@ -57,7 +57,7 @@ struct cachefiles_cache {
+ wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
+ struct rb_root active_nodes; /* active nodes (can't be culled) */
+ rwlock_t active_lock; /* lock for active_nodes */
+- atomic_t gravecounter; /* graveyard uniquifier */
++ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
+ unsigned frun_percent; /* when to stop culling (% files) */
+ unsigned fcull_percent; /* when to start culling (% files) */
+ unsigned fstop_percent; /* when to stop allocating (% files) */
+@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
+ * proc.c
+ */
+ #ifdef CONFIG_CACHEFILES_HISTOGRAM
+-extern atomic_t cachefiles_lookup_histogram[HZ];
+-extern atomic_t cachefiles_mkdir_histogram[HZ];
+-extern atomic_t cachefiles_create_histogram[HZ];
++extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++extern atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ extern int __init cachefiles_proc_init(void);
+ extern void cachefiles_proc_cleanup(void);
+ static inline
+-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
++void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
+ {
+ unsigned long jif = jiffies - start_jif;
+ if (jif >= HZ)
+ jif = HZ - 1;
+- atomic_inc(&histogram[jif]);
++ atomic_inc_unchecked(&histogram[jif]);
+ }
+
+ #else
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index a0358c2..d6137f2 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -318,7 +318,7 @@ try_again:
+ /* first step is to make up a grave dentry in the graveyard */
+ sprintf(nbuffer, "%08x%08x",
+ (uint32_t) get_seconds(),
+- (uint32_t) atomic_inc_return(&cache->gravecounter));
++ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
+
+ /* do the multiway lock magic */
+ trap = lock_rename(cache->graveyard, dir);
+diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
+index eccd339..4c1d995 100644
+--- a/fs/cachefiles/proc.c
++++ b/fs/cachefiles/proc.c
+@@ -14,9 +14,9 @@
+ #include <linux/seq_file.h>
+ #include "internal.h"
+
+-atomic_t cachefiles_lookup_histogram[HZ];
+-atomic_t cachefiles_mkdir_histogram[HZ];
+-atomic_t cachefiles_create_histogram[HZ];
++atomic_unchecked_t cachefiles_lookup_histogram[HZ];
++atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
++atomic_unchecked_t cachefiles_create_histogram[HZ];
+
+ /*
+ * display the latency histogram
+@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
+ return 0;
+ default:
+ index = (unsigned long) v - 3;
+- x = atomic_read(&cachefiles_lookup_histogram[index]);
+- y = atomic_read(&cachefiles_mkdir_histogram[index]);
+- z = atomic_read(&cachefiles_create_histogram[index]);
++ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
++ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
++ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
+ if (x == 0 && y == 0 && z == 0)
+ return 0;
+
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index b4d2438..0935840 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = file->f_op->write(
+- file, (const void __user *) data, len, &pos);
++ file, (const void __force_user *) data, len, &pos);
+ set_fs(old_fs);
+ kunmap(page);
+ if (ret != len)
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 9895400..78a67e7 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ unsigned frag = fpos_frag(filp->f_pos);
+- int off = fpos_off(filp->f_pos);
++ unsigned int off = fpos_off(filp->f_pos);
+ int err;
+ u32 ftype;
+ struct ceph_mds_reply_info_parsed *rinfo;
+@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
+ if (nd &&
+ (nd->flags & LOOKUP_OPEN) &&
+ !(nd->intent.open.flags & O_CREAT)) {
+- int mode = nd->intent.open.create_mode & ~current->fs->umask;
++ int mode = nd->intent.open.create_mode & ~current_umask();
+ return ceph_lookup_open(dir, dentry, nd, mode, 1);
+ }
+
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index de268a8..2a158be 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -785,7 +785,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
+ /*
+ * construct our own bdi so we can control readahead, etc.
+ */
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ static int ceph_register_bdi(struct super_block *sb,
+ struct ceph_fs_client *fsc)
+@@ -802,7 +802,7 @@ static int ceph_register_bdi(struct super_block *sb,
+ default_backing_dev_info.ra_pages;
+
+ err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
+- atomic_long_inc_return(&bdi_seq));
++ atomic_long_inc_return_unchecked(&bdi_seq));
+ if (!err)
+ sb->s_bdi = &fsc->backing_dev_info;
+ return err;
+@@ -901,6 +901,7 @@ static struct file_system_type ceph_fs_type = {
+ .kill_sb = ceph_kill_sb,
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
+ };
++MODULE_ALIAS_FS("ceph");
+
+ #define _STRINGIFY(x) #x
+ #define STRINGIFY(x) _STRINGIFY(x)
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 84e8c07..6170d31 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+
+ if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_set(&totBufAllocCount, 0);
+- atomic_set(&totSmBufAllocCount, 0);
++ atomic_set_unchecked(&totBufAllocCount, 0);
++ atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp1, &cifs_tcp_ses_list) {
+@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ tcon = list_entry(tmp3,
+ struct cifs_tcon,
+ tcon_list);
+- atomic_set(&tcon->num_smbs_sent, 0);
+- atomic_set(&tcon->num_writes, 0);
+- atomic_set(&tcon->num_reads, 0);
+- atomic_set(&tcon->num_oplock_brks, 0);
+- atomic_set(&tcon->num_opens, 0);
+- atomic_set(&tcon->num_posixopens, 0);
+- atomic_set(&tcon->num_posixmkdirs, 0);
+- atomic_set(&tcon->num_closes, 0);
+- atomic_set(&tcon->num_deletes, 0);
+- atomic_set(&tcon->num_mkdirs, 0);
+- atomic_set(&tcon->num_rmdirs, 0);
+- atomic_set(&tcon->num_renames, 0);
+- atomic_set(&tcon->num_t2renames, 0);
+- atomic_set(&tcon->num_ffirst, 0);
+- atomic_set(&tcon->num_fnext, 0);
+- atomic_set(&tcon->num_fclose, 0);
+- atomic_set(&tcon->num_hardlinks, 0);
+- atomic_set(&tcon->num_symlinks, 0);
+- atomic_set(&tcon->num_locks, 0);
++ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
++ atomic_set_unchecked(&tcon->num_writes, 0);
++ atomic_set_unchecked(&tcon->num_reads, 0);
++ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
++ atomic_set_unchecked(&tcon->num_opens, 0);
++ atomic_set_unchecked(&tcon->num_posixopens, 0);
++ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
++ atomic_set_unchecked(&tcon->num_closes, 0);
++ atomic_set_unchecked(&tcon->num_deletes, 0);
++ atomic_set_unchecked(&tcon->num_mkdirs, 0);
++ atomic_set_unchecked(&tcon->num_rmdirs, 0);
++ atomic_set_unchecked(&tcon->num_renames, 0);
++ atomic_set_unchecked(&tcon->num_t2renames, 0);
++ atomic_set_unchecked(&tcon->num_ffirst, 0);
++ atomic_set_unchecked(&tcon->num_fnext, 0);
++ atomic_set_unchecked(&tcon->num_fclose, 0);
++ atomic_set_unchecked(&tcon->num_hardlinks, 0);
++ atomic_set_unchecked(&tcon->num_symlinks, 0);
++ atomic_set_unchecked(&tcon->num_locks, 0);
+ }
+ }
+ }
+@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ smBufAllocCount.counter, cifs_min_small);
+ #ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, "Total Large %d Small %d Allocations\n",
+- atomic_read(&totBufAllocCount),
+- atomic_read(&totSmBufAllocCount));
++ atomic_read_unchecked(&totBufAllocCount),
++ atomic_read_unchecked(&totSmBufAllocCount));
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
+@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
+- atomic_read(&tcon->num_smbs_sent),
+- atomic_read(&tcon->num_oplock_brks));
++ atomic_read_unchecked(&tcon->num_smbs_sent),
++ atomic_read_unchecked(&tcon->num_oplock_brks));
+ seq_printf(m, "\nReads: %d Bytes: %lld",
+- atomic_read(&tcon->num_reads),
++ atomic_read_unchecked(&tcon->num_reads),
+ (long long)(tcon->bytes_read));
+ seq_printf(m, "\nWrites: %d Bytes: %lld",
+- atomic_read(&tcon->num_writes),
++ atomic_read_unchecked(&tcon->num_writes),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+- atomic_read(&tcon->num_flushes));
++ atomic_read_unchecked(&tcon->num_flushes));
+ seq_printf(m, "\nLocks: %d HardLinks: %d "
+ "Symlinks: %d",
+- atomic_read(&tcon->num_locks),
+- atomic_read(&tcon->num_hardlinks),
+- atomic_read(&tcon->num_symlinks));
++ atomic_read_unchecked(&tcon->num_locks),
++ atomic_read_unchecked(&tcon->num_hardlinks),
++ atomic_read_unchecked(&tcon->num_symlinks));
+ seq_printf(m, "\nOpens: %d Closes: %d "
+ "Deletes: %d",
+- atomic_read(&tcon->num_opens),
+- atomic_read(&tcon->num_closes),
+- atomic_read(&tcon->num_deletes));
++ atomic_read_unchecked(&tcon->num_opens),
++ atomic_read_unchecked(&tcon->num_closes),
++ atomic_read_unchecked(&tcon->num_deletes));
+ seq_printf(m, "\nPosix Opens: %d "
+ "Posix Mkdirs: %d",
+- atomic_read(&tcon->num_posixopens),
+- atomic_read(&tcon->num_posixmkdirs));
++ atomic_read_unchecked(&tcon->num_posixopens),
++ atomic_read_unchecked(&tcon->num_posixmkdirs));
+ seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+- atomic_read(&tcon->num_mkdirs),
+- atomic_read(&tcon->num_rmdirs));
++ atomic_read_unchecked(&tcon->num_mkdirs),
++ atomic_read_unchecked(&tcon->num_rmdirs));
+ seq_printf(m, "\nRenames: %d T2 Renames %d",
+- atomic_read(&tcon->num_renames),
+- atomic_read(&tcon->num_t2renames));
++ atomic_read_unchecked(&tcon->num_renames),
++ atomic_read_unchecked(&tcon->num_t2renames));
+ seq_printf(m, "\nFindFirst: %d FNext %d "
+ "FClose %d",
+- atomic_read(&tcon->num_ffirst),
+- atomic_read(&tcon->num_fnext),
+- atomic_read(&tcon->num_fclose));
++ atomic_read_unchecked(&tcon->num_ffirst),
++ atomic_read_unchecked(&tcon->num_fnext),
++ atomic_read_unchecked(&tcon->num_fclose));
+ }
+ }
+ }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 25bb97f..a0095de 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -799,6 +799,7 @@ struct file_system_type cifs_fs_type = {
+ .kill_sb = cifs_kill_sb,
+ /* .fs_flags */
+ };
++MODULE_ALIAS_FS("cifs");
+ const struct inode_operations cifs_dir_inode_ops = {
+ .create = cifs_create,
+ .lookup = cifs_lookup,
+@@ -1018,7 +1019,7 @@ cifs_init_request_bufs(void)
+ cifs_req_cachep = kmem_cache_create("cifs_request",
+ CIFSMaxBufSize +
+ MAX_CIFS_HDR_SIZE, 0,
+- SLAB_HWCACHE_ALIGN, NULL);
++ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
+ if (cifs_req_cachep == NULL)
+ return -ENOMEM;
+
+@@ -1045,7 +1046,7 @@ cifs_init_request_bufs(void)
+ efficient to alloc 1 per page off the slab compared to 17K (5page)
+ alloc of large cifs buffers even when page debugging is on */
+ cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
+- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
++ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
+ NULL);
+ if (cifs_sm_req_cachep == NULL) {
+ mempool_destroy(cifs_req_poolp);
+@@ -1130,8 +1131,8 @@ init_cifs(void)
+ atomic_set(&bufAllocCount, 0);
+ atomic_set(&smBufAllocCount, 0);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_set(&totBufAllocCount, 0);
+- atomic_set(&totSmBufAllocCount, 0);
++ atomic_set_unchecked(&totBufAllocCount, 0);
++ atomic_set_unchecked(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ atomic_set(&midCount, 0);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 7b68088..17a275b 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -390,28 +390,28 @@ struct cifs_tcon {
+ __u16 Flags; /* optional support bits */
+ enum statusEnum tidStatus;
+ #ifdef CONFIG_CIFS_STATS
+- atomic_t num_smbs_sent;
+- atomic_t num_writes;
+- atomic_t num_reads;
+- atomic_t num_flushes;
+- atomic_t num_oplock_brks;
+- atomic_t num_opens;
+- atomic_t num_closes;
+- atomic_t num_deletes;
+- atomic_t num_mkdirs;
+- atomic_t num_posixopens;
+- atomic_t num_posixmkdirs;
+- atomic_t num_rmdirs;
+- atomic_t num_renames;
+- atomic_t num_t2renames;
+- atomic_t num_ffirst;
+- atomic_t num_fnext;
+- atomic_t num_fclose;
+- atomic_t num_hardlinks;
+- atomic_t num_symlinks;
+- atomic_t num_locks;
+- atomic_t num_acl_get;
+- atomic_t num_acl_set;
++ atomic_unchecked_t num_smbs_sent;
++ atomic_unchecked_t num_writes;
++ atomic_unchecked_t num_reads;
++ atomic_unchecked_t num_flushes;
++ atomic_unchecked_t num_oplock_brks;
++ atomic_unchecked_t num_opens;
++ atomic_unchecked_t num_closes;
++ atomic_unchecked_t num_deletes;
++ atomic_unchecked_t num_mkdirs;
++ atomic_unchecked_t num_posixopens;
++ atomic_unchecked_t num_posixmkdirs;
++ atomic_unchecked_t num_rmdirs;
++ atomic_unchecked_t num_renames;
++ atomic_unchecked_t num_t2renames;
++ atomic_unchecked_t num_ffirst;
++ atomic_unchecked_t num_fnext;
++ atomic_unchecked_t num_fclose;
++ atomic_unchecked_t num_hardlinks;
++ atomic_unchecked_t num_symlinks;
++ atomic_unchecked_t num_locks;
++ atomic_unchecked_t num_acl_get;
++ atomic_unchecked_t num_acl_set;
+ #ifdef CONFIG_CIFS_STATS2
+ unsigned long long time_writes;
+ unsigned long long time_reads;
+@@ -626,7 +626,7 @@ convert_delimiter(char *path, char delim)
+ }
+
+ #ifdef CONFIG_CIFS_STATS
+-#define cifs_stats_inc atomic_inc
++#define cifs_stats_inc atomic_inc_unchecked
+
+ static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
+ unsigned int bytes)
+@@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+ /* Various Debug counters */
+ GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
+ #ifdef CONFIG_CIFS_STATS2
+-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
+-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
++GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
++GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
+ #endif
+ GLOBAL_EXTERN atomic_t smBufAllocCount;
+ GLOBAL_EXTERN atomic_t midCount;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index c55808e..c1814ab 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1690,10 +1690,14 @@ static int cifs_writepages(struct address_space *mapping,
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
+ } else {
+- index = wbc->range_start >> PAGE_CACHE_SHIFT;
+- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+ range_whole = true;
++ index = 0;
++ end = ULONG_MAX;
++ } else {
++ index = wbc->range_start >> PAGE_CACHE_SHIFT;
++ end = wbc->range_end >> PAGE_CACHE_SHIFT;
++ }
+ scanned = true;
+ }
+ retry:
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 6b0e064..94e6c3c 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -600,7 +600,7 @@ symlink_exit:
+
+ void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
+ {
+- char *p = nd_get_link(nd);
++ const char *p = nd_get_link(nd);
+ if (!IS_ERR(p))
+ kfree(p);
+ }
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 703ef5c..2a44ed5 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -156,7 +156,7 @@ cifs_buf_get(void)
+ memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
+ atomic_inc(&bufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_inc(&totBufAllocCount);
++ atomic_inc_unchecked(&totBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+ }
+
+@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
+ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+ atomic_inc(&smBufAllocCount);
+ #ifdef CONFIG_CIFS_STATS2
+- atomic_inc(&totSmBufAllocCount);
++ atomic_inc_unchecked(&totSmBufAllocCount);
+ #endif /* CONFIG_CIFS_STATS2 */
+
+ }
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 52a820a..1d8ab03 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -86,14 +86,17 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
++ int err;
+ inode = dentry->d_inode;
+ /* update inode in place if i_ino didn't change */
+ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ cifs_fattr_to_inode(inode, fattr);
+ return dentry;
+ }
+- d_drop(dentry);
++ err = d_invalidate(dentry);
+ dput(dentry);
++ if (err)
++ return NULL;
+ }
+
+ /*
+diff --git a/fs/coda/cache.c b/fs/coda/cache.c
+index 6901578..d402eb5 100644
+--- a/fs/coda/cache.c
++++ b/fs/coda/cache.c
+@@ -24,7 +24,7 @@
+ #include "coda_linux.h"
+ #include "coda_cache.h"
+
+-static atomic_t permission_epoch = ATOMIC_INIT(0);
++static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
+
+ /* replace or extend an acl cache hit */
+ void coda_cache_enter(struct inode *inode, int mask)
+@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
+ struct coda_inode_info *cii = ITOC(inode);
+
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch);
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
+ if (cii->c_uid != current_fsuid()) {
+ cii->c_uid = current_fsuid();
+ cii->c_cached_perm = mask;
+@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
+ {
+ struct coda_inode_info *cii = ITOC(inode);
+ spin_lock(&cii->c_lock);
+- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
++ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
+ spin_unlock(&cii->c_lock);
+ }
+
+ /* remove all acl caches */
+ void coda_cache_clear_all(struct super_block *sb)
+ {
+- atomic_inc(&permission_epoch);
++ atomic_inc_unchecked(&permission_epoch);
+ }
+
+
+@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
+ spin_lock(&cii->c_lock);
+ hit = (mask & cii->c_cached_perm) == mask &&
+ cii->c_uid == current_fsuid() &&
+- cii->c_cached_epoch == atomic_read(&permission_epoch);
++ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
+ spin_unlock(&cii->c_lock);
+
+ return hit;
+diff --git a/fs/coda/inode.c b/fs/coda/inode.c
+index 871b277..7dcf232 100644
+--- a/fs/coda/inode.c
++++ b/fs/coda/inode.c
+@@ -326,4 +326,5 @@ struct file_system_type coda_fs_type = {
+ .kill_sb = kill_anon_super,
+ .fs_flags = FS_BINARY_MOUNTDATA,
+ };
++MODULE_ALIAS_FS("coda");
+
+diff --git a/fs/compat.c b/fs/compat.c
+index 4bf082d..d33d8b7 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
+ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
+ {
+ compat_ino_t ino = stat->ino;
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
++ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
+ int err;
+
+ SET_UID(uid, stat->uid);
+@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
+
+ set_fs(KERNEL_DS);
+ /* The __user pointer cast is valid because of the set_fs() */
+- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
++ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
+ set_fs(oldfs);
+ /* truncating is ok because it's a user address */
+ if (!ret)
+@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
+ goto out;
+
+ ret = -EINVAL;
+- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
++ if (nr_segs > UIO_MAXIOV)
+ goto out;
+ if (nr_segs > fast_segs) {
+ ret = -ENOMEM;
+@@ -849,6 +849,7 @@ struct compat_old_linux_dirent {
+
+ struct compat_readdir_callback {
+ struct compat_old_linux_dirent __user *dirent;
++ struct file * file;
+ int result;
+ };
+
+@@ -866,6 +867,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
+ buf->result = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -898,6 +903,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
+
+ buf.result = 0;
+ buf.dirent = dirent;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_fillonedir, &buf);
+ if (buf.result)
+@@ -918,6 +924,7 @@ struct compat_linux_dirent {
+ struct compat_getdents_callback {
+ struct compat_linux_dirent __user *current_dir;
+ struct compat_linux_dirent __user *previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -939,6 +946,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -986,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_filldir, &buf);
+ if (error >= 0)
+@@ -1007,6 +1019,7 @@ out:
+ struct compat_getdents_callback64 {
+ struct linux_dirent64 __user *current_dir;
+ struct linux_dirent64 __user *previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -1023,6 +1036,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+
+ if (dirent) {
+@@ -1074,13 +1091,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, compat_filldir64, &buf);
+ if (error >= 0)
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+- typeof(lastdirent->d_off) d_off = file->f_pos;
++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
+ if (__put_user_unaligned(d_off, &lastdirent->d_off))
+ error = -EFAULT;
+ else
+diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
+index 112e45a..b59845b 100644
+--- a/fs/compat_binfmt_elf.c
++++ b/fs/compat_binfmt_elf.c
+@@ -30,11 +30,13 @@
+ #undef elf_phdr
+ #undef elf_shdr
+ #undef elf_note
++#undef elf_dyn
+ #undef elf_addr_t
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_shdr elf32_shdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+ #define elf_addr_t Elf32_Addr
+
+ /*
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index f854cf9..d513829 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
+ return -EFAULT;
+ if (__get_user(udata, &ss32->iomem_base))
+ return -EFAULT;
+- ss.iomem_base = compat_ptr(udata);
++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
+ if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+ __get_user(ss.port_high, &ss32->port_high))
+ return -EFAULT;
+@@ -704,8 +704,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
+ for (i = 0; i < nmsgs; i++) {
+ if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
+ return -EFAULT;
+- if (get_user(datap, &umsgs[i].buf) ||
+- put_user(compat_ptr(datap), &tmsgs[i].buf))
++ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
++ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
+ return -EFAULT;
+ }
+ return sys_ioctl(fd, cmd, (unsigned long)tdata);
+@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
+ copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
+ copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
+ copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
+- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
+ return -EFAULT;
+
+ return ioctl_preallocate(file, p);
+@@ -1646,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
+ {
+ unsigned int a, b;
+- a = *(unsigned int *)p;
+- b = *(unsigned int *)q;
++ a = *(const unsigned int *)p;
++ b = *(const unsigned int *)q;
+ if (a > b)
+ return 1;
+ if (a < b)
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 5ef72c8..0c72810 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1059,10 +1059,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
+ static int configfs_depend_prep(struct dentry *origin,
+ struct config_item *target)
+ {
+- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
++ struct configfs_dirent *child_sd, *sd;
+ int ret = 0;
+
+- BUG_ON(!origin || !sd);
++ BUG_ON(!origin || !origin->d_fsdata);
++ sd = origin->d_fsdata;
+
+ if (sd->s_element == target) /* Boo-yah */
+ goto out;
+@@ -1587,7 +1588,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
+ }
+ for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+ struct configfs_dirent *next;
+- const char * name;
++ const unsigned char * name;
++ char d_name[sizeof(next->s_dentry->d_iname)];
+ int len;
+ struct inode *inode = NULL;
+
+@@ -1597,7 +1599,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
+ continue;
+
+ name = configfs_get_name(next);
+- len = strlen(name);
++ if (next->s_dentry && name == next->s_dentry->d_iname) {
++ len = next->s_dentry->d_name.len;
++ memcpy(d_name, name, len);
++ name = d_name;
++ } else
++ len = strlen(name);
+
+ /*
+ * We'll have a dentry and an inode for
+diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
+index 276e15c..aeac324 100644
+--- a/fs/configfs/mount.c
++++ b/fs/configfs/mount.c
+@@ -117,6 +117,7 @@ static struct file_system_type configfs_fs_type = {
+ .mount = configfs_do_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("configfs");
+
+ int configfs_pin_fs(void)
+ {
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 739fb59..5385976 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -576,6 +576,7 @@ static struct file_system_type cramfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("cramfs");
+
+ static int __init init_cramfs_fs(void)
+ {
+diff --git a/fs/dcache.c b/fs/dcache.c
+index d322929..9f4b816 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -103,11 +103,11 @@ static unsigned int d_hash_shift __read_mostly;
+
+ static struct hlist_bl_head *dentry_hashtable __read_mostly;
+
+-static inline struct hlist_bl_head *d_hash(struct dentry *parent,
+- unsigned long hash)
++static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
++ unsigned int hash)
+ {
+- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
+- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
++ hash += (unsigned long) parent / L1_CACHE_BYTES;
++ hash = hash + (hash >> D_HASHBITS);
+ return dentry_hashtable + (hash & D_HASHMASK);
+ }
+
+@@ -3057,7 +3057,8 @@ void __init vfs_caches_init(unsigned long mempages)
+ mempages -= reserve;
+
+ names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
++ SLAB_NO_SANITIZE, NULL);
+
+ dcache_init();
+ inode_init();
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index fb001cd..95129c3 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -145,6 +145,7 @@ static struct file_system_type debug_fs_type = {
+ .mount = debug_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("debugfs");
+
+ static int debugfs_create_by_name(const char *name, mode_t mode,
+ struct dentry *parent,
+@@ -261,7 +262,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
+ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+ {
+ return debugfs_create_file(name,
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++ S_IFDIR | S_IRWXU,
++#else
+ S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
++#endif
+ parent, NULL, NULL);
+ }
+ EXPORT_SYMBOL_GPL(debugfs_create_dir);
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index a9be90d..3cf866c 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -705,7 +705,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+ old_fs = get_fs();
+ set_fs(get_ds());
+ rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
+- (char __user *)lower_buf,
++ (char __force_user *)lower_buf,
+ lower_bufsiz);
+ set_fs(old_fs);
+ if (rc < 0)
+@@ -751,7 +751,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ }
+ old_fs = get_fs();
+ set_fs(get_ds());
+- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
++ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
+ set_fs(old_fs);
+ if (rc < 0) {
+ kfree(buf);
+@@ -766,7 +766,7 @@ out:
+ static void
+ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
+ {
+- char *buf = nd_get_link(nd);
++ const char *buf = nd_get_link(nd);
+ if (!IS_ERR(buf)) {
+ /* Free the char* */
+ kfree(buf);
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 5ce56e7..d80e1db 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1152,7 +1152,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ struct ecryptfs_message *msg = NULL;
+ char *auth_tok_sig;
+ char *payload = NULL;
+- size_t payload_len;
++ size_t payload_len = 0;
+ int rc;
+
+ rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok);
+@@ -1204,8 +1204,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ crypt_stat->key_size);
+ }
+ out:
+- if (msg)
+- kfree(msg);
++ kfree(msg);
+ kfree(payload);
+ return rc;
+ }
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index 94afdfd..bdb8854 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -629,6 +629,7 @@ static struct file_system_type ecryptfs_fs_type = {
+ .kill_sb = ecryptfs_kill_block_super,
+ .fs_flags = 0
+ };
++MODULE_ALIAS_FS("ecryptfs");
+
+ /**
+ * inode_info_init_once
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index de42310..867dddd 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -338,7 +338,7 @@ check_list:
+ goto out_unlock_msg_ctx;
+ i = 5;
+ if (msg_ctx->msg) {
+- if (copy_to_user(&buf[i], packet_length, packet_length_size))
++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
+ goto out_unlock_msg_ctx;
+ i += packet_length_size;
+ if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
+diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
+index 608c1c3..7d040a8 100644
+--- a/fs/ecryptfs/read_write.c
++++ b/fs/ecryptfs/read_write.c
+@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
+ return -EIO;
+ fs_save = get_fs();
+ set_fs(get_ds());
+- rc = vfs_write(lower_file, data, size, &offset);
++ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
+ set_fs(fs_save);
+ mark_inode_dirty_sync(ecryptfs_inode);
+ return rc;
+@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
+ return -EIO;
+ fs_save = get_fs();
+ set_fs(get_ds());
+- rc = vfs_read(lower_file, data, size, &offset);
++ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
+ set_fs(fs_save);
+ return rc;
+ }
+diff --git a/fs/efs/super.c b/fs/efs/super.c
+index 0f31acb..395ebc9 100644
+--- a/fs/efs/super.c
++++ b/fs/efs/super.c
+@@ -33,6 +33,7 @@ static struct file_system_type efs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("efs");
+
+ static struct pt_types sgi_pt_types[] = {
+ {0x00, "SGI vh"},
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 451b9b8..12e5a03 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1560,8 +1560,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
+ error = PTR_ERR(file);
+ goto out_free_fd;
+ }
+- fd_install(fd, file);
+ ep->file = file;
++ fd_install(fd, file);
+ return fd;
+
+ out_free_fd:
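The eventpoll reorder closes a small race: fd_install() makes the descriptor visible to other threads, so ep->file must be assigned before it. This is the usual publish-last pattern; a userspace sketch with C11 atomics and hypothetical types, not the kernel's fd table:

    #include <stdatomic.h>
    #include <stdio.h>

    struct ep { int file; };
    static _Atomic(struct ep *) fd_slot;   /* stands in for the fd table entry */

    static void publish(struct ep *e)
    {
        e->file = 42;                       /* finish initialization first... */
        atomic_store_explicit(&fd_slot, e,  /* ...then make it reachable */
                              memory_order_release);
    }

    int main(void)
    {
        struct ep e = { 0 };
        publish(&e);
        printf("published with file=%d\n",
               atomic_load_explicit(&fd_slot, memory_order_acquire)->file);
        return 0;
    }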
+diff --git a/fs/exec.c b/fs/exec.c
+index 78199eb..e6c84a8 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -55,12 +55,35 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/random.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++
++#ifdef CONFIG_PAX_REFCOUNT
++#include <linux/kallsyms.h>
++#include <linux/kdebug.h>
++#endif
++
++#include <trace/events/fs.h>
+
+ #include <asm/uaccess.h>
++#include <asm/sections.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
+ #include "internal.h"
+
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void __weak pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
++}
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++EXPORT_SYMBOL(pax_set_initial_flags_func);
++#endif
++
+ int core_uses_pid;
+ char core_pattern[CORENAME_MAX_SIZE] = "core";
+ unsigned int core_pipe_limit;
+@@ -70,20 +93,23 @@ struct core_name {
+ char *corename;
+ int used, size;
+ };
+-static atomic_t call_count = ATOMIC_INIT(1);
++static atomic_unchecked_t call_count = ATOMIC_INIT(1);
+
+ /* The maximal length of core_pattern is also specified in sysctl.c */
+
+ static LIST_HEAD(formats);
+ static DEFINE_RWLOCK(binfmt_lock);
+
++extern int gr_process_kernel_exec_ban(void);
++extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
++
+ int __register_binfmt(struct linux_binfmt * fmt, int insert)
+ {
+ if (!fmt)
+ return -EINVAL;
+ write_lock(&binfmt_lock);
+- insert ? list_add(&fmt->lh, &formats) :
+- list_add_tail(&fmt->lh, &formats);
++ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
++ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
+ write_unlock(&binfmt_lock);
+ return 0;
+ }
+@@ -93,7 +119,7 @@ EXPORT_SYMBOL(__register_binfmt);
+ void unregister_binfmt(struct linux_binfmt * fmt)
+ {
+ write_lock(&binfmt_lock);
+- list_del(&fmt->lh);
++ pax_list_del((struct list_head *)&fmt->lh);
+ write_unlock(&binfmt_lock);
+ }
+
+@@ -188,18 +214,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ int write)
+ {
+ struct page *page;
+- int ret;
+
+-#ifdef CONFIG_STACK_GROWSUP
+- if (write) {
+- ret = expand_downwards(bprm->vma, pos);
+- if (ret < 0)
+- return NULL;
+- }
+-#endif
+- ret = get_user_pages(current, bprm->mm, pos,
+- 1, write, 1, &page, NULL);
+- if (ret <= 0)
++ if (0 > expand_downwards(bprm->vma, pos))
++ return NULL;
++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
+ return NULL;
+
+ if (write) {
+@@ -215,6 +233,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ if (size <= ARG_MAX)
+ return page;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ // only allow 512KB for argv+env on suid/sgid binaries
++ // to prevent easy ASLR exhaustion
++ if (((bprm->cred->euid != current_euid()) ||
++ (bprm->cred->egid != current_egid())) &&
++ (size > (512 * 1024))) {
++ put_page(page);
++ return NULL;
++ }
++#endif
++
+ /*
+ * Limit to 1/4-th the stack size for the argv+env strings.
+ * This ensures that:
+@@ -274,6 +303,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+ vma->vm_end = STACK_TOP_MAX;
+ vma->vm_start = vma->vm_end - PAGE_SIZE;
+ vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+
+@@ -288,6 +322,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+ mm->stack_vm = mm->total_vm = 1;
+ up_write(&mm->mmap_sem);
+ bprm->p = vma->vm_end - sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ if (randomize_va_space)
++ bprm->p ^= random32() & ~PAGE_MASK;
++#endif
++
+ return 0;
+ err:
+ up_write(&mm->mmap_sem);
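The PAX_RANDUSTACK addition XORs random low bits into bprm->p, randomizing where the argument strings start within the top stack page without moving the page itself. A userspace model of the arithmetic, with rand() standing in for the kernel's random32() and a purely hypothetical address:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long p = 0x7ffffffff000UL - sizeof(void *); /* hypothetical bprm->p */
        unsigned long r = (unsigned long)rand();

        p ^= r & ~PAGE_MASK;   /* only the low 12 bits change */
        printf("page base %#lx, in-page offset %#lx\n",
               p & PAGE_MASK, p & ~PAGE_MASK);
        return 0;
    }

Because the mask keeps the high bits intact, the stack page is unchanged; only the starting offset inside it becomes unpredictable.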
+@@ -403,12 +443,12 @@ struct user_arg_ptr {
+ union {
+ const char __user *const __user *native;
+ #ifdef CONFIG_COMPAT
+- compat_uptr_t __user *compat;
++ const compat_uptr_t __user *compat;
+ #endif
+ } ptr;
+ };
+
+-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
++const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+ {
+ const char __user *native;
+
+@@ -417,14 +457,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+ compat_uptr_t compat;
+
+ if (get_user(compat, argv.ptr.compat + nr))
+- return ERR_PTR(-EFAULT);
++ return (const char __force_user *)ERR_PTR(-EFAULT);
+
+ return compat_ptr(compat);
+ }
+ #endif
+
+ if (get_user(native, argv.ptr.native + nr))
+- return ERR_PTR(-EFAULT);
++ return (const char __force_user *)ERR_PTR(-EFAULT);
+
+ return native;
+ }
+@@ -443,11 +483,12 @@ static int count(struct user_arg_ptr argv, int max)
+ if (!p)
+ break;
+
+- if (IS_ERR(p))
++ if (IS_ERR((const char __force_kernel *)p))
+ return -EFAULT;
+
+- if (i++ >= max)
++ if (i >= max)
+ return -E2BIG;
++ ++i;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
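The count() change replaces `if (i++ >= max)` with a test-then-increment pair. Both forms reject the argument that would exceed max, but the old one bumped the counter even on the failing test. A small demonstration of the difference:

    #include <assert.h>

    int main(void)
    {
        int max = 3;
        int i_old = 0, i_new = 0, n;

        for (n = 0; n < 5; n++) {            /* five candidate arguments */
            if (i_old++ >= max) break;       /* old: increments even when failing */
        }
        for (n = 0; n < 5; n++) {
            if (i_new >= max) break;         /* new: test first... */
            ++i_new;                         /* ...then count */
        }
        assert(i_old == max + 1);            /* overshoots the limit */
        assert(i_new == max);                /* stops exactly at it */
        return 0;
    }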
+@@ -477,7 +518,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
+
+ ret = -EFAULT;
+ str = get_user_arg_ptr(argv, argc);
+- if (IS_ERR(str))
++ if (IS_ERR((const char __force_kernel *)str))
+ goto out;
+
+ len = strnlen_user(str, MAX_ARG_STRLEN);
+@@ -559,7 +600,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
+ int r;
+ mm_segment_t oldfs = get_fs();
+ struct user_arg_ptr argv = {
+- .ptr.native = (const char __user *const __user *)__argv,
++ .ptr.native = (const char __force_user * const __force_user *)__argv,
+ };
+
+ set_fs(KERNEL_DS);
+@@ -594,7 +635,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+ unsigned long new_end = old_end - shift;
+ struct mmu_gather tlb;
+
+- BUG_ON(new_start > new_end);
++ if (new_start >= new_end || new_start < mmap_min_addr)
++ return -ENOMEM;
+
+ /*
+ * ensure there are no vmas between where we want to go
+@@ -603,6 +645,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+ if (vma != find_vma(mm, new_start))
+ return -EFAULT;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ BUG_ON(pax_find_mirror_vma(vma));
++#endif
++
+ /*
+ * cover the whole range: [new_start, old_end)
+ */
+@@ -683,10 +729,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ stack_top = arch_align_stack(stack_top);
+ stack_top = PAGE_ALIGN(stack_top);
+
+- if (unlikely(stack_top < mmap_min_addr) ||
+- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+- return -ENOMEM;
+-
+ stack_shift = vma->vm_end - stack_top;
+
+ bprm->p -= stack_shift;
+@@ -698,8 +740,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ bprm->exec -= stack_shift;
+
+ down_write(&mm->mmap_sem);
++
++ /* Move stack pages down in memory. */
++ if (stack_shift) {
++ ret = shift_arg_pages(vma, stack_shift);
++ if (ret)
++ goto out_unlock;
++ }
++
+ vm_flags = VM_STACK_FLAGS;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /*
+ * Adjust stack execute permissions; explicitly enable for
+ * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
+@@ -718,13 +780,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ goto out_unlock;
+ BUG_ON(prev != vma);
+
+- /* Move stack pages down in memory. */
+- if (stack_shift) {
+- ret = shift_arg_pages(vma, stack_shift);
+- if (ret)
+- goto out_unlock;
+- }
+-
+ /* mprotect_fixup is overkill to remove the temporary stack flags */
+ vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
+
+@@ -748,6 +803,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
+ #endif
+ current->mm->start_stack = bprm->p;
+ ret = expand_stack(vma, stack_base);
++
++#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
++ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
++ unsigned long size, flags, vm_flags;
++
++ size = STACK_TOP - vma->vm_end;
++ flags = MAP_FIXED | MAP_PRIVATE;
++ vm_flags = VM_DONTEXPAND | VM_RESERVED;
++
++ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
++
++#ifdef CONFIG_X86
++ if (!ret) {
++ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
++ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
++ }
++#endif
++
++ }
++#endif
++
+ if (ret)
+ ret = -EFAULT;
+
+@@ -782,6 +858,8 @@ struct file *open_exec(const char *name)
+
+ fsnotify_open(file);
+
++ trace_open_exec(name);
++
+ err = deny_write_access(file);
+ if (err)
+ goto exit;
+@@ -805,7 +883,7 @@ int kernel_read(struct file *file, loff_t offset,
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- result = vfs_read(file, (void __user *)addr, count, &pos);
++ result = vfs_read(file, (void __force_user *)addr, count, &pos);
+ set_fs(old_fs);
+ return result;
+ }
+@@ -1070,6 +1148,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
+ perf_event_comm(tsk);
+ }
+
++static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
++{
++ int i, ch;
++
++ /* Copies the binary name from after last slash */
++ for (i = 0; (ch = *(fn++)) != '\0';) {
++ if (ch == '/')
++ i = 0; /* overwrite what we wrote */
++ else
++ if (i < len - 1)
++ tcomm[i++] = ch;
++ }
++ tcomm[i] = '\0';
++}
++
+ int flush_old_exec(struct linux_binprm * bprm)
+ {
+ int retval;
+@@ -1084,6 +1177,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+
+ set_mm_exe_file(bprm->mm, bprm->file);
+
++ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
+ /*
+ * Release all of the old mmap stuff
+ */
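The new filename_to_taskname() helper factors the basename-copying loop out of setup_new_exec() (removed in the hunks below) so the task comm can be set earlier, in flush_old_exec(). The loop, lifted into a standalone userspace program:

    /* Copy the component after the last '/' into a fixed buffer,
       truncating to len-1 bytes, exactly as the helper above does. */
    #include <assert.h>
    #include <string.h>

    static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
    {
        unsigned int i;
        int ch;

        for (i = 0; (ch = *(fn++)) != '\0';) {
            if (ch == '/')
                i = 0;                 /* restart after each slash */
            else if (i < len - 1)
                tcomm[i++] = ch;
        }
        tcomm[i] = '\0';
    }

    int main(void)
    {
        char comm[16];
        filename_to_taskname(comm, "/usr/bin/python3", sizeof(comm));
        assert(strcmp(comm, "python3") == 0);
        return 0;
    }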
+@@ -1116,10 +1210,6 @@ EXPORT_SYMBOL(would_dump);
+
+ void setup_new_exec(struct linux_binprm * bprm)
+ {
+- int i, ch;
+- const char *name;
+- char tcomm[sizeof(current->comm)];
+-
+ arch_pick_mmap_layout(current->mm);
+
+ /* This is the point of no return */
+@@ -1130,18 +1220,7 @@ void setup_new_exec(struct linux_binprm * bprm)
+ else
+ set_dumpable(current->mm, suid_dumpable);
+
+- name = bprm->filename;
+-
+- /* Copies the binary name from after last slash */
+- for (i=0; (ch = *(name++)) != '\0';) {
+- if (ch == '/')
+- i = 0; /* overwrite what we wrote */
+- else
+- if (i < (sizeof(tcomm) - 1))
+- tcomm[i++] = ch;
+- }
+- tcomm[i] = '\0';
+- set_task_comm(current, tcomm);
++ set_task_comm(current, bprm->tcomm);
+
+ /* Set the new mm task size. We have to do that late because it may
+ * depend on TIF_32BIT which is only updated in flush_thread() on
+@@ -1229,7 +1308,7 @@ void install_exec_creds(struct linux_binprm *bprm)
+ * wait until new credentials are committed
+ * by commit_creds() above
+ */
+- if (get_dumpable(current->mm) != SUID_DUMP_USER)
++ if (get_dumpable(current->mm) != SUID_DUMPABLE_ENABLED)
+ perf_event_exit_task(current);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+@@ -1259,6 +1338,13 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+ bprm->unsafe |= LSM_UNSAFE_PTRACE;
+ }
+
++ /*
++ * This isn't strictly necessary, but it makes it harder for LSMs to
++ * mess up.
++ */
++ if (current->no_new_privs)
++ bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
++
+ n_fs = 1;
+ spin_lock(&p->fs->lock);
+ rcu_read_lock();
+@@ -1268,7 +1354,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+ }
+ rcu_read_unlock();
+
+- if (p->fs->users > n_fs) {
++ if (atomic_read(&p->fs->users) > n_fs) {
+ bprm->unsafe |= LSM_UNSAFE_SHARE;
+ } else {
+ res = -EAGAIN;
+@@ -1302,7 +1388,8 @@ int prepare_binprm(struct linux_binprm *bprm)
+ bprm->cred->euid = current_euid();
+ bprm->cred->egid = current_egid();
+
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
++ if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
++ !current->no_new_privs) {
+ /* Set-uid? */
+ if (mode & S_ISUID) {
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+@@ -1463,6 +1550,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+
+ EXPORT_SYMBOL(search_binary_handler);
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++static DEFINE_PER_CPU(u64, exec_counter);
++static int __init init_exec_counters(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu) {
++ per_cpu(exec_counter, cpu) = (u64)cpu;
++ }
++
++ return 0;
++}
++early_initcall(init_exec_counters);
++static inline void increment_exec_counter(void)
++{
++ BUILD_BUG_ON(NR_CPUS > (1 << 16));
++ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
++}
++#else
++static inline void increment_exec_counter(void) {}
++#endif
++
++extern void gr_handle_exec_args(struct linux_binprm *bprm,
++ struct user_arg_ptr argv);
++
+ /*
+ * sys_execve() executes a new program.
+ */
+@@ -1471,6 +1583,11 @@ static int do_execve_common(const char *filename,
+ struct user_arg_ptr envp,
+ struct pt_regs *regs)
+ {
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+ struct linux_binprm *bprm;
+ struct file *file;
+ struct files_struct *displaced;
+@@ -1478,6 +1595,8 @@ static int do_execve_common(const char *filename,
+ int retval;
+ const struct cred *cred = current_cred();
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
++
+ /*
+ * We move the actual failure in case of RLIMIT_NPROC excess from
+ * set*uid() to execve() because too many poorly written programs
+@@ -1518,12 +1637,22 @@ static int do_execve_common(const char *filename,
+ if (IS_ERR(file))
+ goto out_unmark;
+
++ if (gr_ptrace_readexec(file, bprm->unsafe)) {
++ retval = -EPERM;
++ goto out_file;
++ }
++
+ sched_exec();
+
+ bprm->file = file;
+ bprm->filename = filename;
+ bprm->interp = filename;
+
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ retval = -EACCES;
++ goto out_file;
++ }
++
+ retval = bprm_mm_init(bprm);
+ if (retval)
+ goto out_file;
+@@ -1540,24 +1669,70 @@ static int do_execve_common(const char *filename,
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ /* limit suid stack to 8MB
++ we saved the old limits above and will restore them if this exec fails
++ */
++ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
++ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
++ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
++#endif
++
++ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
++ retval = -EPERM;
++ goto out_fail;
++ }
++
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out_fail;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out_fail;
++ }
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
++ bprm->unsafe);
++ if (retval < 0)
++ goto out_fail;
++
+ retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
+
+ bprm->exec = bprm->p;
+ retval = copy_strings(bprm->envc, envp, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
+
+ retval = copy_strings(bprm->argc, argv, bprm);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(bprm, argv);
+
+ retval = search_binary_handler(bprm,regs);
+ if (retval < 0)
+- goto out;
++ goto out_fail;
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+
+ /* execve succeeded */
++
++ increment_exec_counter();
+ current->fs->in_exec = 0;
+ current->in_execve = 0;
+ acct_update_integrals(current);
+@@ -1566,6 +1741,14 @@ static int do_execve_common(const char *filename,
+ put_files_struct(displaced);
+ return retval;
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
+@@ -1639,7 +1822,7 @@ static int expand_corename(struct core_name *cn)
+ {
+ char *old_corename = cn->corename;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
+ cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+ if (!cn->corename) {
+@@ -1736,7 +1919,7 @@ static int format_corename(struct core_name *cn, long signr)
+ int pid_in_pattern = 0;
+ int err = 0;
+
+- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
++ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
+ cn->corename = kmalloc(cn->size, GFP_KERNEL);
+ cn->used = 0;
+
+@@ -1833,6 +2016,292 @@ out:
+ return ispipe;
+ }
+
++int pax_check_flags(unsigned long *flags)
++{
++ int retval = 0;
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & MF_PAX_SEGMEXEC)
++ {
++ *flags &= ~MF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & MF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & MF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & MF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~MF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++char *pax_get_path(const struct path *path, char *buf, int buflen)
++{
++ char *pathname = d_path(path, buf, buflen);
++
++ if (IS_ERR(pathname))
++ goto toolong;
++
++ pathname = mangle_path(buf, pathname, "\t\n\\");
++ if (!pathname)
++ goto toolong;
++
++ *pathname = 0;
++ return buf;
++
++toolong:
++ return "<path too long>";
++}
++EXPORT_SYMBOL(pax_get_path);
++
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
++ char *path_exec = NULL;
++ char *path_fault = NULL;
++ unsigned long start = 0UL, end = 0UL, offset = 0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec)
++ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file)
++ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
++ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
++ path_fault = "<heap>";
++ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++ path_fault = "<stack>";
++ else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ if (tsk->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
++ else
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
++ task_uid(tsk), task_euid(tsk), pc, sp);
++ free_page((unsigned long)buffer_exec);
++ free_page((unsigned long)buffer_fault);
++ pax_report_insns(regs, pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++void pax_report_refcount_overflow(struct pt_regs *regs)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
++ else
++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
++ current->comm, task_pid_nr(current), current_uid(), current_euid());
++	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
++ preempt_disable();
++ show_regs(regs);
++ preempt_enable();
++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
++static noinline int check_stack_object(const void *obj, unsigned long len)
++{
++ const void * const stack = task_stack_page(current);
++ const void * const stackend = stack + THREAD_SIZE;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ const void *frame = NULL;
++ const void *oldframe;
++#endif
++
++ if (obj + len < obj)
++ return -1;
++
++ if (obj + len <= stack || stackend <= obj)
++ return 0;
++
++ if (obj < stack || stackend < obj + len)
++ return -1;
++
++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
++ oldframe = __builtin_frame_address(1);
++ if (oldframe)
++ frame = __builtin_frame_address(2);
++ /*
++ low ----------------------------------------------> high
++ [saved bp][saved ip][args][local vars][saved bp][saved ip]
++ ^----------------^
++ allow copies only within here
++ */
++ while (stack <= frame && frame < stackend) {
++ /* if obj + len extends past the last frame, this
++ check won't pass and the next frame will be 0,
++ causing us to bail out and correctly report
++ the copy as invalid
++ */
++ if (obj + len <= frame)
++ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
++ oldframe = frame;
++ frame = *(const void * const *)frame;
++ }
++ return -1;
++#else
++ return 1;
++#endif
++}
++
++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
++{
++ if (current->signal->curr_ip)
++ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
++ else
++ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
++ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
++ dump_stack();
++ gr_handle_kernel_exploit();
++ do_group_exit(SIGKILL);
++}
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
++static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
++{
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long textlow = ktla_ktva((unsigned long)_stext);
++#ifdef CONFIG_MODULES
++ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
++#else
++ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
++#endif
++
++#else
++ unsigned long textlow = (unsigned long)_stext;
++ unsigned long texthigh = (unsigned long)_etext;
++
++#ifdef CONFIG_X86_64
++ /* check against linear mapping as well */
++ if (high > (unsigned long)__va(__pa(textlow)) &&
++ low <= (unsigned long)__va(__pa(texthigh)))
++ return true;
++#endif
++
++#endif
++
++ if (high <= textlow || low > texthigh)
++ return false;
++ else
++ return true;
++}
++#endif
++
++void __check_object_size(const void *ptr, unsigned long n, bool to_user)
++{
++
++#ifdef CONFIG_PAX_USERCOPY
++ const char *type;
++
++ if (!n)
++ return;
++
++ type = check_heap_object(ptr, n);
++ if (!type) {
++ int ret = check_stack_object(ptr, n);
++ if (ret == 1 || ret == 2)
++ return;
++ if (ret == 0) {
++ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
++ type = "<kernel text>";
++ else
++ return;
++ } else
++ type = "<process stack>";
++ }
++
++ pax_report_usercopy(ptr, n, to_user, type);
++#endif
++
++}
++EXPORT_SYMBOL(__check_object_size);
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_track_stack(void)
++{
++ unsigned long sp = (unsigned long)&sp;
++ if (sp < current_thread_info()->lowest_stack &&
++ sp > (unsigned long)task_stack_page(current))
++ current_thread_info()->lowest_stack = sp;
++}
++EXPORT_SYMBOL(pax_track_stack);
++#endif
++
++#ifdef CONFIG_PAX_SIZE_OVERFLOW
++void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++{
++ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
++ dump_stack();
++ do_group_exit(SIGKILL);
++}
++EXPORT_SYMBOL(report_size_overflow);
++#endif
++
+ static int zap_process(struct task_struct *start, int exit_code)
+ {
+ struct task_struct *t;
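Within the large hunk above, check_stack_object() classifies a proposed usercopy against the current kernel stack. Its interval tests, modeled in userspace with the x86 frame walk omitted: 0 means the object is entirely off this stack, 1 fully inside, -1 a partial overlap or wrapped length, which PAX_USERCOPY treats as an attack:

    #include <assert.h>

    static int check_stack_object(const char *stack, const char *stackend,
                                  const char *obj, unsigned long len)
    {
        if (obj + len < obj)
            return -1;                    /* length wraps the pointer */
        if (obj + len <= stack || stackend <= obj)
            return 0;                     /* entirely off-stack */
        if (obj < stack || stackend < obj + len)
            return -1;                    /* straddles a stack boundary */
        return 1;
    }

    int main(void)
    {
        char mem[512];
        const char *stack = mem + 128, *stackend = mem + 384;

        assert(check_stack_object(stack, stackend, mem + 160, 32) == 1);
        assert(check_stack_object(stack, stackend, mem + 16,  32) == 0);
        assert(check_stack_object(stack, stackend, mem + 368, 64) == -1);
        return 0;
    }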
+@@ -2006,17 +2475,17 @@ static void coredump_finish(struct mm_struct *mm)
+ void set_dumpable(struct mm_struct *mm, int value)
+ {
+ switch (value) {
+- case 0:
++ case SUID_DUMPABLE_DISABLED:
+ clear_bit(MMF_DUMPABLE, &mm->flags);
+ smp_wmb();
+ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+ break;
+- case 1:
++ case SUID_DUMPABLE_ENABLED:
+ set_bit(MMF_DUMPABLE, &mm->flags);
+ smp_wmb();
+ clear_bit(MMF_DUMP_SECURELY, &mm->flags);
+ break;
+- case 2:
++ case SUID_DUMPABLE_SAFE:
+ set_bit(MMF_DUMP_SECURELY, &mm->flags);
+ smp_wmb();
+ set_bit(MMF_DUMPABLE, &mm->flags);
+@@ -2029,7 +2498,7 @@ static int __get_dumpable(unsigned long mm_flags)
+ int ret;
+
+ ret = mm_flags & MMF_DUMPABLE_MASK;
+- return (ret >= 2) ? 2 : ret;
++ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
+ }
+
+ /*
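The set_dumpable()/__get_dumpable() hunks swap the magic values 0/1/2 for SUID_DUMPABLE_* names; the clamp keeps any out-of-range flag combination in the root-only "safe" mode. A sketch, assuming the same two-bit mask as the kernel code above:

    #include <assert.h>

    enum { SUID_DUMPABLE_DISABLED, SUID_DUMPABLE_ENABLED, SUID_DUMPABLE_SAFE };
    #define MMF_DUMPABLE_MASK 3

    static int get_dumpable(unsigned long mm_flags)
    {
        int ret = mm_flags & MMF_DUMPABLE_MASK;
        /* anything above "enabled" collapses to the restrictive mode */
        return ret > SUID_DUMPABLE_ENABLED ? SUID_DUMPABLE_SAFE : ret;
    }

    int main(void)
    {
        assert(get_dumpable(3) == SUID_DUMPABLE_SAFE);
        assert(get_dumpable(1) == SUID_DUMPABLE_ENABLED);
        return 0;
    }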
+@@ -2050,17 +2519,17 @@ static void wait_for_dump_helpers(struct file *file)
+ pipe = file->f_path.dentry->d_inode->i_pipe;
+
+ pipe_lock(pipe);
+- pipe->readers++;
+- pipe->writers--;
++ atomic_inc(&pipe->readers);
++ atomic_dec(&pipe->writers);
+
+- while ((pipe->readers > 1) && (!signal_pending(current))) {
++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
+ wake_up_interruptible_sync(&pipe->wait);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ pipe_wait(pipe);
+ }
+
+- pipe->readers--;
+- pipe->writers++;
++ atomic_dec(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe_unlock(pipe);
+
+ }
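wait_for_dump_helpers() here, and fs/fifo.c further down, move pipe->readers/writers to atomic counters, trading `x++ == 0` for `atomic_inc_return(&x) == 1` and `!--x` for `atomic_dec_and_test(&x)`. The equivalences, checked with C11 atomics as a userspace stand-in:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
        atomic_int readers = 0;

        /* atomic_inc_return(&x) == 1  <=>  x++ == 0 (first user arrived) */
        assert(atomic_fetch_add(&readers, 1) + 1 == 1);
        atomic_fetch_add(&readers, 1);                   /* second reader */

        /* atomic_dec_and_test(&x)  <=>  !--x (last user left) */
        assert(atomic_fetch_sub(&readers, 1) - 1 != 0);  /* not last yet */
        assert(atomic_fetch_sub(&readers, 1) - 1 == 0);  /* last one out */
        return 0;
    }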
+@@ -2121,7 +2590,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ int retval = 0;
+ int flag = 0;
+ int ispipe;
+- static atomic_t core_dump_count = ATOMIC_INIT(0);
++ bool need_nonrelative = false;
++ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
+ struct coredump_params cprm = {
+ .signr = signr,
+ .regs = regs,
+@@ -2136,6 +2606,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+
+ audit_core_dumps(signr);
+
++ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
++ gr_handle_brute_attach(cprm.mm_flags);
++
+ binfmt = mm->binfmt;
+ if (!binfmt || !binfmt->core_dump)
+ goto fail;
+@@ -2146,14 +2619,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ if (!cred)
+ goto fail;
+ /*
+- * We cannot trust fsuid as being the "true" uid of the
+- * process nor do we know its entire history. We only know it
+- * was tainted so we dump it as root in mode 2.
++ * We cannot trust fsuid as being the "true" uid of the process
++ * nor do we know its entire history. We only know it was tainted
++ * so we dump it as root in mode 2, and only into a controlled
++ * environment (pipe handler or fully qualified path).
+ */
+- if (__get_dumpable(cprm.mm_flags) == 2) {
++ if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
+ /* Setuid core dump mode */
+ flag = O_EXCL; /* Stop rewrite attacks */
+ cred->fsuid = 0; /* Dump root private */
++ need_nonrelative = true;
+ }
+
+ retval = coredump_wait(exit_code, &core_state);
+@@ -2203,7 +2678,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ }
+ cprm.limit = RLIM_INFINITY;
+
+- dump_count = atomic_inc_return(&core_dump_count);
++ dump_count = atomic_inc_return_unchecked(&core_dump_count);
+ if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+ printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+ task_tgid_vnr(current), current->comm);
+@@ -2230,9 +2705,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+ } else {
+ struct inode *inode;
+
++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
++
+ if (cprm.limit < binfmt->min_coredump)
+ goto fail_unlock;
+
++ if (need_nonrelative && cn.corename[0] != '/') {
++ printk(KERN_WARNING "Pid %d(%s) can only dump core "\
++ "to fully qualified path!\n",
++ task_tgid_vnr(current), current->comm);
++ printk(KERN_WARNING "Skipping core dump\n");
++ goto fail_unlock;
++ }
++
+ cprm.file = filp_open(cn.corename,
+ O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+ 0600);
+@@ -2273,7 +2758,7 @@ close_fail:
+ filp_close(cprm.file, NULL);
+ fail_dropcount:
+ if (ispipe)
+- atomic_dec(&core_dump_count);
++ atomic_dec_unchecked(&core_dump_count);
+ fail_unlock:
+ kfree(cn.corename);
+ fail_corename:
+@@ -2292,7 +2777,7 @@ fail:
+ */
+ int dump_write(struct file *file, const void *addr, int nr)
+ {
+- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
++ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
+ }
+ EXPORT_SYMBOL(dump_write);
+
+diff --git a/fs/exofs/super.c b/fs/exofs/super.c
+index 7ed5000..cbe7b49 100644
+--- a/fs/exofs/super.c
++++ b/fs/exofs/super.c
+@@ -1008,6 +1008,7 @@ static struct file_system_type exofs_type = {
+ .mount = exofs_mount,
+ .kill_sb = generic_shutdown_super,
+ };
++MODULE_ALIAS_FS("exofs");
+
+ static int __init init_exofs(void)
+ {
+diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
+index a8cbe1b..fed04cb 100644
+--- a/fs/ext2/balloc.c
++++ b/fs/ext2/balloc.c
+@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
+ sbi->s_resuid != current_fsuid() &&
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+ return 0;
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index bd8ac16..43811b9 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1494,6 +1494,7 @@ static struct file_system_type ext2_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ext2");
+
+ static int __init init_ext2_fs(void)
+ {
+diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
+index d27b71f..071b0e1 100644
+--- a/fs/ext2/xattr.c
++++ b/fs/ext2/xattr.c
+@@ -248,7 +248,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ struct buffer_head *bh = NULL;
+ struct ext2_xattr_entry *entry;
+ char *end;
+- size_t rest = buffer_size;
++ size_t rest = buffer_size, total_size = 0;
+ int error;
+
+ ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+@@ -306,9 +306,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
+ buffer += size;
+ }
+ rest -= size;
++ total_size += size;
+ }
+ }
+- error = buffer_size - rest; /* total size */
++ error = total_size;
+
+ cleanup:
+ brelse(bh);
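The xattr-list hunks (repeated for ext3 and ext4 below) return an explicitly accumulated total_size instead of reconstructing it as buffer_size - rest. In the size-query case (buffer == NULL, buffer_size == 0) the running remainder wraps its unsigned type, and the rewrite stops depending on that wraparound. A demonstration:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t buffer_size = 0;            /* caller only asks for the size */
        size_t rest = buffer_size, total_size = 0;
        size_t entry_sizes[] = { 10, 20 };

        for (int i = 0; i < 2; i++) {
            rest -= entry_sizes[i];        /* wraps: rest is now huge */
            total_size += entry_sizes[i];
        }
        assert(buffer_size - rest == 30);  /* modular arithmetic recovers it... */
        assert(total_size == 30);          /* ...but this never relied on wrap */
        return 0;
    }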
+diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
+index a203892..4e64db5 100644
+--- a/fs/ext3/balloc.c
++++ b/fs/ext3/balloc.c
+@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
+
+ free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+ root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
+- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
++ if (free_blocks < root_blocks + 1 &&
+ !use_reservation && sbi->s_resuid != current_fsuid() &&
+- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
++ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
++ !capable_nolog(CAP_SYS_RESOURCE)) {
+ return 0;
+ }
+ return 1;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index b7f314f..ef3b16c 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -3065,6 +3065,7 @@ static struct file_system_type ext3_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ext3");
+
+ static int __init init_ext3_fs(void)
+ {
+diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
+index d565759..a1315f2 100644
+--- a/fs/ext3/xattr.c
++++ b/fs/ext3/xattr.c
+@@ -335,7 +335,7 @@ static int
+ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
+ char *buffer, size_t buffer_size)
+ {
+- size_t rest = buffer_size;
++ size_t rest = buffer_size, total_size = 0;
+
+ for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
+ const struct xattr_handler *handler =
+@@ -352,9 +352,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
+ buffer += size;
+ }
+ rest -= size;
++ total_size += size;
+ }
+ }
+- return buffer_size - rest;
++ return total_size;
+ }
+
+ static int
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 2845a1f..f29de63 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -441,8 +441,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+ /* Hm, nope. Are (enough) root reserved clusters available? */
+ if (sbi->s_resuid == current_fsuid() ||
+ ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
+- capable(CAP_SYS_RESOURCE) ||
+- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
++ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
++ capable_nolog(CAP_SYS_RESOURCE)) {
+
+ if (free_clusters >= (nclusters + dirty_clusters))
+ return 1;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 22c71b9..ba28a7d 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1206,19 +1206,19 @@ struct ext4_sb_info {
+ unsigned long s_mb_last_start;
+
+ /* stats for buddy allocator */
+- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
+- atomic_t s_bal_success; /* we found long enough chunks */
+- atomic_t s_bal_allocated; /* in blocks */
+- atomic_t s_bal_ex_scanned; /* total extents scanned */
+- atomic_t s_bal_goals; /* goal hits */
+- atomic_t s_bal_breaks; /* too long searches */
+- atomic_t s_bal_2orders; /* 2^order hits */
++ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
++ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
++ atomic_unchecked_t s_bal_allocated; /* in blocks */
++ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
++ atomic_unchecked_t s_bal_goals; /* goal hits */
++ atomic_unchecked_t s_bal_breaks; /* too long searches */
++ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
+ spinlock_t s_bal_lock;
+ unsigned long s_mb_buddies_generated;
+ unsigned long long s_mb_generation_time;
+- atomic_t s_mb_lost_chunks;
+- atomic_t s_mb_preallocated;
+- atomic_t s_mb_discarded;
++ atomic_unchecked_t s_mb_lost_chunks;
++ atomic_unchecked_t s_mb_preallocated;
++ atomic_unchecked_t s_mb_discarded;
+ atomic_t s_lock_busy;
+
+ /* locality groups */
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9b8c131..d469b31 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+ BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+
+ if (EXT4_SB(sb)->s_mb_stats)
+- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
++ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
+
+ break;
+ }
+@@ -2092,7 +2092,7 @@ repeat:
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_flags |= EXT4_MB_HINT_FIRST;
+ cr = 3;
+- atomic_inc(&sbi->s_mb_lost_chunks);
++ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
+ goto repeat;
+ }
+ }
+@@ -2599,25 +2599,25 @@ int ext4_mb_release(struct super_block *sb)
+ if (sbi->s_mb_stats) {
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u blocks %u reqs (%u success)",
+- atomic_read(&sbi->s_bal_allocated),
+- atomic_read(&sbi->s_bal_reqs),
+- atomic_read(&sbi->s_bal_success));
++ atomic_read_unchecked(&sbi->s_bal_allocated),
++ atomic_read_unchecked(&sbi->s_bal_reqs),
++ atomic_read_unchecked(&sbi->s_bal_success));
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u extents scanned, %u goal hits, "
+ "%u 2^N hits, %u breaks, %u lost",
+- atomic_read(&sbi->s_bal_ex_scanned),
+- atomic_read(&sbi->s_bal_goals),
+- atomic_read(&sbi->s_bal_2orders),
+- atomic_read(&sbi->s_bal_breaks),
+- atomic_read(&sbi->s_mb_lost_chunks));
++ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
++ atomic_read_unchecked(&sbi->s_bal_goals),
++ atomic_read_unchecked(&sbi->s_bal_2orders),
++ atomic_read_unchecked(&sbi->s_bal_breaks),
++ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %lu generated and it took %Lu",
+ sbi->s_mb_buddies_generated,
+ sbi->s_mb_generation_time);
+ ext4_msg(sb, KERN_INFO,
+ "mballoc: %u preallocated, %u discarded",
+- atomic_read(&sbi->s_mb_preallocated),
+- atomic_read(&sbi->s_mb_discarded));
++ atomic_read_unchecked(&sbi->s_mb_preallocated),
++ atomic_read_unchecked(&sbi->s_mb_discarded));
+ }
+
+ free_percpu(sbi->s_locality_groups);
+@@ -3101,16 +3101,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+
+ if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
+- atomic_inc(&sbi->s_bal_reqs);
+- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
++ atomic_inc_unchecked(&sbi->s_bal_reqs);
++ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+ if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
+- atomic_inc(&sbi->s_bal_success);
+- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
++ atomic_inc_unchecked(&sbi->s_bal_success);
++ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
+ if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
+ ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
+- atomic_inc(&sbi->s_bal_goals);
++ atomic_inc_unchecked(&sbi->s_bal_goals);
+ if (ac->ac_found > sbi->s_mb_max_to_scan)
+- atomic_inc(&sbi->s_bal_breaks);
++ atomic_inc_unchecked(&sbi->s_bal_breaks);
+ }
+
+ if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+@@ -3514,7 +3514,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ trace_ext4_mb_new_inode_pa(ac, pa);
+
+ ext4_mb_use_inode_pa(ac, pa);
+- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
+
+ ei = EXT4_I(ac->ac_inode);
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+@@ -3574,7 +3574,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+ trace_ext4_mb_new_group_pa(ac, pa);
+
+ ext4_mb_use_group_pa(ac, pa);
+- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
++ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+
+ grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ lg = ac->ac_lg;
+@@ -3663,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
+ * from the bitmap and continue.
+ */
+ }
+- atomic_add(free, &sbi->s_mb_discarded);
++ atomic_add_unchecked(free, &sbi->s_mb_discarded);
+
+ return err;
+ }
+@@ -3681,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+ BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
++ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+ trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+
+ return 0;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index f3358ab..fbb1d90 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -73,7 +73,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+ const char *function, unsigned int line, const char *msg)
+ {
+- __ext4_warning(sb, function, line, msg);
++ __ext4_warning(sb, function, line, "%s", msg);
+ __ext4_warning(sb, function, line,
+ "MMP failure info: last update time: %llu, last update "
+ "node: %s, last update device: %s\n",
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 84f84bf..a8770cd 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -92,6 +92,8 @@ static struct file_system_type ext2_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ext2");
++MODULE_ALIAS("ext2");
+ #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
+ #else
+ #define IS_EXT2_SB(sb) (0)
+@@ -106,6 +108,8 @@ static struct file_system_type ext3_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ext3");
++MODULE_ALIAS("ext3");
+ #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+ #else
+ #define IS_EXT3_SB(sb) (0)
+@@ -1438,7 +1442,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+ }
+
+ #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
++static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+ "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
+
+ #ifdef CONFIG_QUOTA
+@@ -2469,7 +2473,7 @@ struct ext4_attr {
+ ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
+ const char *, size_t);
+ int offset;
+-};
++} __do_const;
+
+ static int parse_strtoul(const char *buf,
+ unsigned long max, unsigned long *value)
+@@ -3175,7 +3179,6 @@ int ext4_calculate_overhead(struct super_block *sb)
+ ext4_fsblk_t overhead = 0;
+ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+- memset(buf, 0, PAGE_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+@@ -5052,7 +5055,6 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
+ return 0;
+ return 1;
+ }
+-MODULE_ALIAS("ext2");
+ #else
+ static inline void register_as_ext2(void) { }
+ static inline void unregister_as_ext2(void) { }
+@@ -5085,7 +5087,6 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
+ return 0;
+ return 1;
+ }
+-MODULE_ALIAS("ext3");
+ #else
+ static inline void register_as_ext3(void) { }
+ static inline void unregister_as_ext3(void) { }
+@@ -5099,6 +5100,7 @@ static struct file_system_type ext4_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ext4");
+
+ static int __init ext4_init_feat_adverts(void)
+ {
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 05617bd..aac23ad 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -328,7 +328,7 @@ static int
+ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+ char *buffer, size_t buffer_size)
+ {
+- size_t rest = buffer_size;
++ size_t rest = buffer_size, total_size = 0;
+
+ for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+ const struct xattr_handler *handler =
+@@ -345,9 +345,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+ buffer += size;
+ }
+ rest -= size;
++ total_size += size;
+ }
+ }
+- return buffer_size - rest;
++ return total_size;
+ }
+
+ static int
+diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
+index 216b419..350a088 100644
+--- a/fs/fat/namei_msdos.c
++++ b/fs/fat/namei_msdos.c
+@@ -674,6 +674,7 @@ static struct file_system_type msdos_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("msdos");
+
+ static int __init init_msdos_fs(void)
+ {
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index c25cf15..e5ea08a 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -1090,6 +1090,7 @@ static struct file_system_type vfat_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("vfat");
+
+ static int __init init_vfat_fs(void)
+ {
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 22764c7..86372c9 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+ if (err)
+ return err;
+
++ if (gr_handle_chroot_fowner(pid, type))
++ return -ENOENT;
++ if (gr_check_protected_task_fowner(pid, type))
++ return -EACCES;
++
+ f_modown(filp, pid, type, force);
+ return 0;
+ }
+@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
+
+ static int f_setown_ex(struct file *filp, unsigned long arg)
+ {
+- struct f_owner_ex * __user owner_p = (void * __user)arg;
++ struct f_owner_ex __user *owner_p = (void __user *)arg;
+ struct f_owner_ex owner;
+ struct pid *pid;
+ int type;
+@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
+
+ static int f_getown_ex(struct file *filp, unsigned long arg)
+ {
+- struct f_owner_ex * __user owner_p = (void * __user)arg;
++ struct f_owner_ex __user *owner_p = (void __user *)arg;
+ struct f_owner_ex owner;
+ int ret = 0;
+
+@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
+ switch (cmd) {
+ case F_DUPFD:
+ case F_DUPFD_CLOEXEC:
++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
+ if (arg >= rlimit(RLIMIT_NOFILE))
+ break;
+ err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
+diff --git a/fs/fifo.c b/fs/fifo.c
+index cf6f434..3d7942c 100644
+--- a/fs/fifo.c
++++ b/fs/fifo.c
+@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ */
+ filp->f_op = &read_pipefifo_fops;
+ pipe->r_counter++;
+- if (pipe->readers++ == 0)
++ if (atomic_inc_return(&pipe->readers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->writers) {
++ if (!atomic_read(&pipe->writers)) {
+ if ((filp->f_flags & O_NONBLOCK)) {
+ /* suppress POLLHUP until we have
+ * seen a writer */
+@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ * errno=ENXIO when there is no process reading the FIFO.
+ */
+ ret = -ENXIO;
+- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
+ goto err;
+
+ filp->f_op = &write_pipefifo_fops;
+ pipe->w_counter++;
+- if (!pipe->writers++)
++ if (atomic_inc_return(&pipe->writers) == 1)
+ wake_up_partner(inode);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ if (wait_for_partner(inode, &pipe->r_counter))
+ goto err_wr;
+ }
+@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ */
+ filp->f_op = &rdwr_pipefifo_fops;
+
+- pipe->readers++;
+- pipe->writers++;
++ atomic_inc(&pipe->readers);
++ atomic_inc(&pipe->writers);
+ pipe->r_counter++;
+ pipe->w_counter++;
+- if (pipe->readers == 1 || pipe->writers == 1)
++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
+ wake_up_partner(inode);
+ break;
+
+@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ return 0;
+
+ err_rd:
+- if (!--pipe->readers)
++ if (atomic_dec_and_test(&pipe->readers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err_wr:
+- if (!--pipe->writers)
++ if (atomic_dec_and_test(&pipe->writers))
+ wake_up_interruptible(&pipe->wait);
+ ret = -ERESTARTSYS;
+ goto err;
+
+ err:
+- if (!pipe->readers && !pipe->writers)
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
+ free_pipe_info(inode);
+
+ err_nocleanup:
+diff --git a/fs/file.c b/fs/file.c
+index 4c6992d..41c1f84 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -15,6 +15,7 @@
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/file.h>
++#include <linux/security.h>
+ #include <linux/fdtable.h>
+ #include <linux/bitops.h>
+ #include <linux/interrupt.h>
+@@ -199,7 +200,7 @@ out:
+ * Return <0 error code on error; 1 on successful completion.
+ * The files->file_lock should be held on entry, and will be held on exit.
+ */
+-static int expand_fdtable(struct files_struct *files, int nr)
++static int expand_fdtable(struct files_struct *files, unsigned int nr)
+ __releases(files->file_lock)
+ __acquires(files->file_lock)
+ {
+@@ -244,7 +245,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
+ * expanded and execution may have blocked.
+ * The files->file_lock should be held on entry, and will be held on exit.
+ */
+-int expand_files(struct files_struct *files, int nr)
++int expand_files(struct files_struct *files, unsigned int nr)
+ {
+ struct fdtable *fdt;
+
+@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
+ if (nr >= rlimit(RLIMIT_NOFILE))
+ return -EMFILE;
+
+diff --git a/fs/filesystems.c b/fs/filesystems.c
+index 0845f84..bf3fd0571 100644
+--- a/fs/filesystems.c
++++ b/fs/filesystems.c
+@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
+ int len = dot ? dot - name : strlen(name);
+
+ fs = __get_fs_type(name, len);
+- if (!fs && (request_module("%.*s", len, name) == 0))
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
++#else
++ if (!fs && (request_module("fs-%.*s", len, name) == 0))
++#endif
+ fs = __get_fs_type(name, len);
+
+ if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
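get_fs_type() now requests the module "fs-<name>" rather than "<name>", so a mount can only auto-load modules that opted in via MODULE_ALIAS_FS(), which expands to an "fs-"-prefixed alias; this is why MODULE_ALIAS_FS() lines are added to every filesystem throughout this patch. The format string, exercised in userspace:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *name = "ext4";
        char request[64];

        /* same "fs-%.*s" format the patched get_fs_type() passes on */
        snprintf(request, sizeof(request), "fs-%.*s",
                 (int)strlen(name), name);
        printf("request_module(\"%s\")\n", request);
        return 0;
    }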
+diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
+index 9d1c995..7685971 100644
+--- a/fs/freevxfs/vxfs_super.c
++++ b/fs/freevxfs/vxfs_super.c
+@@ -52,7 +52,6 @@ MODULE_AUTHOR("Christoph Hellwig");
+ MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver");
+ MODULE_LICENSE("Dual BSD/GPL");
+
+-MODULE_ALIAS("vxfs"); /* makes mount -t vxfs autoload the module */
+
+
+ static void vxfs_put_super(struct super_block *);
+@@ -259,6 +258,8 @@ static struct file_system_type vxfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */
++MODULE_ALIAS("vxfs");
+
+ static int __init
+ vxfs_init(void)
+diff --git a/fs/fs_struct.c b/fs/fs_struct.c
+index 78b519c..0386555 100644
+--- a/fs/fs_struct.c
++++ b/fs/fs_struct.c
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/grsecurity.h>
+ #include "internal.h"
+
+ static inline void path_get_longterm(struct path *path)
+@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
+ old_root = fs->root;
+ fs->root = *path;
+ path_get_longterm(path);
++ gr_set_chroot_entries(current, path);
+ write_seqcount_end(&fs->seq);
+ spin_unlock(&fs->lock);
+ if (old_root.dentry)
+@@ -74,6 +76,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
+ && fs->root.mnt == old_root->mnt) {
+ path_get_longterm(new_root);
+ fs->root = *new_root;
++ /* This function is only called
++ from pivot_root(). Leave our
++ gr_chroot_dentry and is_chrooted flags
++ as-is, so that a pivoted root isn't treated
++ as a chroot
++ */
++ //gr_set_chroot_entries(p, new_root);
+ count++;
+ }
+ if (fs->pwd.dentry == old_root->dentry
+@@ -109,7 +118,8 @@ void exit_fs(struct task_struct *tsk)
+ spin_lock(&fs->lock);
+ write_seqcount_begin(&fs->seq);
+ tsk->fs = NULL;
+- kill = !--fs->users;
++ gr_clear_chroot_entries(tsk);
++ kill = !atomic_dec_return(&fs->users);
+ write_seqcount_end(&fs->seq);
+ spin_unlock(&fs->lock);
+ task_unlock(tsk);
+@@ -123,7 +133,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
+ /* We don't need to lock fs - think why ;-) */
+ if (fs) {
+- fs->users = 1;
++ atomic_set(&fs->users, 1);
+ fs->in_exec = 0;
+ spin_lock_init(&fs->lock);
+ seqcount_init(&fs->seq);
+@@ -132,6 +142,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
+ spin_lock(&old->lock);
+ fs->root = old->root;
+ path_get_longterm(&fs->root);
++ /* instead of calling gr_set_chroot_entries here,
++ we call it from every caller of this function
++ */
+ fs->pwd = old->pwd;
+ path_get_longterm(&fs->pwd);
+ spin_unlock(&old->lock);
+@@ -150,8 +163,9 @@ int unshare_fs_struct(void)
+
+ task_lock(current);
+ spin_lock(&fs->lock);
+- kill = !--fs->users;
++ kill = !atomic_dec_return(&fs->users);
+ current->fs = new_fs;
++ gr_set_chroot_entries(current, &new_fs->root);
+ spin_unlock(&fs->lock);
+ task_unlock(current);
+
+@@ -164,13 +178,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
+
+ int current_umask(void)
+ {
+- return current->fs->umask;
++ return current->fs->umask | gr_acl_umask();
+ }
+ EXPORT_SYMBOL(current_umask);
+
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+- .users = 1,
++ .users = ATOMIC_INIT(1),
+ .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+ .seq = SEQCNT_ZERO,
+ .umask = 0022,
+@@ -186,12 +200,13 @@ void daemonize_fs_struct(void)
+ task_lock(current);
+
+ spin_lock(&init_fs.lock);
+- init_fs.users++;
++ atomic_inc(&init_fs.users);
+ spin_unlock(&init_fs.lock);
+
+ spin_lock(&fs->lock);
+ current->fs = &init_fs;
+- kill = !--fs->users;
++ gr_set_chroot_entries(current, &current->fs->root);
++ kill = !atomic_dec_return(&fs->users);
+ spin_unlock(&fs->lock);
+
+ task_unlock(current);
+diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
+index 9905350..97ff49a 100644
+--- a/fs/fscache/cookie.c
++++ b/fs/fscache/cookie.c
+@@ -19,7 +19,7 @@
+
+ struct kmem_cache *fscache_cookie_jar;
+
+-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
++static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
+
+ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
+ static int fscache_alloc_object(struct fscache_cache *cache,
+@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
+ parent ? (char *) parent->def->name : "<no-parent>",
+ def->name, netfs_data);
+
+- fscache_stat(&fscache_n_acquires);
++ fscache_stat_unchecked(&fscache_n_acquires);
+
+ /* if there's no parent cookie, then we don't create one here either */
+ if (!parent) {
+- fscache_stat(&fscache_n_acquires_null);
++ fscache_stat_unchecked(&fscache_n_acquires_null);
+ _leave(" [no parent]");
+ return NULL;
+ }
+@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie) {
+- fscache_stat(&fscache_n_acquires_oom);
++ fscache_stat_unchecked(&fscache_n_acquires_oom);
+ _leave(" [ENOMEM]");
+ return NULL;
+ }
+@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
+
+ switch (cookie->def->type) {
+ case FSCACHE_COOKIE_TYPE_INDEX:
+- fscache_stat(&fscache_n_cookie_index);
++ fscache_stat_unchecked(&fscache_n_cookie_index);
+ break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE:
+- fscache_stat(&fscache_n_cookie_data);
++ fscache_stat_unchecked(&fscache_n_cookie_data);
+ break;
+ default:
+- fscache_stat(&fscache_n_cookie_special);
++ fscache_stat_unchecked(&fscache_n_cookie_special);
+ break;
+ }
+
+@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
+ if (fscache_acquire_non_index_cookie(cookie) < 0) {
+ atomic_dec(&parent->n_children);
+ __fscache_cookie_put(cookie);
+- fscache_stat(&fscache_n_acquires_nobufs);
++ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
+ _leave(" = NULL");
+ return NULL;
+ }
+ }
+
+- fscache_stat(&fscache_n_acquires_ok);
++ fscache_stat_unchecked(&fscache_n_acquires_ok);
+ _leave(" = %p", cookie);
+ return cookie;
+ }
+@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
+ cache = fscache_select_cache_for_object(cookie->parent);
+ if (!cache) {
+ up_read(&fscache_addremove_sem);
+- fscache_stat(&fscache_n_acquires_no_cache);
++ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
+ _leave(" = -ENOMEDIUM [no cache]");
+ return -ENOMEDIUM;
+ }
+@@ -256,14 +256,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
+ object = cache->ops->alloc_object(cache, cookie);
+ fscache_stat_d(&fscache_n_cop_alloc_object);
+ if (IS_ERR(object)) {
+- fscache_stat(&fscache_n_object_no_alloc);
++ fscache_stat_unchecked(&fscache_n_object_no_alloc);
+ ret = PTR_ERR(object);
+ goto error;
+ }
+
+- fscache_stat(&fscache_n_object_alloc);
++ fscache_stat_unchecked(&fscache_n_object_alloc);
+
+- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
++ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
+
+ _debug("ALLOC OBJ%x: %s {%lx}",
+ object->debug_id, cookie->def->name, object->events);
+@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
+ struct fscache_object *object;
+ struct hlist_node *_p;
+
+- fscache_stat(&fscache_n_updates);
++ fscache_stat_unchecked(&fscache_n_updates);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_updates_null);
++ fscache_stat_unchecked(&fscache_n_updates_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+ struct fscache_object *object;
+ unsigned long event;
+
+- fscache_stat(&fscache_n_relinquishes);
++ fscache_stat_unchecked(&fscache_n_relinquishes);
+ if (retire)
+- fscache_stat(&fscache_n_relinquishes_retire);
++ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
+
+ if (!cookie) {
+- fscache_stat(&fscache_n_relinquishes_null);
++ fscache_stat_unchecked(&fscache_n_relinquishes_null);
+ _leave(" [no cookie]");
+ return;
+ }
+@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+
+ /* wait for the cookie to finish being instantiated (or to fail) */
+ if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
+- fscache_stat(&fscache_n_relinquishes_waitcrt);
++ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ }
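The mechanical fscache_stat -> fscache_stat_unchecked renaming in this file (and those that follow) stems from PaX's REFCOUNT hardening: ordinary atomic_t arithmetic is instrumented to catch overflow, since a wrapped reference count is usually an exploitable bug, so counters that are allowed to wrap, such as pure statistics and debug IDs, are moved to atomic_unchecked_t. A rough userspace analogy follows, assuming a checked increment that treats wrap-around as fatal; the exact PaX mechanics (saturation, signalling the offending task) differ.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void checked_inc(atomic_int *v)
    {
        /* Rough analogue of a PaX-checked atomic_inc(): wrapping a checked
         * counter is treated as a bug (a likely refcount overflow). */
        if (atomic_fetch_add(v, 1) == INT_MAX) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();
        }
    }

    static void unchecked_inc(atomic_int *v)
    {
        atomic_fetch_add(v, 1);     /* statistics may wrap harmlessly */
    }

    int main(void)
    {
        atomic_int refcount = 1;
        atomic_int stat = INT_MAX;

        checked_inc(&refcount);     /* fine: 1 -> 2 */
        unchecked_inc(&stat);       /* wraps to INT_MIN, acceptable for a stat */
        printf("ref=%d stat=%d\n", atomic_load(&refcount), atomic_load(&stat));
        return 0;
    }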
+diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
+index f6aad48..88dcf26 100644
+--- a/fs/fscache/internal.h
++++ b/fs/fscache/internal.h
+@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
+ extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+ extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+
+-extern atomic_t fscache_n_op_pend;
+-extern atomic_t fscache_n_op_run;
+-extern atomic_t fscache_n_op_enqueue;
+-extern atomic_t fscache_n_op_deferred_release;
+-extern atomic_t fscache_n_op_release;
+-extern atomic_t fscache_n_op_gc;
+-extern atomic_t fscache_n_op_cancelled;
+-extern atomic_t fscache_n_op_rejected;
++extern atomic_unchecked_t fscache_n_op_pend;
++extern atomic_unchecked_t fscache_n_op_run;
++extern atomic_unchecked_t fscache_n_op_enqueue;
++extern atomic_unchecked_t fscache_n_op_deferred_release;
++extern atomic_unchecked_t fscache_n_op_release;
++extern atomic_unchecked_t fscache_n_op_gc;
++extern atomic_unchecked_t fscache_n_op_cancelled;
++extern atomic_unchecked_t fscache_n_op_rejected;
+
+-extern atomic_t fscache_n_attr_changed;
+-extern atomic_t fscache_n_attr_changed_ok;
+-extern atomic_t fscache_n_attr_changed_nobufs;
+-extern atomic_t fscache_n_attr_changed_nomem;
+-extern atomic_t fscache_n_attr_changed_calls;
++extern atomic_unchecked_t fscache_n_attr_changed;
++extern atomic_unchecked_t fscache_n_attr_changed_ok;
++extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
++extern atomic_unchecked_t fscache_n_attr_changed_nomem;
++extern atomic_unchecked_t fscache_n_attr_changed_calls;
+
+-extern atomic_t fscache_n_allocs;
+-extern atomic_t fscache_n_allocs_ok;
+-extern atomic_t fscache_n_allocs_wait;
+-extern atomic_t fscache_n_allocs_nobufs;
+-extern atomic_t fscache_n_allocs_intr;
+-extern atomic_t fscache_n_allocs_object_dead;
+-extern atomic_t fscache_n_alloc_ops;
+-extern atomic_t fscache_n_alloc_op_waits;
++extern atomic_unchecked_t fscache_n_allocs;
++extern atomic_unchecked_t fscache_n_allocs_ok;
++extern atomic_unchecked_t fscache_n_allocs_wait;
++extern atomic_unchecked_t fscache_n_allocs_nobufs;
++extern atomic_unchecked_t fscache_n_allocs_intr;
++extern atomic_unchecked_t fscache_n_allocs_object_dead;
++extern atomic_unchecked_t fscache_n_alloc_ops;
++extern atomic_unchecked_t fscache_n_alloc_op_waits;
+
+-extern atomic_t fscache_n_retrievals;
+-extern atomic_t fscache_n_retrievals_ok;
+-extern atomic_t fscache_n_retrievals_wait;
+-extern atomic_t fscache_n_retrievals_nodata;
+-extern atomic_t fscache_n_retrievals_nobufs;
+-extern atomic_t fscache_n_retrievals_intr;
+-extern atomic_t fscache_n_retrievals_nomem;
+-extern atomic_t fscache_n_retrievals_object_dead;
+-extern atomic_t fscache_n_retrieval_ops;
+-extern atomic_t fscache_n_retrieval_op_waits;
++extern atomic_unchecked_t fscache_n_retrievals;
++extern atomic_unchecked_t fscache_n_retrievals_ok;
++extern atomic_unchecked_t fscache_n_retrievals_wait;
++extern atomic_unchecked_t fscache_n_retrievals_nodata;
++extern atomic_unchecked_t fscache_n_retrievals_nobufs;
++extern atomic_unchecked_t fscache_n_retrievals_intr;
++extern atomic_unchecked_t fscache_n_retrievals_nomem;
++extern atomic_unchecked_t fscache_n_retrievals_object_dead;
++extern atomic_unchecked_t fscache_n_retrieval_ops;
++extern atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+-extern atomic_t fscache_n_stores;
+-extern atomic_t fscache_n_stores_ok;
+-extern atomic_t fscache_n_stores_again;
+-extern atomic_t fscache_n_stores_nobufs;
+-extern atomic_t fscache_n_stores_oom;
+-extern atomic_t fscache_n_store_ops;
+-extern atomic_t fscache_n_store_calls;
+-extern atomic_t fscache_n_store_pages;
+-extern atomic_t fscache_n_store_radix_deletes;
+-extern atomic_t fscache_n_store_pages_over_limit;
++extern atomic_unchecked_t fscache_n_stores;
++extern atomic_unchecked_t fscache_n_stores_ok;
++extern atomic_unchecked_t fscache_n_stores_again;
++extern atomic_unchecked_t fscache_n_stores_nobufs;
++extern atomic_unchecked_t fscache_n_stores_oom;
++extern atomic_unchecked_t fscache_n_store_ops;
++extern atomic_unchecked_t fscache_n_store_calls;
++extern atomic_unchecked_t fscache_n_store_pages;
++extern atomic_unchecked_t fscache_n_store_radix_deletes;
++extern atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+-extern atomic_t fscache_n_store_vmscan_not_storing;
+-extern atomic_t fscache_n_store_vmscan_gone;
+-extern atomic_t fscache_n_store_vmscan_busy;
+-extern atomic_t fscache_n_store_vmscan_cancelled;
++extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++extern atomic_unchecked_t fscache_n_store_vmscan_gone;
++extern atomic_unchecked_t fscache_n_store_vmscan_busy;
++extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+
+-extern atomic_t fscache_n_marks;
+-extern atomic_t fscache_n_uncaches;
++extern atomic_unchecked_t fscache_n_marks;
++extern atomic_unchecked_t fscache_n_uncaches;
+
+-extern atomic_t fscache_n_acquires;
+-extern atomic_t fscache_n_acquires_null;
+-extern atomic_t fscache_n_acquires_no_cache;
+-extern atomic_t fscache_n_acquires_ok;
+-extern atomic_t fscache_n_acquires_nobufs;
+-extern atomic_t fscache_n_acquires_oom;
++extern atomic_unchecked_t fscache_n_acquires;
++extern atomic_unchecked_t fscache_n_acquires_null;
++extern atomic_unchecked_t fscache_n_acquires_no_cache;
++extern atomic_unchecked_t fscache_n_acquires_ok;
++extern atomic_unchecked_t fscache_n_acquires_nobufs;
++extern atomic_unchecked_t fscache_n_acquires_oom;
+
+-extern atomic_t fscache_n_updates;
+-extern atomic_t fscache_n_updates_null;
+-extern atomic_t fscache_n_updates_run;
++extern atomic_unchecked_t fscache_n_updates;
++extern atomic_unchecked_t fscache_n_updates_null;
++extern atomic_unchecked_t fscache_n_updates_run;
+
+-extern atomic_t fscache_n_relinquishes;
+-extern atomic_t fscache_n_relinquishes_null;
+-extern atomic_t fscache_n_relinquishes_waitcrt;
+-extern atomic_t fscache_n_relinquishes_retire;
++extern atomic_unchecked_t fscache_n_relinquishes;
++extern atomic_unchecked_t fscache_n_relinquishes_null;
++extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++extern atomic_unchecked_t fscache_n_relinquishes_retire;
+
+-extern atomic_t fscache_n_cookie_index;
+-extern atomic_t fscache_n_cookie_data;
+-extern atomic_t fscache_n_cookie_special;
++extern atomic_unchecked_t fscache_n_cookie_index;
++extern atomic_unchecked_t fscache_n_cookie_data;
++extern atomic_unchecked_t fscache_n_cookie_special;
+
+-extern atomic_t fscache_n_object_alloc;
+-extern atomic_t fscache_n_object_no_alloc;
+-extern atomic_t fscache_n_object_lookups;
+-extern atomic_t fscache_n_object_lookups_negative;
+-extern atomic_t fscache_n_object_lookups_positive;
+-extern atomic_t fscache_n_object_lookups_timed_out;
+-extern atomic_t fscache_n_object_created;
+-extern atomic_t fscache_n_object_avail;
+-extern atomic_t fscache_n_object_dead;
++extern atomic_unchecked_t fscache_n_object_alloc;
++extern atomic_unchecked_t fscache_n_object_no_alloc;
++extern atomic_unchecked_t fscache_n_object_lookups;
++extern atomic_unchecked_t fscache_n_object_lookups_negative;
++extern atomic_unchecked_t fscache_n_object_lookups_positive;
++extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
++extern atomic_unchecked_t fscache_n_object_created;
++extern atomic_unchecked_t fscache_n_object_avail;
++extern atomic_unchecked_t fscache_n_object_dead;
+
+-extern atomic_t fscache_n_checkaux_none;
+-extern atomic_t fscache_n_checkaux_okay;
+-extern atomic_t fscache_n_checkaux_update;
+-extern atomic_t fscache_n_checkaux_obsolete;
++extern atomic_unchecked_t fscache_n_checkaux_none;
++extern atomic_unchecked_t fscache_n_checkaux_okay;
++extern atomic_unchecked_t fscache_n_checkaux_update;
++extern atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ extern atomic_t fscache_n_cop_alloc_object;
+ extern atomic_t fscache_n_cop_lookup_object;
+@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
+ atomic_inc(stat);
+ }
+
++static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
++{
++ atomic_inc_unchecked(stat);
++}
++
+ static inline void fscache_stat_d(atomic_t *stat)
+ {
+ atomic_dec(stat);
+@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
+
+ #define __fscache_stat(stat) (NULL)
+ #define fscache_stat(stat) do {} while (0)
++#define fscache_stat_unchecked(stat) do {} while (0)
+ #define fscache_stat_d(stat) do {} while (0)
+ #endif
+
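The internal.h hunk keeps both build configurations working: with CONFIG_FSCACHE_STATS enabled, fscache_stat_unchecked() is a real inline increment; without it, the new do {} while (0) stub compiles the call away while remaining a single statement. A small self-contained sketch of the same two-arm pattern (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define FSCACHE_STATS 1          /* flip to 0 to mimic the stub build */

    static atomic_int n_acquires;    /* one counter standing in for the extern list */

    #if FSCACHE_STATS
    static inline void stat_inc(atomic_int *stat)
    {
        atomic_fetch_add(stat, 1);
    }
    #else
    /* do {} while (0) keeps the stub statement-shaped, so
     * `if (cond) stat_inc(&x);` still parses with the call compiled away. */
    #define stat_inc(stat) do {} while (0)
    #endif

    int main(void)
    {
        stat_inc(&n_acquires);
        printf("acquires: %d\n", atomic_load(&n_acquires));
        return 0;
    }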
+diff --git a/fs/fscache/object.c b/fs/fscache/object.c
+index b6b897c..0ffff9c 100644
+--- a/fs/fscache/object.c
++++ b/fs/fscache/object.c
+@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+ /* update the object metadata on disk */
+ case FSCACHE_OBJECT_UPDATING:
+ clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
+- fscache_stat(&fscache_n_updates_run);
++ fscache_stat_unchecked(&fscache_n_updates_run);
+ fscache_stat(&fscache_n_cop_update_object);
+ object->cache->ops->update_object(object);
+ fscache_stat_d(&fscache_n_cop_update_object);
+@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* handle the parent cache of this object being withdrawn from
+@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+- fscache_stat(&fscache_n_object_dead);
++ fscache_stat_unchecked(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* complain about the object being woken up once it is
+@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
+ parent->cookie->def->name, cookie->def->name,
+ object->cache->tag->name);
+
+- fscache_stat(&fscache_n_object_lookups);
++ fscache_stat_unchecked(&fscache_n_object_lookups);
+ fscache_stat(&fscache_n_cop_lookup_object);
+ ret = object->cache->ops->lookup_object(object);
+ fscache_stat_d(&fscache_n_cop_lookup_object);
+@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
+ if (ret == -ETIMEDOUT) {
+ /* probably stuck behind another object, so move this one to
+ * the back of the queue */
+- fscache_stat(&fscache_n_object_lookups_timed_out);
++ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ }
+
+@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
+
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_negative);
++ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
+
+ /* transit here to allow write requests to begin stacking up
+ * and read requests to begin returning ENODATA */
+@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
+ * result, in which case there may be data available */
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+- fscache_stat(&fscache_n_object_lookups_positive);
++ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
+
+ clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ } else {
+ ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+- fscache_stat(&fscache_n_object_created);
++ fscache_stat_unchecked(&fscache_n_object_created);
+
+ object->state = FSCACHE_OBJECT_AVAILABLE;
+ spin_unlock(&object->lock);
+@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
+ fscache_enqueue_dependents(object);
+
+ fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+- fscache_stat(&fscache_n_object_avail);
++ fscache_stat_unchecked(&fscache_n_object_avail);
+
+ _leave("");
+ }
+@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+ enum fscache_checkaux result;
+
+ if (!object->cookie->def->check_aux) {
+- fscache_stat(&fscache_n_checkaux_none);
++ fscache_stat_unchecked(&fscache_n_checkaux_none);
+ return FSCACHE_CHECKAUX_OKAY;
+ }
+
+@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+ switch (result) {
+ /* entry okay as is */
+ case FSCACHE_CHECKAUX_OKAY:
+- fscache_stat(&fscache_n_checkaux_okay);
++ fscache_stat_unchecked(&fscache_n_checkaux_okay);
+ break;
+
+ /* entry requires update */
+ case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+- fscache_stat(&fscache_n_checkaux_update);
++ fscache_stat_unchecked(&fscache_n_checkaux_update);
+ break;
+
+ /* entry requires deletion */
+ case FSCACHE_CHECKAUX_OBSOLETE:
+- fscache_stat(&fscache_n_checkaux_obsolete);
++ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
+ break;
+
+ default:
+diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
+index 30afdfa..2256596 100644
+--- a/fs/fscache/operation.c
++++ b/fs/fscache/operation.c
+@@ -17,7 +17,7 @@
+ #include <linux/slab.h>
+ #include "internal.h"
+
+-atomic_t fscache_op_debug_id;
++atomic_unchecked_t fscache_op_debug_id;
+ EXPORT_SYMBOL(fscache_op_debug_id);
+
+ /**
+@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
+ ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+- fscache_stat(&fscache_n_op_enqueue);
++ fscache_stat_unchecked(&fscache_n_op_enqueue);
+ switch (op->flags & FSCACHE_OP_TYPE) {
+ case FSCACHE_OP_ASYNC:
+ _debug("queue async");
+@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ if (op->processor)
+ fscache_enqueue_operation(op);
+- fscache_stat(&fscache_n_op_run);
++ fscache_stat_unchecked(&fscache_n_op_run);
+ }
+
+ /*
+@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+ if (object->n_ops > 1) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_in_progress, ==, 0);
+@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
+ object->n_exclusive++; /* reads and writes must wait */
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else {
+ /* not allowed to submit ops in any other state */
+@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
+ if (object->n_exclusive > 0) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_exclusive, ==, 0);
+@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
+ object->n_ops++;
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+- fscache_stat(&fscache_n_op_pend);
++ fscache_stat_unchecked(&fscache_n_op_pend);
+ ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_DYING ||
+ object->state == FSCACHE_OBJECT_LC_DYING ||
+ object->state == FSCACHE_OBJECT_WITHDRAWING) {
+- fscache_stat(&fscache_n_op_rejected);
++ fscache_stat_unchecked(&fscache_n_op_rejected);
+ ret = -ENOBUFS;
+ } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+ fscache_report_unexpected_submission(object, op, ostate);
+@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
+
+ ret = -EBUSY;
+ if (!list_empty(&op->pend_link)) {
+- fscache_stat(&fscache_n_op_cancelled);
++ fscache_stat_unchecked(&fscache_n_op_cancelled);
+ list_del_init(&op->pend_link);
+ object->n_ops--;
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
+ if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
+ BUG();
+
+- fscache_stat(&fscache_n_op_release);
++ fscache_stat_unchecked(&fscache_n_op_release);
+
+ if (op->release) {
+ op->release(op);
+@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
+ * lock, and defer it otherwise */
+ if (!spin_trylock(&object->lock)) {
+ _debug("defer put");
+- fscache_stat(&fscache_n_op_deferred_release);
++ fscache_stat_unchecked(&fscache_n_op_deferred_release);
+
+ cache = object->cache;
+ spin_lock(&cache->op_gc_list_lock);
+@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
+
+ _debug("GC DEFERRED REL OBJ%x OP%x",
+ object->debug_id, op->debug_id);
+- fscache_stat(&fscache_n_op_gc);
++ fscache_stat_unchecked(&fscache_n_op_gc);
+
+ ASSERTCMP(atomic_read(&op->usage), ==, 0);
+
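fscache_op_debug_id, changed above, is a global ticket dispenser: each operation takes a unique debug ID via atomic_inc_return(), and because the ID may legitimately wrap after 2^32 operations it is exactly the kind of counter the unchecked type exists for. A minimal sketch of the dispenser using an unsigned C11 atomic, where wrap-around is well defined:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Global ticket dispenser in the style of fscache_op_debug_id:
     * uniqueness matters for debugging, wrap-around is acceptable. */
    static atomic_uint op_debug_id;

    static unsigned int new_debug_id(void)
    {
        /* atomic_inc_return() analogue: add 1 and return the new value */
        return atomic_fetch_add(&op_debug_id, 1) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("OP%x\n", new_debug_id());
        return 0;
    }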
+diff --git a/fs/fscache/page.c b/fs/fscache/page.c
+index 3f7a59b..cf196cc 100644
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+ val = radix_tree_lookup(&cookie->stores, page->index);
+ if (!val) {
+ rcu_read_unlock();
+- fscache_stat(&fscache_n_store_vmscan_not_storing);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
+ __fscache_uncache_page(cookie, page);
+ return true;
+ }
+@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+ spin_unlock(&cookie->stores_lock);
+
+ if (xpage) {
+- fscache_stat(&fscache_n_store_vmscan_cancelled);
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ ASSERTCMP(xpage, ==, page);
+ } else {
+- fscache_stat(&fscache_n_store_vmscan_gone);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
+ }
+
+ wake_up_bit(&cookie->flags, 0);
+@@ -107,7 +107,7 @@ page_busy:
+ /* we might want to wait here, but that could deadlock the allocator as
+ * the work threads writing to the cache may all end up sleeping
+ * on memory allocation */
+- fscache_stat(&fscache_n_store_vmscan_busy);
++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
+ return false;
+ }
+ EXPORT_SYMBOL(__fscache_maybe_release_page);
+@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
+ FSCACHE_COOKIE_STORING_TAG);
+ if (!radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG)) {
+- fscache_stat(&fscache_n_store_radix_deletes);
++ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
+ xpage = radix_tree_delete(&cookie->stores, page->index);
+ }
+ spin_unlock(&cookie->stores_lock);
+@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
+
+ _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+
+- fscache_stat(&fscache_n_attr_changed_calls);
++ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
+
+ if (fscache_object_is_active(object)) {
+ fscache_stat(&fscache_n_cop_attr_changed);
+@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+
+- fscache_stat(&fscache_n_attr_changed);
++ fscache_stat_unchecked(&fscache_n_attr_changed);
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
+- fscache_stat(&fscache_n_attr_changed_nomem);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+ if (fscache_submit_exclusive_op(object, op) < 0)
+ goto nobufs;
+ spin_unlock(&cookie->lock);
+- fscache_stat(&fscache_n_attr_changed_ok);
++ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
+ fscache_put_operation(op);
+ _leave(" = 0");
+ return 0;
+@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
+ nobufs:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+- fscache_stat(&fscache_n_attr_changed_nobufs);
++ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
+ _leave(" = %d", -ENOBUFS);
+ return -ENOBUFS;
+ }
+@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
+ /* allocate a retrieval operation and attempt to submit it */
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op) {
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ return NULL;
+ }
+
+@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+ return 0;
+ }
+
+- fscache_stat(&fscache_n_retrievals_wait);
++ fscache_stat_unchecked(&fscache_n_retrievals_wait);
+
+ jif = jiffies;
+ if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) != 0) {
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ _leave(" = -ERESTARTSYS");
+ return -ERESTARTSYS;
+ }
+@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+ */
+ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ struct fscache_retrieval *op,
+- atomic_t *stat_op_waits,
+- atomic_t *stat_object_dead)
++ atomic_unchecked_t *stat_op_waits,
++ atomic_unchecked_t *stat_object_dead)
+ {
+ int ret;
+
+@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ goto check_if_dead;
+
+ _debug(">>> WT");
+- fscache_stat(stat_op_waits);
++ fscache_stat_unchecked(stat_op_waits);
+ if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) < 0) {
+@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+
+ check_if_dead:
+ if (unlikely(fscache_object_is_dead(object))) {
+- fscache_stat(stat_object_dead);
++ fscache_stat_unchecked(stat_object_dead);
+ return -ENOBUFS;
+ }
+ return 0;
+@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -429,7 +429,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+
+ _enter("%p,,%d,,,", cookie, *nr_pages);
+
+- fscache_stat(&fscache_n_retrievals);
++ fscache_stat_unchecked(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_retrieval_ops);
++ fscache_stat_unchecked(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+
+ error:
+ if (ret == -ENOMEM)
+- fscache_stat(&fscache_n_retrievals_nomem);
++ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_retrievals_intr);
++ fscache_stat_unchecked(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+- fscache_stat(&fscache_n_retrievals_nodata);
++ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ else
+- fscache_stat(&fscache_n_retrievals_ok);
++ fscache_stat_unchecked(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -545,7 +545,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_retrievals_nobufs);
++ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+
+ _enter("%p,%p,,,", cookie, page);
+
+- fscache_stat(&fscache_n_allocs);
++ fscache_stat_unchecked(&fscache_n_allocs);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+- fscache_stat(&fscache_n_alloc_ops);
++ fscache_stat_unchecked(&fscache_n_alloc_ops);
+
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
+
+ error:
+ if (ret == -ERESTARTSYS)
+- fscache_stat(&fscache_n_allocs_intr);
++ fscache_stat_unchecked(&fscache_n_allocs_intr);
+ else if (ret < 0)
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ else
+- fscache_stat(&fscache_n_allocs_ok);
++ fscache_stat_unchecked(&fscache_n_allocs_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+@@ -625,7 +625,7 @@ nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ nobufs:
+- fscache_stat(&fscache_n_allocs_nobufs);
++ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+
+ spin_lock(&cookie->stores_lock);
+
+- fscache_stat(&fscache_n_store_calls);
++ fscache_stat_unchecked(&fscache_n_store_calls);
+
+ /* find a page to store */
+ page = NULL;
+@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ page = results[0];
+ _debug("gang %d [%lx]", n, page->index);
+ if (page->index > op->store_limit) {
+- fscache_stat(&fscache_n_store_pages_over_limit);
++ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
+ goto superseded;
+ }
+
+@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+
+- fscache_stat(&fscache_n_store_pages);
++ fscache_stat_unchecked(&fscache_n_store_pages);
+ fscache_stat(&fscache_n_cop_write_page);
+ ret = object->cache->ops->write_page(op, page);
+ fscache_stat_d(&fscache_n_cop_write_page);
+@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERT(PageFsCache(page));
+
+- fscache_stat(&fscache_n_stores);
++ fscache_stat_unchecked(&fscache_n_stores);
+
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op)
+@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+
+- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->store_limit = object->store_limit;
+
+ if (fscache_submit_op(object, &op->op) < 0)
+@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+- fscache_stat(&fscache_n_store_ops);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_store_ops);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+
+ /* the work queue now carries its own ref on the object */
+ fscache_put_operation(&op->op);
+@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
+ return 0;
+
+ already_queued:
+- fscache_stat(&fscache_n_stores_again);
++ fscache_stat_unchecked(&fscache_n_stores_again);
+ already_pending:
+ spin_unlock(&cookie->stores_lock);
+ spin_unlock(&object->lock);
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_ok);
++ fscache_stat_unchecked(&fscache_n_stores_ok);
+ _leave(" = 0");
+ return 0;
+
+@@ -851,14 +851,14 @@ nobufs:
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+- fscache_stat(&fscache_n_stores_nobufs);
++ fscache_stat_unchecked(&fscache_n_stores_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+
+ nomem_free:
+ kfree(op);
+ nomem:
+- fscache_stat(&fscache_n_stores_oom);
++ fscache_stat_unchecked(&fscache_n_stores_oom);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+- fscache_stat(&fscache_n_uncaches);
++ fscache_stat_unchecked(&fscache_n_uncaches);
+
+ /* cache withdrawal may beat us to it */
+ if (!PageFsCache(page))
+@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
+ unsigned long loop;
+
+ #ifdef CONFIG_FSCACHE_STATS
+- atomic_add(pagevec->nr, &fscache_n_marks);
++ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
+ #endif
+
+ for (loop = 0; loop < pagevec->nr; loop++) {
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 73c0bd7..adb2f79 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -18,95 +18,95 @@
+ /*
+ * operation counters
+ */
+-atomic_t fscache_n_op_pend;
+-atomic_t fscache_n_op_run;
+-atomic_t fscache_n_op_enqueue;
+-atomic_t fscache_n_op_requeue;
+-atomic_t fscache_n_op_deferred_release;
+-atomic_t fscache_n_op_release;
+-atomic_t fscache_n_op_gc;
+-atomic_t fscache_n_op_cancelled;
+-atomic_t fscache_n_op_rejected;
++atomic_unchecked_t fscache_n_op_pend;
++atomic_unchecked_t fscache_n_op_run;
++atomic_unchecked_t fscache_n_op_enqueue;
++atomic_unchecked_t fscache_n_op_requeue;
++atomic_unchecked_t fscache_n_op_deferred_release;
++atomic_unchecked_t fscache_n_op_release;
++atomic_unchecked_t fscache_n_op_gc;
++atomic_unchecked_t fscache_n_op_cancelled;
++atomic_unchecked_t fscache_n_op_rejected;
+
+-atomic_t fscache_n_attr_changed;
+-atomic_t fscache_n_attr_changed_ok;
+-atomic_t fscache_n_attr_changed_nobufs;
+-atomic_t fscache_n_attr_changed_nomem;
+-atomic_t fscache_n_attr_changed_calls;
++atomic_unchecked_t fscache_n_attr_changed;
++atomic_unchecked_t fscache_n_attr_changed_ok;
++atomic_unchecked_t fscache_n_attr_changed_nobufs;
++atomic_unchecked_t fscache_n_attr_changed_nomem;
++atomic_unchecked_t fscache_n_attr_changed_calls;
+
+-atomic_t fscache_n_allocs;
+-atomic_t fscache_n_allocs_ok;
+-atomic_t fscache_n_allocs_wait;
+-atomic_t fscache_n_allocs_nobufs;
+-atomic_t fscache_n_allocs_intr;
+-atomic_t fscache_n_allocs_object_dead;
+-atomic_t fscache_n_alloc_ops;
+-atomic_t fscache_n_alloc_op_waits;
++atomic_unchecked_t fscache_n_allocs;
++atomic_unchecked_t fscache_n_allocs_ok;
++atomic_unchecked_t fscache_n_allocs_wait;
++atomic_unchecked_t fscache_n_allocs_nobufs;
++atomic_unchecked_t fscache_n_allocs_intr;
++atomic_unchecked_t fscache_n_allocs_object_dead;
++atomic_unchecked_t fscache_n_alloc_ops;
++atomic_unchecked_t fscache_n_alloc_op_waits;
+
+-atomic_t fscache_n_retrievals;
+-atomic_t fscache_n_retrievals_ok;
+-atomic_t fscache_n_retrievals_wait;
+-atomic_t fscache_n_retrievals_nodata;
+-atomic_t fscache_n_retrievals_nobufs;
+-atomic_t fscache_n_retrievals_intr;
+-atomic_t fscache_n_retrievals_nomem;
+-atomic_t fscache_n_retrievals_object_dead;
+-atomic_t fscache_n_retrieval_ops;
+-atomic_t fscache_n_retrieval_op_waits;
++atomic_unchecked_t fscache_n_retrievals;
++atomic_unchecked_t fscache_n_retrievals_ok;
++atomic_unchecked_t fscache_n_retrievals_wait;
++atomic_unchecked_t fscache_n_retrievals_nodata;
++atomic_unchecked_t fscache_n_retrievals_nobufs;
++atomic_unchecked_t fscache_n_retrievals_intr;
++atomic_unchecked_t fscache_n_retrievals_nomem;
++atomic_unchecked_t fscache_n_retrievals_object_dead;
++atomic_unchecked_t fscache_n_retrieval_ops;
++atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+-atomic_t fscache_n_stores;
+-atomic_t fscache_n_stores_ok;
+-atomic_t fscache_n_stores_again;
+-atomic_t fscache_n_stores_nobufs;
+-atomic_t fscache_n_stores_oom;
+-atomic_t fscache_n_store_ops;
+-atomic_t fscache_n_store_calls;
+-atomic_t fscache_n_store_pages;
+-atomic_t fscache_n_store_radix_deletes;
+-atomic_t fscache_n_store_pages_over_limit;
++atomic_unchecked_t fscache_n_stores;
++atomic_unchecked_t fscache_n_stores_ok;
++atomic_unchecked_t fscache_n_stores_again;
++atomic_unchecked_t fscache_n_stores_nobufs;
++atomic_unchecked_t fscache_n_stores_oom;
++atomic_unchecked_t fscache_n_store_ops;
++atomic_unchecked_t fscache_n_store_calls;
++atomic_unchecked_t fscache_n_store_pages;
++atomic_unchecked_t fscache_n_store_radix_deletes;
++atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+-atomic_t fscache_n_store_vmscan_not_storing;
+-atomic_t fscache_n_store_vmscan_gone;
+-atomic_t fscache_n_store_vmscan_busy;
+-atomic_t fscache_n_store_vmscan_cancelled;
++atomic_unchecked_t fscache_n_store_vmscan_not_storing;
++atomic_unchecked_t fscache_n_store_vmscan_gone;
++atomic_unchecked_t fscache_n_store_vmscan_busy;
++atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+
+-atomic_t fscache_n_marks;
+-atomic_t fscache_n_uncaches;
++atomic_unchecked_t fscache_n_marks;
++atomic_unchecked_t fscache_n_uncaches;
+
+-atomic_t fscache_n_acquires;
+-atomic_t fscache_n_acquires_null;
+-atomic_t fscache_n_acquires_no_cache;
+-atomic_t fscache_n_acquires_ok;
+-atomic_t fscache_n_acquires_nobufs;
+-atomic_t fscache_n_acquires_oom;
++atomic_unchecked_t fscache_n_acquires;
++atomic_unchecked_t fscache_n_acquires_null;
++atomic_unchecked_t fscache_n_acquires_no_cache;
++atomic_unchecked_t fscache_n_acquires_ok;
++atomic_unchecked_t fscache_n_acquires_nobufs;
++atomic_unchecked_t fscache_n_acquires_oom;
+
+-atomic_t fscache_n_updates;
+-atomic_t fscache_n_updates_null;
+-atomic_t fscache_n_updates_run;
++atomic_unchecked_t fscache_n_updates;
++atomic_unchecked_t fscache_n_updates_null;
++atomic_unchecked_t fscache_n_updates_run;
+
+-atomic_t fscache_n_relinquishes;
+-atomic_t fscache_n_relinquishes_null;
+-atomic_t fscache_n_relinquishes_waitcrt;
+-atomic_t fscache_n_relinquishes_retire;
++atomic_unchecked_t fscache_n_relinquishes;
++atomic_unchecked_t fscache_n_relinquishes_null;
++atomic_unchecked_t fscache_n_relinquishes_waitcrt;
++atomic_unchecked_t fscache_n_relinquishes_retire;
+
+-atomic_t fscache_n_cookie_index;
+-atomic_t fscache_n_cookie_data;
+-atomic_t fscache_n_cookie_special;
++atomic_unchecked_t fscache_n_cookie_index;
++atomic_unchecked_t fscache_n_cookie_data;
++atomic_unchecked_t fscache_n_cookie_special;
+
+-atomic_t fscache_n_object_alloc;
+-atomic_t fscache_n_object_no_alloc;
+-atomic_t fscache_n_object_lookups;
+-atomic_t fscache_n_object_lookups_negative;
+-atomic_t fscache_n_object_lookups_positive;
+-atomic_t fscache_n_object_lookups_timed_out;
+-atomic_t fscache_n_object_created;
+-atomic_t fscache_n_object_avail;
+-atomic_t fscache_n_object_dead;
++atomic_unchecked_t fscache_n_object_alloc;
++atomic_unchecked_t fscache_n_object_no_alloc;
++atomic_unchecked_t fscache_n_object_lookups;
++atomic_unchecked_t fscache_n_object_lookups_negative;
++atomic_unchecked_t fscache_n_object_lookups_positive;
++atomic_unchecked_t fscache_n_object_lookups_timed_out;
++atomic_unchecked_t fscache_n_object_created;
++atomic_unchecked_t fscache_n_object_avail;
++atomic_unchecked_t fscache_n_object_dead;
+
+-atomic_t fscache_n_checkaux_none;
+-atomic_t fscache_n_checkaux_okay;
+-atomic_t fscache_n_checkaux_update;
+-atomic_t fscache_n_checkaux_obsolete;
++atomic_unchecked_t fscache_n_checkaux_none;
++atomic_unchecked_t fscache_n_checkaux_okay;
++atomic_unchecked_t fscache_n_checkaux_update;
++atomic_unchecked_t fscache_n_checkaux_obsolete;
+
+ atomic_t fscache_n_cop_alloc_object;
+ atomic_t fscache_n_cop_lookup_object;
+@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
+ seq_puts(m, "FS-Cache statistics\n");
+
+ seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+- atomic_read(&fscache_n_cookie_index),
+- atomic_read(&fscache_n_cookie_data),
+- atomic_read(&fscache_n_cookie_special));
++ atomic_read_unchecked(&fscache_n_cookie_index),
++ atomic_read_unchecked(&fscache_n_cookie_data),
++ atomic_read_unchecked(&fscache_n_cookie_special));
+
+ seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+- atomic_read(&fscache_n_object_alloc),
+- atomic_read(&fscache_n_object_no_alloc),
+- atomic_read(&fscache_n_object_avail),
+- atomic_read(&fscache_n_object_dead));
++ atomic_read_unchecked(&fscache_n_object_alloc),
++ atomic_read_unchecked(&fscache_n_object_no_alloc),
++ atomic_read_unchecked(&fscache_n_object_avail),
++ atomic_read_unchecked(&fscache_n_object_dead));
+ seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+- atomic_read(&fscache_n_checkaux_none),
+- atomic_read(&fscache_n_checkaux_okay),
+- atomic_read(&fscache_n_checkaux_update),
+- atomic_read(&fscache_n_checkaux_obsolete));
++ atomic_read_unchecked(&fscache_n_checkaux_none),
++ atomic_read_unchecked(&fscache_n_checkaux_okay),
++ atomic_read_unchecked(&fscache_n_checkaux_update),
++ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
+
+ seq_printf(m, "Pages : mrk=%u unc=%u\n",
+- atomic_read(&fscache_n_marks),
+- atomic_read(&fscache_n_uncaches));
++ atomic_read_unchecked(&fscache_n_marks),
++ atomic_read_unchecked(&fscache_n_uncaches));
+
+ seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+ " oom=%u\n",
+- atomic_read(&fscache_n_acquires),
+- atomic_read(&fscache_n_acquires_null),
+- atomic_read(&fscache_n_acquires_no_cache),
+- atomic_read(&fscache_n_acquires_ok),
+- atomic_read(&fscache_n_acquires_nobufs),
+- atomic_read(&fscache_n_acquires_oom));
++ atomic_read_unchecked(&fscache_n_acquires),
++ atomic_read_unchecked(&fscache_n_acquires_null),
++ atomic_read_unchecked(&fscache_n_acquires_no_cache),
++ atomic_read_unchecked(&fscache_n_acquires_ok),
++ atomic_read_unchecked(&fscache_n_acquires_nobufs),
++ atomic_read_unchecked(&fscache_n_acquires_oom));
+
+ seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
+- atomic_read(&fscache_n_object_lookups),
+- atomic_read(&fscache_n_object_lookups_negative),
+- atomic_read(&fscache_n_object_lookups_positive),
+- atomic_read(&fscache_n_object_created),
+- atomic_read(&fscache_n_object_lookups_timed_out));
++ atomic_read_unchecked(&fscache_n_object_lookups),
++ atomic_read_unchecked(&fscache_n_object_lookups_negative),
++ atomic_read_unchecked(&fscache_n_object_lookups_positive),
++ atomic_read_unchecked(&fscache_n_object_created),
++ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
+
+ seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+- atomic_read(&fscache_n_updates),
+- atomic_read(&fscache_n_updates_null),
+- atomic_read(&fscache_n_updates_run));
++ atomic_read_unchecked(&fscache_n_updates),
++ atomic_read_unchecked(&fscache_n_updates_null),
++ atomic_read_unchecked(&fscache_n_updates_run));
+
+ seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+- atomic_read(&fscache_n_relinquishes),
+- atomic_read(&fscache_n_relinquishes_null),
+- atomic_read(&fscache_n_relinquishes_waitcrt),
+- atomic_read(&fscache_n_relinquishes_retire));
++ atomic_read_unchecked(&fscache_n_relinquishes),
++ atomic_read_unchecked(&fscache_n_relinquishes_null),
++ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
++ atomic_read_unchecked(&fscache_n_relinquishes_retire));
+
+ seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+- atomic_read(&fscache_n_attr_changed),
+- atomic_read(&fscache_n_attr_changed_ok),
+- atomic_read(&fscache_n_attr_changed_nobufs),
+- atomic_read(&fscache_n_attr_changed_nomem),
+- atomic_read(&fscache_n_attr_changed_calls));
++ atomic_read_unchecked(&fscache_n_attr_changed),
++ atomic_read_unchecked(&fscache_n_attr_changed_ok),
++ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
++ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
++ atomic_read_unchecked(&fscache_n_attr_changed_calls));
+
+ seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
+- atomic_read(&fscache_n_allocs),
+- atomic_read(&fscache_n_allocs_ok),
+- atomic_read(&fscache_n_allocs_wait),
+- atomic_read(&fscache_n_allocs_nobufs),
+- atomic_read(&fscache_n_allocs_intr));
++ atomic_read_unchecked(&fscache_n_allocs),
++ atomic_read_unchecked(&fscache_n_allocs_ok),
++ atomic_read_unchecked(&fscache_n_allocs_wait),
++ atomic_read_unchecked(&fscache_n_allocs_nobufs),
++ atomic_read_unchecked(&fscache_n_allocs_intr));
+ seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_alloc_ops),
+- atomic_read(&fscache_n_alloc_op_waits),
+- atomic_read(&fscache_n_allocs_object_dead));
++ atomic_read_unchecked(&fscache_n_alloc_ops),
++ atomic_read_unchecked(&fscache_n_alloc_op_waits),
++ atomic_read_unchecked(&fscache_n_allocs_object_dead));
+
+ seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+ " int=%u oom=%u\n",
+- atomic_read(&fscache_n_retrievals),
+- atomic_read(&fscache_n_retrievals_ok),
+- atomic_read(&fscache_n_retrievals_wait),
+- atomic_read(&fscache_n_retrievals_nodata),
+- atomic_read(&fscache_n_retrievals_nobufs),
+- atomic_read(&fscache_n_retrievals_intr),
+- atomic_read(&fscache_n_retrievals_nomem));
++ atomic_read_unchecked(&fscache_n_retrievals),
++ atomic_read_unchecked(&fscache_n_retrievals_ok),
++ atomic_read_unchecked(&fscache_n_retrievals_wait),
++ atomic_read_unchecked(&fscache_n_retrievals_nodata),
++ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
++ atomic_read_unchecked(&fscache_n_retrievals_intr),
++ atomic_read_unchecked(&fscache_n_retrievals_nomem));
+ seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
+- atomic_read(&fscache_n_retrieval_ops),
+- atomic_read(&fscache_n_retrieval_op_waits),
+- atomic_read(&fscache_n_retrievals_object_dead));
++ atomic_read_unchecked(&fscache_n_retrieval_ops),
++ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
++ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
+
+ seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+- atomic_read(&fscache_n_stores),
+- atomic_read(&fscache_n_stores_ok),
+- atomic_read(&fscache_n_stores_again),
+- atomic_read(&fscache_n_stores_nobufs),
+- atomic_read(&fscache_n_stores_oom));
++ atomic_read_unchecked(&fscache_n_stores),
++ atomic_read_unchecked(&fscache_n_stores_ok),
++ atomic_read_unchecked(&fscache_n_stores_again),
++ atomic_read_unchecked(&fscache_n_stores_nobufs),
++ atomic_read_unchecked(&fscache_n_stores_oom));
+ seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+- atomic_read(&fscache_n_store_ops),
+- atomic_read(&fscache_n_store_calls),
+- atomic_read(&fscache_n_store_pages),
+- atomic_read(&fscache_n_store_radix_deletes),
+- atomic_read(&fscache_n_store_pages_over_limit));
++ atomic_read_unchecked(&fscache_n_store_ops),
++ atomic_read_unchecked(&fscache_n_store_calls),
++ atomic_read_unchecked(&fscache_n_store_pages),
++ atomic_read_unchecked(&fscache_n_store_radix_deletes),
++ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
+
+ seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+- atomic_read(&fscache_n_store_vmscan_not_storing),
+- atomic_read(&fscache_n_store_vmscan_gone),
+- atomic_read(&fscache_n_store_vmscan_busy),
+- atomic_read(&fscache_n_store_vmscan_cancelled));
++ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
++ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
++ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
+
+ seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
+- atomic_read(&fscache_n_op_pend),
+- atomic_read(&fscache_n_op_run),
+- atomic_read(&fscache_n_op_enqueue),
+- atomic_read(&fscache_n_op_cancelled),
+- atomic_read(&fscache_n_op_rejected));
++ atomic_read_unchecked(&fscache_n_op_pend),
++ atomic_read_unchecked(&fscache_n_op_run),
++ atomic_read_unchecked(&fscache_n_op_enqueue),
++ atomic_read_unchecked(&fscache_n_op_cancelled),
++ atomic_read_unchecked(&fscache_n_op_rejected));
+ seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
+- atomic_read(&fscache_n_op_deferred_release),
+- atomic_read(&fscache_n_op_release),
+- atomic_read(&fscache_n_op_gc));
++ atomic_read_unchecked(&fscache_n_op_deferred_release),
++ atomic_read_unchecked(&fscache_n_op_release),
++ atomic_read_unchecked(&fscache_n_op_gc));
+
+ seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+ atomic_read(&fscache_n_cop_alloc_object),
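The stats.c hunk shows the read side: fscache_stats_show() only snapshots each counter for /proc/fs/fscache/stats, so switching every atomic_read() to atomic_read_unchecked() changes the accessor, not the report format, and userspace parsers of the file are unaffected. A sketch of the snapshot-and-print shape with a few stand-in counters:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A few counters standing in for the fscache_n_* family. */
    static atomic_uint n_cookie_index, n_cookie_data, n_cookie_special;

    /* Shaped like fscache_stats_show(): snapshot each counter and print
     * one fixed-format line per group. */
    static void stats_show(FILE *m)
    {
        fprintf(m, "FS-Cache statistics\n");
        fprintf(m, "Cookies: idx=%u dat=%u spc=%u\n",
                atomic_load(&n_cookie_index),
                atomic_load(&n_cookie_data),
                atomic_load(&n_cookie_special));
    }

    int main(void)
    {
        atomic_fetch_add(&n_cookie_data, 2);
        stats_show(stdout);
        return 0;
    }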
+diff --git a/fs/fuse/control.c b/fs/fuse/control.c
+index 42593c5..0c6d731 100644
+--- a/fs/fuse/control.c
++++ b/fs/fuse/control.c
+@@ -347,6 +347,7 @@ static struct file_system_type fuse_ctl_fs_type = {
+ .mount = fuse_ctl_mount,
+ .kill_sb = fuse_ctl_kill_sb,
+ };
++MODULE_ALIAS_FS("fusectl");
+
+ int __init fuse_ctl_init(void)
+ {
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 3426521..3b75162 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -587,10 +587,12 @@ static int __init cuse_init(void)
+ INIT_LIST_HEAD(&cuse_conntbl[i]);
+
+ /* inherit and extend fuse_dev_operations */
+- cuse_channel_fops = fuse_dev_operations;
+- cuse_channel_fops.owner = THIS_MODULE;
+- cuse_channel_fops.open = cuse_channel_open;
+- cuse_channel_fops.release = cuse_channel_release;
++ pax_open_kernel();
++ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
++ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
++ *(void **)&cuse_channel_fops.open = cuse_channel_open;
++ *(void **)&cuse_channel_fops.release = cuse_channel_release;
++ pax_close_kernel();
+
+ cuse_class = class_create(THIS_MODULE, "cuse");
+ if (IS_ERR(cuse_class))
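The cuse.c hunk adapts to PaX constification: structures of function pointers such as struct file_operations are placed in read-only memory, so cuse can no longer build its fops by plain struct assignment. Instead the kernel mapping is briefly opened for writing, the template is memcpy'd in, the three members are patched through forced pointer writes, and the mapping is closed again. A Linux userspace analogy follows, using mprotect() as a stand-in for pax_open_kernel()/pax_close_kernel(); the struct and function names are invented for the sketch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* The fops table lives in a read-only page and is only briefly
     * made writable, mimicking the pax_open_kernel() window. */
    struct fops { int (*open)(void); int (*release)(void); };

    static int template_open(void) { return 1; }
    static int cuse_open(void)     { return 2; }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        struct fops *cuse_fops;

        if (posix_memalign((void **)&cuse_fops, pagesz, pagesz))
            return 1;

        struct fops template = { .open = template_open, .release = NULL };

        mprotect(cuse_fops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        memcpy(cuse_fops, &template, sizeof(template));      /* inherit template   */
        cuse_fops->open = cuse_open;                         /* then extend it     */
        mprotect(cuse_fops, pagesz, PROT_READ);              /* pax_close_kernel() */

        printf("open() -> %d\n", cuse_fops->open());
        return 0;  /* page is left read-only; the process exit reclaims it */
    }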
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 5c029fb..96e676c 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+ ret = 0;
+ pipe_lock(pipe);
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 06e2f73..e6c5fc8 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1150,7 +1150,7 @@ static char *read_link(struct dentry *dentry)
+ return link;
+ }
+
+-static void free_link(char *link)
++static void free_link(const char *link)
+ {
+ if (!IS_ERR(link))
+ free_page((unsigned long) link);
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 912c250..f0aee59 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1094,6 +1094,7 @@ static struct file_system_type fuse_fs_type = {
+ .mount = fuse_mount,
+ .kill_sb = fuse_kill_sb_anon,
+ };
++MODULE_ALIAS_FS("fuse");
+
+ #ifdef CONFIG_BLOCK
+ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
+@@ -1123,6 +1124,7 @@ static struct file_system_type fuseblk_fs_type = {
+ .kill_sb = fuse_kill_sb_blk,
+ .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
+ };
++MODULE_ALIAS_FS("fuseblk");
+
+ static inline int register_fuseblk(void)
+ {
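The recurring one-line MODULE_ALIAS_FS() additions throughout this patch backport the upstream module-autoloading fix: `mount -t foo` makes the kernel request the module alias "fs-foo", and a filesystem built as a module never loads automatically without that alias. Upstream defines the macro as MODULE_ALIAS("fs-" name), which just stamps an alias= string into the module's .modinfo section; the ELF-section sketch below imitates that in userspace (the macro and section names are invented, GCC/clang attribute syntax assumed).

    #include <stdio.h>

    /* Userspace analogy of MODULE_ALIAS_FS("fuse"): embed an
     * "alias=fs-fuse" string in a dedicated ELF section, the way
     * MODULE_ALIAS() populates .modinfo for modprobe to resolve. */
    #define MODULE_INFO_SKETCH(tag, value) \
        static const char modinfo_##tag[] \
            __attribute__((section(".modinfo.sketch"), used)) = #tag "=" value

    #define MODULE_ALIAS_FS_SKETCH(name) MODULE_INFO_SKETCH(alias, "fs-" name)

    MODULE_ALIAS_FS_SKETCH("fuse");

    int main(void)
    {
        /* `objdump -s -j .modinfo.sketch ./a.out` would show the string too */
        printf("%s\n", modinfo_alias);
        return 0;
    }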
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index cfd4959..a780959 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1490,7 +1490,7 @@ out:
+
+ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ kfree(s);
+ }
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index cb23c2b..2fa1ea5 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -19,6 +19,7 @@
+ #include <linux/mount.h>
+ #include <linux/gfs2_ondisk.h>
+ #include <linux/quotaops.h>
++#include <linux/module.h>
+
+ #include "gfs2.h"
+ #include "incore.h"
+@@ -1395,6 +1396,7 @@ struct file_system_type gfs2_fs_type = {
+ .kill_sb = gfs2_kill_sb,
+ .owner = THIS_MODULE,
+ };
++MODULE_ALIAS_FS("gfs2");
+
+ struct file_system_type gfs2meta_fs_type = {
+ .name = "gfs2meta",
+@@ -1402,4 +1404,4 @@ struct file_system_type gfs2meta_fs_type = {
+ .mount = gfs2_mount_meta,
+ .owner = THIS_MODULE,
+ };
+-
++MODULE_ALIAS_FS("gfs2meta");
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 1b55f70..bd6c289 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -460,6 +460,7 @@ static struct file_system_type hfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("hfs");
+
+ static void hfs_init_once(void *p)
+ {
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index d24a9b6..b398147 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -582,6 +582,7 @@ static struct file_system_type hfsplus_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("hfsplus");
+
+ static void hfsplus_init_once(void *p)
+ {
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 2f72da5..7ee87b1 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -999,6 +999,7 @@ static struct file_system_type hostfs_type = {
+ .kill_sb = hostfs_kill_sb,
+ .fs_flags = 0,
+ };
++MODULE_ALIAS_FS("hostfs");
+
+ static int __init init_hostfs(void)
+ {
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index f760c15..61b7191 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -691,6 +691,7 @@ static struct file_system_type hpfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("hpfs");
+
+ static int __init init_hpfs_fs(void)
+ {
+diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
+index f590b11..414cf4b 100644
+--- a/fs/hppfs/hppfs.c
++++ b/fs/hppfs/hppfs.c
+@@ -758,6 +758,7 @@ static struct file_system_type hppfs_type = {
+ .kill_sb = kill_anon_super,
+ .fs_flags = 0,
+ };
++MODULE_ALIAS_FS("hppfs");
+
+ static int __init init_hppfs(void)
+ {
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 0aa424a..c5563a6 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -134,6 +134,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+ struct hstate *h = hstate_file(file);
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+@@ -146,18 +147,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ start_addr = mm->free_area_cache;
+
+ if (len <= mm->cached_hole_size)
+- start_addr = TASK_UNMAPPED_BASE;
++ start_addr = mm->mmap_base;
+
+ full_search:
+ addr = ALIGN(start_addr, huge_page_size(h));
+@@ -169,15 +173,17 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
++ mm->free_area_cache = addr + len;
+ return addr;
++ }
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
+ }
+@@ -896,8 +902,9 @@ static struct file_system_type hugetlbfs_fs_type = {
+ .mount = hugetlbfs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("hugetlbfs");
+
+-static struct vfsmount *hugetlbfs_vfsmount;
++struct vfsmount *hugetlbfs_vfsmount;
+
+ static int can_do_hugetlb_shm(void)
+ {
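The hugetlbfs hunk rewires the huge-page address search for grsecurity ASLR: the search starts from the randomized mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE, a per-mm random offset is mixed in via gr_rand_threadstack_offset(), and the bare `addr + len <= vma->vm_start` test becomes check_heap_stack_gap(), which also insists on a guard gap before the next mapping. A simplified userspace sketch of such a first-fit search over a sorted VMA list, with a fixed GUARD_GAP standing in for the real gap policy:

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start, end; };

    #define GUARD_GAP 0x10000UL   /* stand-in for the heap/stack gap check */

    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len)
    {
        return !next || addr + len + GUARD_GAP <= next->start;
    }

    /* First-fit walk in the shape of the patched hugetlb search: start at
     * a (randomizable) base and accept a hole only if the gap check passes. */
    static unsigned long find_area(const struct vma *v, int n,
                                   unsigned long base, unsigned long len)
    {
        unsigned long addr = base;

        for (int i = 0; i <= n; i++) {
            const struct vma *next = (i < n) ? &v[i] : NULL;
            if (next && next->end <= addr)
                continue;           /* VMA entirely below the cursor */
            if (gap_ok(next, addr, len))
                return addr;
            addr = next->end;       /* jump past the blocking VMA */
        }
        return 0;                   /* -ENOMEM in the kernel */
    }

    int main(void)
    {
        struct vma v[] = { { 0x400000, 0x500000 }, { 0x800000, 0x900000 } };
        printf("hole at %#lx\n", find_area(v, 2, 0x400000, 0x200000));
        return 0;
    }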
+diff --git a/fs/inode.c b/fs/inode.c
+index e2d3633..e6f3833 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
+
+ #ifdef CONFIG_SMP
+ if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+- static atomic_t shared_last_ino;
+- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
++ static atomic_unchecked_t shared_last_ino;
++ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
+
+ res = next - LAST_INO_BATCH;
+ }
+@@ -855,8 +855,7 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
+ struct file_system_type *type = inode->i_sb->s_type;
+
+ /* Set new key only if filesystem hasn't already changed it */
+- if (!lockdep_match_class(&inode->i_mutex,
+- &type->i_mutex_key)) {
++ if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
+ /*
+ * ensure nobody is actually holding i_mutex
+ */
+@@ -883,6 +882,7 @@ void unlock_new_inode(struct inode *inode)
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode->i_state & I_NEW));
+ inode->i_state &= ~I_NEW;
++ smp_mb();
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
+ }
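
Two independent changes land in fs/inode.c. First, get_next_ino()'s batch counter becomes atomic_unchecked_t, the PaX REFCOUNT escape hatch for counters that are allowed to wrap (REFCOUNT traps on overflow of ordinary atomic_t, which would be a false positive here; the nlm_cookie and fsnotify cookie conversions below are the same idea). Second, unlock_new_inode() gains an smp_mb() so the store clearing I_NEW is ordered before the waiter check inside wake_up_bit(). The wrap-tolerance point in miniature, as plain C rather than kernel code:

    /* Illustration only: an inode-number generator just needs
     * distinct-ish values, so modular wraparound is harmless --
     * unlike a reference count, where a wrap means use-after-free. */
    unsigned int next_ino_model(unsigned int *counter, unsigned int batch)
    {
        *counter += batch;      /* may wrap: acceptable for i_ino */
        return *counter - batch;
    }
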
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index a5f25a7..8ac9cc8 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -1539,6 +1539,8 @@ static struct file_system_type iso9660_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("iso9660");
++MODULE_ALIAS("iso9660");
+
+ static int __init init_iso9660_fs(void)
+ {
+@@ -1576,5 +1578,3 @@ static void __exit exit_iso9660_fs(void)
+ module_init(init_iso9660_fs)
+ module_exit(exit_iso9660_fs)
+ MODULE_LICENSE("GPL");
+-/* Actual filesystem name is iso9660, as requested in filesystems.c */
+-MODULE_ALIAS("iso9660");
+diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
+index e513f19..2ab1351 100644
+--- a/fs/jffs2/erase.c
++++ b/fs/jffs2/erase.c
+@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
+ struct jffs2_unknown_node marker = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = cpu_to_je32(c->cleanmarker_size)
++ .totlen = cpu_to_je32(c->cleanmarker_size),
++ .hdr_crc = cpu_to_je32(0)
+ };
+
+ jffs2_prealloc_raw_node_refs(c, jeb, 1);
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index e7e9744..0de4fd9 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -357,6 +357,7 @@ static struct file_system_type jffs2_fs_type = {
+ .mount = jffs2_mount,
+ .kill_sb = jffs2_kill_sb,
+ };
++MODULE_ALIAS_FS("jffs2");
+
+ static int __init init_jffs2_fs(void)
+ {
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index 464cd76..3a3ed7e 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
+ {
+ .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
+- .totlen = constant_cpu_to_je32(8)
++ .totlen = constant_cpu_to_je32(8),
++ .hdr_crc = constant_cpu_to_je32(0)
+ };
+
+ /*
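
Both JFFS2 cleanmarker templates gain an explicit .hdr_crc initializer. Semantically this is a no-op, since C zero-initializes members omitted from a designated initializer; the plausible motivation is the stricter warning set this patchset builds with (missing-field-initializer warnings). The equivalence, for reference:

    struct node { unsigned short magic, nodetype; unsigned int totlen, hdr_crc; };
    struct node a = { .magic = 0x1985 };               /* hdr_crc == 0 */
    struct node b = { .magic = 0x1985, .hdr_crc = 0 }; /* identical */
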
+diff --git a/fs/jfs/super.c b/fs/jfs/super.c
+index a44eff076..a4bf76a 100644
+--- a/fs/jfs/super.c
++++ b/fs/jfs/super.c
+@@ -780,6 +780,7 @@ static struct file_system_type jfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("jfs");
+
+ static void init_once(void *foo)
+ {
+@@ -802,7 +803,7 @@ static int __init init_jfs_fs(void)
+
+ jfs_inode_cachep =
+ kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
+- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
++ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
+ init_once);
+ if (jfs_inode_cachep == NULL)
+ return -ENOMEM;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index f6d411e..e82a08d 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
++ char d_name[sizeof(next->d_iname)];
++ const unsigned char *name;
++
+ next = list_entry(p, struct dentry, d_u.d_child);
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+
+ spin_unlock(&next->d_lock);
+ spin_unlock(&dentry->d_lock);
+- if (filldir(dirent, next->d_name.name,
++ name = next->d_name.name;
++ if (name == next->d_iname) {
++ memcpy(d_name, name, next->d_name.len);
++ name = d_name;
++ }
++ if (filldir(dirent, name,
+ next->d_name.len, filp->f_pos,
+ next->d_inode->i_ino,
+ dt_type(next->d_inode)) < 0)
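
In dcache_readdir() the name may live in the dentry's inline d_iname array. The hunk snapshots such names into a stack buffer before the locks are dropped for the filldir() callback, since a concurrent rename can rewrite the inline storage out from under the caller; long names live in separately allocated storage and keep a stable pointer. The pattern as a standalone sketch (assumed rationale):

    #include <string.h>

    /* Return a pointer that stays valid after the source's lock is
     * dropped: inline storage gets copied, heap storage is stable. */
    static const char *stable_name(const char *name, size_t len,
                                   const char *inline_buf, char *scratch)
    {
        if (name == inline_buf) {
            memcpy(scratch, name, len);   /* caller's scratch >= len */
            return scratch;
        }
        return name;
    }
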
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 8392cb8..80d6193 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
+ /*
+ * Cookie counter for NLM requests
+ */
+-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
++static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
+
+ void nlmclnt_next_cookie(struct nlm_cookie *c)
+ {
+- u32 cookie = atomic_inc_return(&nlm_cookie);
++ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
+
+ memcpy(c->data, &cookie, 4);
+ c->len=4;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 2444780..2544030 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -295,7 +295,7 @@ int lockd_up(void)
+ svc_sock_update_bufs(serv);
+ serv->sv_maxconn = nlm_max_connections;
+
+- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
++ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
+ if (IS_ERR(nlmsvc_task)) {
+ error = PTR_ERR(nlmsvc_task);
+ svc_exit_thread(nlmsvc_rqst);
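
lockd_up() passed serv->sv_name straight into kthread_run(), whose trailing arguments form a printf-style format, so any '%' in the name would be interpreted. The fix routes it through "%s"; the same sweep recurs below in fs/nfs/callback.c (which also folds the sprintf'd thread name into the format, dropping the stack buffer) and in the ntfs_error() calls in fs/ntfs/super.c:

    kthread_run(lockd, nlmsvc_rqst, serv->sv_name);        /* unsafe: '%' parsed */
    kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);  /* fixed */
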
+diff --git a/fs/locks.c b/fs/locks.c
+index fcc50ab..c3dacf26 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
+ return;
+
+ if (filp->f_op && filp->f_op->flock) {
+- struct file_lock fl = {
++ struct file_lock flock = {
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- filp->f_op->flock(filp, F_SETLKW, &fl);
+- if (fl.fl_ops && fl.fl_ops->fl_release_private)
+- fl.fl_ops->fl_release_private(&fl);
++ filp->f_op->flock(filp, F_SETLKW, &flock);
++ if (flock.fl_ops && flock.fl_ops->fl_release_private)
++ flock.fl_ops->fl_release_private(&flock);
+ }
+
+ lock_flocks();
+diff --git a/fs/logfs/super.c b/fs/logfs/super.c
+index e795c234..136932a 100644
+--- a/fs/logfs/super.c
++++ b/fs/logfs/super.c
+@@ -609,6 +609,7 @@ static struct file_system_type logfs_fs_type = {
+ .fs_flags = FS_REQUIRES_DEV,
+
+ };
++MODULE_ALIAS_FS("logfs");
+
+ static int __init logfs_init(void)
+ {
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index 4d46a6a..dee1cdf 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -653,6 +653,7 @@ static struct file_system_type minix_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("minix");
+
+ static int __init init_minix_fs(void)
+ {
+diff --git a/fs/namei.c b/fs/namei.c
+index 9680cef..36c9152 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
+ if (ret != -EACCES)
+ return ret;
+
++#ifdef CONFIG_GRKERNSEC
++ /* we'll block if we have to log due to a denied capability use */
++ if (mask & MAY_NOT_BLOCK)
++ return -ECHILD;
++#endif
++
+ if (S_ISDIR(inode->i_mode)) {
+ /* DACs are overridable for directories */
+- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
+- return 0;
+ if (!(mask & MAY_WRITE))
+- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
++ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
++ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
+ return 0;
++ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
++ return 0;
+ return -EACCES;
+ }
+ /*
++ * Searching includes executable on directories, else just read.
++ */
++ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
++ if (mask == MAY_READ)
++ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
++ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
++ return 0;
++
++ /*
+ * Read/write DACs are always overridable.
+ * Executable DACs are overridable when there is
+ * at least one exec bit set.
+@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
+ return 0;
+
+- /*
+- * Searching includes executable on directories, else just read.
+- */
+- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+- if (mask == MAY_READ)
+- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
+- return 0;
+-
+ return -EACCES;
+ }
+
+@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ return error;
+ }
+
++ if (gr_handle_follow_link(dentry->d_parent->d_inode,
++ dentry->d_inode, dentry, nd->path.mnt)) {
++ error = -EACCES;
++ *p = ERR_PTR(error); /* no ->put_link(), please */
++ path_put(&nd->path);
++ return error;
++ }
++
+ nd->last_type = LAST_BIND;
+ *p = dentry->d_inode->i_op->follow_link(dentry, nd);
+ error = PTR_ERR(*p);
+ if (!IS_ERR(*p)) {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ error = 0;
+ if (s)
+ error = __vfs_follow_link(nd, s);
+@@ -1345,6 +1361,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+ if (!res)
+ res = walk_component(nd, path, &nd->last,
+ nd->last_type, LOOKUP_FOLLOW);
++ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
++ res = -EACCES;
+ put_link(nd, &link, cookie);
+ } while (res > 0);
+
+@@ -1617,6 +1635,8 @@ static int path_lookupat(int dfd, const char *name,
+ err = follow_link(&link, nd, &cookie);
+ if (!err)
+ err = lookup_last(nd, &path);
++ if (!err && gr_handle_symlink_owner(&link, nd->inode))
++ err = -EACCES;
+ put_link(nd, &link, cookie);
+ }
+ }
+@@ -1624,6 +1644,13 @@ static int path_lookupat(int dfd, const char *name,
+ if (!err)
+ err = complete_walk(nd);
+
++ if (!err && !(nd->flags & LOOKUP_PARENT)) {
++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++ path_put(&nd->path);
++ err = -ENOENT;
++ }
++ }
++
+ if (!err && nd->flags & LOOKUP_DIRECTORY) {
+ if (!nd->inode->i_op->lookup) {
+ path_put(&nd->path);
+@@ -1655,6 +1682,12 @@ static int do_path_lookup(int dfd, const char *name,
+ if (nd->path.dentry && nd->inode)
+ audit_inode(name, nd->path.dentry);
+ }
++ if (*name != '/' && nd->path.dentry && nd->inode) {
++ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
++ path_put(&nd->path);
++ return -ENOENT;
++ }
++ }
+ }
+ return retval;
+ }
+@@ -1784,7 +1817,13 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+ if (!len)
+ return ERR_PTR(-EACCES);
+
++ if (unlikely(name[0] == '.')) {
++ if (len < 2 || (len == 2 && name[1] == '.'))
++ return ERR_PTR(-EACCES);
++ }
++
+ hash = init_name_hash();
++
+ while (len--) {
+ c = *(const unsigned char *)name++;
+ if (c == '/' || c == '\0')
+@@ -2048,6 +2087,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+ if (flag & O_NOATIME && !inode_owner_or_capable(inode))
+ return -EPERM;
+
++ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
++ return -EPERM;
++ if (gr_handle_rawio(inode))
++ return -EPERM;
++ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
++ return -EACCES;
++
+ return 0;
+ }
+
+@@ -2083,7 +2129,7 @@ static inline int open_to_namei_flags(int flag)
+ /*
+ * Handle the last step of open()
+ */
+-static struct file *do_last(struct nameidata *nd, struct path *path,
++static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
+ const struct open_flags *op, const char *pathname)
+ {
+ struct dentry *dir = nd->path.dentry;
+@@ -2109,16 +2155,32 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ error = complete_walk(nd);
+ if (error)
+ return ERR_PTR(error);
++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++ error = -ENOENT;
++ goto exit;
++ }
+ audit_inode(pathname, nd->path.dentry);
+ if (open_flag & O_CREAT) {
+ error = -EISDIR;
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ goto ok;
+ case LAST_BIND:
+ error = complete_walk(nd);
+ if (error)
+ return ERR_PTR(error);
++ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
++ error = -ENOENT;
++ goto exit;
++ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ audit_inode(pathname, dir);
+ goto ok;
+ }
+@@ -2134,18 +2196,31 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ !symlink_ok);
+ if (error < 0)
+ return ERR_PTR(error);
+- if (error) /* symlink */
++ if (error) /* symlink */ {
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ return NULL;
++ }
+ /* sayonara */
+ error = complete_walk(nd);
+ if (error)
+ return ERR_PTR(error);
++ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
++ error = -ENOENT;
++ goto exit;
++ }
+
+ error = -ENOTDIR;
+ if (nd->flags & LOOKUP_DIRECTORY) {
+ if (!nd->inode->i_op->lookup)
+ goto exit;
+ }
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ audit_inode(pathname, nd->path.dentry);
+ goto ok;
+ }
+@@ -2180,6 +2255,17 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ /* Negative dentry, just create the file */
+ if (!dentry->d_inode) {
+ int mode = op->mode;
++
++ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current_umask();
+ /*
+@@ -2203,6 +2289,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ error = vfs_create(dir->d_inode, dentry, mode, nd);
+ if (error)
+ goto exit_mutex_unlock;
++ else
++ gr_handle_create(path->dentry, path->mnt);
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(nd->path.dentry);
+ nd->path.dentry = dentry;
+@@ -2212,6 +2300,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ /*
+ * It already exists.
+ */
++
++ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
++ error = -ENOENT;
++ goto exit_mutex_unlock;
++ }
++
++ /* only check if O_CREAT is specified, all other checks need to go
++ into may_open */
++ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
++ error = -EACCES;
++ goto exit_mutex_unlock;
++ }
++
+ mutex_unlock(&dir->d_inode->i_mutex);
+ audit_inode(pathname, path->dentry);
+
+@@ -2230,11 +2331,17 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ if (!path->dentry->d_inode)
+ goto exit_dput;
+
+- if (path->dentry->d_inode->i_op->follow_link)
++ if (path->dentry->d_inode->i_op->follow_link) {
++ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
++ error = -EACCES;
++ goto exit;
++ }
+ return NULL;
++ }
+
+ path_to_nameidata(path, nd);
+ nd->inode = path->dentry->d_inode;
++
+ /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
+ error = complete_walk(nd);
+ if (error)
+@@ -2242,6 +2349,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ error = -EISDIR;
+ if (S_ISDIR(nd->inode->i_mode))
+ goto exit;
++
++ if (link && gr_handle_symlink_owner(link, nd->inode)) {
++ error = -EACCES;
++ goto exit;
++ }
++
+ ok:
+ if (!S_ISREG(nd->inode->i_mode))
+ will_truncate = 0;
+@@ -2314,7 +2427,7 @@ static struct file *path_openat(int dfd, const char *pathname,
+ if (unlikely(error))
+ goto out_filp;
+
+- filp = do_last(nd, &path, op, pathname);
++ filp = do_last(nd, &path, NULL, op, pathname);
+ while (unlikely(!filp)) { /* trailing symlink */
+ struct path link = path;
+ void *cookie;
+@@ -2329,8 +2442,9 @@ static struct file *path_openat(int dfd, const char *pathname,
+ error = follow_link(&link, nd, &cookie);
+ if (unlikely(error))
+ filp = ERR_PTR(error);
+- else
+- filp = do_last(nd, &path, op, pathname);
++ else {
++ filp = do_last(nd, &path, &link, op, pathname);
++ }
+ put_link(nd, &link, cookie);
+ }
+ out:
+@@ -2424,6 +2538,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
+ *path = nd.path;
+ return dentry;
+ eexist:
++ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
++ dput(dentry);
++ dentry = ERR_PTR(-ENOENT);
++ goto fail;
++ }
+ dput(dentry);
+ dentry = ERR_PTR(-EEXIST);
+ fail:
+@@ -2446,6 +2565,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
+ }
+ EXPORT_SYMBOL(user_path_create);
+
++static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
++{
++ char *tmp = getname(pathname);
++ struct dentry *res;
++ if (IS_ERR(tmp))
++ return ERR_CAST(tmp);
++ res = kern_path_create(dfd, tmp, path, is_dir);
++ if (IS_ERR(res))
++ putname(tmp);
++ else
++ *to = tmp;
++ return res;
++}
++
+ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ {
+ int error = may_create(dir, dentry);
+@@ -2513,6 +2646,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_dput;
++
++ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
++ error = -EPERM;
++ goto out_drop_write;
++ }
++
++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
+ error = security_path_mknod(&path, dentry, mode, dev);
+ if (error)
+ goto out_drop_write;
+@@ -2530,6 +2674,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+ }
+ out_drop_write:
+ mnt_drop_write(path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, path.mnt);
+ out_dput:
+ dput(dentry);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+@@ -2579,12 +2726,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_dput;
++
++ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
+ error = security_path_mkdir(&path, dentry, mode);
+ if (error)
+ goto out_drop_write;
+ error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ out_drop_write:
+ mnt_drop_write(path.mnt);
++
++ if (!error)
++ gr_handle_create(dentry, path.mnt);
+ out_dput:
+ dput(dentry);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+@@ -2664,6 +2820,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ char * name;
+ struct dentry *dentry;
+ struct nameidata nd;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2692,6 +2850,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ error = -ENOENT;
+ goto exit3;
+ }
++
++ saved_ino = dentry->d_inode->i_ino;
++ saved_dev = gr_get_dev_from_dentry(dentry);
++
++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit3;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit3;
+@@ -2699,6 +2866,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+ if (error)
+ goto exit4;
+ error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_dev || saved_ino))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit4:
+ mnt_drop_write(nd.path.mnt);
+ exit3:
+@@ -2761,6 +2930,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ struct dentry *dentry;
+ struct nameidata nd;
+ struct inode *inode = NULL;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ error = user_path_parent(dfd, pathname, &nd, &name);
+ if (error)
+@@ -2783,6 +2954,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ if (!inode)
+ goto slashes;
+ ihold(inode);
++
++ if (inode->i_nlink <= 1) {
++ saved_ino = inode->i_ino;
++ saved_dev = gr_get_dev_from_dentry(dentry);
++ }
++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
++ error = -EACCES;
++ goto exit2;
++ }
++
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit2;
+@@ -2790,6 +2971,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+ if (error)
+ goto exit3;
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++ if (!error && (saved_ino || saved_dev))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit3:
+ mnt_drop_write(nd.path.mnt);
+ exit2:
+@@ -2865,10 +3048,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_dput;
++
++ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
+ error = security_path_symlink(&path, dentry, from);
+ if (error)
+ goto out_drop_write;
+ error = vfs_symlink(path.dentry->d_inode, dentry, from);
++ if (!error)
++ gr_handle_create(dentry, path.mnt);
+ out_drop_write:
+ mnt_drop_write(path.mnt);
+ out_dput:
+@@ -2940,6 +3131,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ {
+ struct dentry *new_dentry;
+ struct path old_path, new_path;
++ char *to = NULL;
+ int how = 0;
+ int error;
+
+@@ -2963,7 +3155,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ if (error)
+ return error;
+
+- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
++ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
+ error = PTR_ERR(new_dentry);
+ if (IS_ERR(new_dentry))
+ goto out;
+@@ -2974,13 +3166,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+ error = mnt_want_write(new_path.mnt);
+ if (error)
+ goto out_dput;
++
++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
++ old_path.dentry->d_inode,
++ old_path.dentry->d_inode->i_mode, to)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
++ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
++ old_path.dentry, old_path.mnt, to)) {
++ error = -EACCES;
++ goto out_drop_write;
++ }
++
+ error = security_path_link(old_path.dentry, &new_path, new_dentry);
+ if (error)
+ goto out_drop_write;
+ error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
++ if (!error)
++ gr_handle_create(new_dentry, new_path.mnt);
+ out_drop_write:
+ mnt_drop_write(new_path.mnt);
+ out_dput:
++ putname(to);
+ dput(new_dentry);
+ mutex_unlock(&new_path.dentry->d_inode->i_mutex);
+ path_put(&new_path);
+@@ -3208,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+ if (new_dentry == trap)
+ goto exit5;
+
++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
++ old_dentry, old_dir->d_inode, oldnd.path.mnt,
++ to);
++ if (error)
++ goto exit5;
++
+ error = mnt_want_write(oldnd.path.mnt);
+ if (error)
+ goto exit5;
+@@ -3217,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+ goto exit6;
+ error = vfs_rename(old_dir->d_inode, old_dentry,
+ new_dir->d_inode, new_dentry);
++ if (!error)
++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
+ exit6:
+ mnt_drop_write(oldnd.path.mnt);
+ exit5:
+@@ -3242,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+ {
++ char tmpbuf[64];
++ const char *newlink;
+ int len;
+
+ len = PTR_ERR(link);
+@@ -3251,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+ len = strlen(link);
+ if (len > (unsigned) buflen)
+ len = buflen;
+- if (copy_to_user(buffer, link, len))
++
++ if (len < sizeof(tmpbuf)) {
++ memcpy(tmpbuf, link, len);
++ newlink = tmpbuf;
++ } else
++ newlink = link;
++
++ if (copy_to_user(buffer, newlink, len))
+ len = -EFAULT;
+ out:
+ return len;
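
The fs/namei.c changes above interleave grsecurity policy hooks with the existing VFS flow: symlink-ownership checks (gr_handle_symlink_owner) at every point a trailing symlink can be resolved, hidden-file and chroot-fchdir checks after lookups, FIFO and O_CREAT checks in do_last(), rejection of "." and ".." in lookup_one_len(), and a user_path_create_with_name() helper so linkat() can hand the user-supplied name to the audit hooks. generic_permission() is also reordered to try the narrower CAP_DAC_READ_SEARCH before CAP_DAC_OVERRIDE (via the patch-introduced ns_capable_nolog()) for read-only access, and to bail out with -ECHILD in RCU-walk when grsecurity might need to log; vfs_readlink() additionally bounces short link bodies through a 64-byte stack buffer before copy_to_user(). The hook placement in the creation syscalls follows one recurring shape, shown here as a comment sketch rather than literal code:

    /* Recurring ordering in mknodat/mkdirat/symlinkat/linkat above:
     *   mnt_want_write(...)            take write access
     *   gr_acl_handle_*(...)           grsecurity policy -> -EACCES/-EPERM
     *   security_path_*(...)           LSM check
     *   vfs_*(...)                     the actual operation
     *   gr_handle_create/delete(...)   grsecurity bookkeeping on success
     *   mnt_drop_write(...)
     */
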
+diff --git a/fs/namespace.c b/fs/namespace.c
+index ca4913a..8eb2439 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1327,6 +1327,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+ up_write(&sb->s_umount);
++
++ gr_log_remount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -1346,6 +1349,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
+ br_write_unlock(vfsmount_lock);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
++
++ gr_log_unmount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -1357,7 +1363,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
+ * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
+ */
+
+-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
++SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
+ {
+ struct path path;
+ int retval;
+@@ -1396,7 +1402,7 @@ out:
+ /*
+ * The 2.0 compatible umount. No flags.
+ */
+-SYSCALL_DEFINE1(oldumount, char __user *, name)
++SYSCALL_DEFINE1(oldumount, const char __user *, name)
+ {
+ return sys_umount(name, 0);
+ }
+@@ -2337,6 +2343,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+ MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ MS_STRICTATIME);
+
++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+ data_page);
+@@ -2351,6 +2367,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
+ dev_name, data_page);
+ dput_out:
+ path_put(&path);
++
++ gr_log_mount(dev_name, dir_name, retval);
++
+ return retval;
+ }
+
+@@ -2389,7 +2408,7 @@ void mnt_make_shortterm(struct vfsmount *mnt)
+ * Allocate a new namespace structure and populate it with contents
+ * copied from the namespace of the passed in task structure.
+ */
+-static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
++static __latent_entropy struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
+ struct fs_struct *fs)
+ {
+ struct mnt_namespace *new_ns;
+@@ -2518,8 +2537,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+ }
+ EXPORT_SYMBOL(mount_subtree);
+
+-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+- char __user *, type, unsigned long, flags, void __user *, data)
++SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
++ const char __user *, type, unsigned long, flags, void __user *, data)
+ {
+ int ret;
+ char *kernel_type;
+@@ -2606,6 +2625,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ if (error)
+ goto out2;
+
++ if (gr_handle_chroot_pivot()) {
++ error = -EPERM;
++ goto out2;
++ }
++
+ get_fs_root(current->fs, &root);
+ error = lock_mount(&old);
+ if (error)
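
fs/namespace.c gains audit logging (gr_log_mount/gr_log_remount/gr_log_unmount) on every mount, remount, and umount result, chroot containment checks for mount and pivot_root, a __latent_entropy annotation on dup_mnt_ns() so the PaX gcc plugin can mix entropy at that call, and const-qualified __user string parameters on the mount/umount syscalls as part of this patch's broader constification. The signature change is purely a compile-time tightening:

    /* The name is only read, so reject accidental writes at build time: */
    SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
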
+diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
+index cbd1a61..b43f68b 100644
+--- a/fs/ncpfs/inode.c
++++ b/fs/ncpfs/inode.c
+@@ -1041,6 +1041,7 @@ static struct file_system_type ncp_fs_type = {
+ .kill_sb = kill_anon_super,
+ .fs_flags = FS_BINARY_MOUNTDATA,
+ };
++MODULE_ALIAS_FS("ncpfs");
+
+ static int __init init_ncp_fs(void)
+ {
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 516f337..82a82df 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -250,7 +250,6 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+ struct svc_rqst *rqstp;
+ int (*callback_svc)(void *vrqstp);
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+- char svc_name[12];
+ int ret = 0;
+ int minorversion_setup;
+
+@@ -280,10 +279,9 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+
+ svc_sock_update_bufs(serv);
+
+- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+ cb_info->serv = serv;
+ cb_info->rqst = rqstp;
+- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
++ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
+ if (IS_ERR(cb_info->task)) {
+ ret = PTR_ERR(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 3fde055..de27202 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -50,7 +50,7 @@ struct callback_op {
+ callback_decode_arg_t decode_args;
+ callback_encode_res_t encode_res;
+ long res_maxsize;
+-};
++} __do_const;
+
+ static struct callback_op callback_ops[];
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 756f4df..8bd49ca 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -500,7 +500,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ nfs_refresh_inode(dentry->d_inode, entry->fattr);
+ goto out;
+ } else {
+- d_drop(dentry);
++ if (d_invalidate(dentry) != 0)
++ goto out;
+ dput(dentry);
+ }
+ }
+@@ -1164,6 +1165,8 @@ out_set_verifier:
+ out_zap_parent:
+ nfs_zap_caches(dir);
+ out_bad:
++ nfs_free_fattr(fattr);
++ nfs_free_fhandle(fhandle);
+ nfs_mark_for_revalidate(dir);
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+@@ -1176,8 +1179,6 @@ out_zap_parent:
+ shrink_dcache_parent(dentry);
+ }
+ d_drop(dentry);
+- nfs_free_fattr(fattr);
+- nfs_free_fhandle(fhandle);
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
+ __func__, dentry->d_parent->d_name.name,
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index b78b5b6..c64d84f 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
+ return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
+ }
+
+-static atomic_long_t nfs_attr_generation_counter;
++static atomic_long_unchecked_t nfs_attr_generation_counter;
+
+ static unsigned long nfs_read_attr_generation_counter(void)
+ {
+- return atomic_long_read(&nfs_attr_generation_counter);
++ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
+ }
+
+ unsigned long nfs_inc_attr_generation_counter(void)
+ {
+- return atomic_long_inc_return(&nfs_attr_generation_counter);
++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
+ }
+
+ void nfs_fattr_init(struct nfs_fattr *fattr)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3d02931..e2b575a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1037,7 +1037,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
+ struct nfs4_state *state = opendata->state;
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_delegation *delegation;
+- int open_mode = opendata->o_arg.open_flags & O_EXCL;
++ int open_mode = opendata->o_arg.open_flags;
+ fmode_t fmode = opendata->o_arg.fmode;
+ nfs4_stateid stateid;
+ int ret = -EAGAIN;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 1943898..396c460 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -282,6 +282,7 @@ static struct file_system_type nfs_fs_type = {
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
++MODULE_ALIAS_FS("nfs");
+
+ struct file_system_type nfs_xdev_fs_type = {
+ .owner = THIS_MODULE,
+@@ -338,6 +339,8 @@ static struct file_system_type nfs4_remote_fs_type = {
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+ };
++MODULE_ALIAS_FS("nfs4");
++MODULE_ALIAS("nfs4");
+
+ struct file_system_type nfs4_xdev_fs_type = {
+ .owner = THIS_MODULE,
+@@ -3089,6 +3092,4 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ return res;
+ }
+
+-MODULE_ALIAS("nfs4");
+-
+ #endif /* CONFIG_NFS_V4 */
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index e065497..258fa11 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1048,7 +1048,7 @@ struct nfsd4_operation {
+ char *op_name;
+ /* Try to get response size before operation */
+ nfsd4op_rsize op_rsize_bop;
+-};
++} __do_const;
+
+ static struct nfsd4_operation nfsd4_ops[];
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316..f1a6152 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1445,7 +1445,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
+
+ typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
+
+-static nfsd4_dec nfsd4_dec_ops[] = {
++static const nfsd4_dec nfsd4_dec_ops[] = {
+ [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
+ [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
+ [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
+@@ -1485,7 +1485,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
+ [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
+ };
+
+-static nfsd4_dec nfsd41_dec_ops[] = {
++static const nfsd4_dec nfsd41_dec_ops[] = {
+ [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
+ [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
+ [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
+@@ -1547,7 +1547,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
+ };
+
+ struct nfsd4_minorversion_ops {
+- nfsd4_dec *decoders;
++ const nfsd4_dec *decoders;
+ int nops;
+ };
+
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 2cbac34..21c9120 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -259,13 +259,16 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+ {
+ struct svc_cacherep *rp;
+ struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
+- int len;
++ long len;
+
+ if (!(rp = rqstp->rq_cacherep) || cache_disabled)
+ return;
+
+- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
+- len >>= 2;
++ if (statp) {
++ len = (char*)statp - (char*)resv->iov_base;
++ len = resv->iov_len - len;
++ len >>= 2;
++ }
+
+ /* Don't cache excessive amounts of data and XDR failures */
+ if (!statp || len > (256 >> 2)) {
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index c45a2ea..1a6bd66 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1102,6 +1102,7 @@ static struct file_system_type nfsd_fs_type = {
+ .mount = nfsd_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("nfsd");
+
+ #ifdef CONFIG_PROC_FS
+ static int create_proc_exports_entry(void)
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 6a66fc0..cfdadae 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -948,7 +948,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ } else {
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ }
+
+@@ -1052,7 +1052,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+
+ /* Write the data. */
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
++ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
+ set_fs(oldfs);
+ if (host_err < 0)
+ goto out_nfserr;
+@@ -1593,7 +1593,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+ */
+
+ oldfs = get_fs(); set_fs(KERNEL_DS);
+- host_err = inode->i_op->readlink(dentry, buf, *lenp);
++ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
+ set_fs(oldfs);
+
+ if (host_err < 0)
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 97bfbdd..e7f644a 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -1370,6 +1370,7 @@ struct file_system_type nilfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("nilfs2");
+
+ static void nilfs_inode_init_once(void *obj)
+ {
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 1c98f53..41e6a04 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -410,6 +410,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
+ nilfs->ns_r_segments_percentage =
+ le32_to_cpu(sbp->s_r_segments_percentage);
++ if (nilfs->ns_r_segments_percentage < 1 ||
++ nilfs->ns_r_segments_percentage > 99) {
++ printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
++ return -EINVAL;
++ }
++
+ nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
+ nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
+ return 0;
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 0eb059ec..e086089 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
+
+ int register_nls(struct nls_table * nls)
+ {
+- struct nls_table ** tmp = &tables;
++ struct nls_table *tmp = tables;
+
+ if (nls->next)
+ return -EBUSY;
+
+ spin_lock(&nls_lock);
+- while (*tmp) {
+- if (nls == *tmp) {
++ while (tmp) {
++ if (nls == tmp) {
+ spin_unlock(&nls_lock);
+ return -EBUSY;
+ }
+- tmp = &(*tmp)->next;
++ tmp = tmp->next;
+ }
+- nls->next = tables;
++ pax_open_kernel();
++ *(struct nls_table **)&nls->next = tables;
++ pax_close_kernel();
+ tables = nls;
+ spin_unlock(&nls_lock);
+ return 0;
+@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
+
+ int unregister_nls(struct nls_table * nls)
+ {
+- struct nls_table ** tmp = &tables;
++ struct nls_table * const * tmp = &tables;
+
+ spin_lock(&nls_lock);
+ while (*tmp) {
+ if (nls == *tmp) {
+- *tmp = nls->next;
++ pax_open_kernel();
++ *(struct nls_table **)tmp = nls->next;
++ pax_close_kernel();
+ spin_unlock(&nls_lock);
+ return 0;
+ }
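
With this patchset's constification, structures such as nls_table live in read-only memory after init, so the NLS registration list can no longer be spliced by ordinary stores. register_nls()/unregister_nls() therefore write the ->next pointers through pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, roughly a CR0.WP toggle). The resulting idiom, repeated in the two NLS tables below for the charset2upper/charset2lower fields:

    pax_open_kernel();
    *(struct nls_table **)&nls->next = tables;  /* write through the const */
    pax_close_kernel();
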
+diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
+index 7424929..35f6be5 100644
+--- a/fs/nls/nls_euc-jp.c
++++ b/fs/nls/nls_euc-jp.c
+@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
+ p_nls = load_nls("cp932");
+
+ if (p_nls) {
+- table.charset2upper = p_nls->charset2upper;
+- table.charset2lower = p_nls->charset2lower;
++ pax_open_kernel();
++ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
++ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
++ pax_close_kernel();
+ return register_nls(&table);
+ }
+
+diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
+index e7bc1d7..06bd4bb 100644
+--- a/fs/nls/nls_koi8-ru.c
++++ b/fs/nls/nls_koi8-ru.c
+@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
+ p_nls = load_nls("koi8-u");
+
+ if (p_nls) {
+- table.charset2upper = p_nls->charset2upper;
+- table.charset2lower = p_nls->charset2lower;
++ pax_open_kernel();
++ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
++ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
++ pax_close_kernel();
+ return register_nls(&table);
+ }
+
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9860f6b..55df672 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -277,7 +277,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ goto out_close_fd;
+
+ ret = -EFAULT;
+- if (copy_to_user(buf, &fanotify_event_metadata,
++ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
++ copy_to_user(buf, &fanotify_event_metadata,
+ fanotify_event_metadata.event_len))
+ goto out_kill_access_response;
+
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index ee18815..7aa5d01 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
+ * get set to 0 so it will never get 'freed'
+ */
+ static struct fsnotify_event *q_overflow_event;
+-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
++static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+
+ /**
+ * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
+@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+ */
+ u32 fsnotify_get_cookie(void)
+ {
+- return atomic_inc_return(&fsnotify_sync_cookie);
++ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
+ }
+ EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
+
+diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
+index 99e3610..02c1068 100644
+--- a/fs/ntfs/dir.c
++++ b/fs/ntfs/dir.c
+@@ -1329,7 +1329,7 @@ find_next_index_buffer:
+ ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
+ ~(s64)(ndir->itype.index.block_size - 1)));
+ /* Bounds checks. */
+- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
++ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+ ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
+ "inode 0x%lx or driver bug.", vdir->i_ino);
+ goto err_out;
+diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
+index c587e2d..48a16cd 100644
+--- a/fs/ntfs/file.c
++++ b/fs/ntfs/file.c
+@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
+ char *addr;
+ size_t total = 0;
+ unsigned len;
+- int left;
++ unsigned left;
+
+ do {
+ len = PAGE_CACHE_SIZE - ofs;
+diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
+index b52706d..b9a9f9d 100644
+--- a/fs/ntfs/super.c
++++ b/fs/ntfs/super.c
+@@ -661,7 +661,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ if (!silent)
+ ntfs_error(sb, "Primary boot sector is invalid.");
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "primary");
++ ntfs_error(sb, read_err_str, "%s", "primary");
+ if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
+ if (bh_primary)
+ brelse(bh_primary);
+@@ -677,7 +677,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ goto hotfix_primary_boot_sector;
+ brelse(bh_backup);
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "backup");
++ ntfs_error(sb, read_err_str, "%s", "backup");
+ /* Try to read NT3.51- backup boot sector. */
+ if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
+ if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
+@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
+ "sector.");
+ brelse(bh_backup);
+ } else if (!silent)
+- ntfs_error(sb, read_err_str, "backup");
++ ntfs_error(sb, read_err_str, "%s", "backup");
+ /* We failed. Cleanup and return. */
+ if (bh_primary)
+ brelse(bh_primary);
+@@ -3072,6 +3072,7 @@ static struct file_system_type ntfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ntfs");
+
+ /* Stable names for the slab caches. */
+ static const char ntfs_index_ctx_cache_name[] = "ntfs_index_ctx_cache";
+diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
+index b420767..bbf1094 100644
+--- a/fs/ocfs2/dlmfs/dlmfs.c
++++ b/fs/ocfs2/dlmfs/dlmfs.c
+@@ -662,6 +662,7 @@ static struct file_system_type dlmfs_fs_type = {
+ .mount = dlmfs_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("ocfs2_dlmfs");
+
+ static int __init init_dlmfs_fs(void)
+ {
+diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
+index 210c352..a174f83 100644
+--- a/fs/ocfs2/localalloc.c
++++ b/fs/ocfs2/localalloc.c
+@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
+ goto bail;
+ }
+
+- atomic_inc(&osb->alloc_stats.moves);
++ atomic_inc_unchecked(&osb->alloc_stats.moves);
+
+ bail:
+ if (handle)
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index d355e6e..578d905 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -235,11 +235,11 @@ enum ocfs2_vol_state
+
+ struct ocfs2_alloc_stats
+ {
+- atomic_t moves;
+- atomic_t local_data;
+- atomic_t bitmap_data;
+- atomic_t bg_allocs;
+- atomic_t bg_extends;
++ atomic_unchecked_t moves;
++ atomic_unchecked_t local_data;
++ atomic_unchecked_t bitmap_data;
++ atomic_unchecked_t bg_allocs;
++ atomic_unchecked_t bg_extends;
+ };
+
+ enum ocfs2_local_alloc_state
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index b7e74b5..19c6536 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&osb->alloc_stats.bg_extends);
++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
+
+ /* You should never ask for this much metadata */
+ BUG_ON(bits_wanted >
+@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ *suballoc_loc = res.sr_bg_blkno;
+ *suballoc_bit_start = res.sr_bit_offset;
+@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+ trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
+ res->sr_bits);
+
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res->sr_bits != 1);
+
+@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
+ mlog_errno(status);
+ goto bail;
+ }
+- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res.sr_bits != 1);
+
+@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+ cluster_start,
+ num_clusters);
+ if (!status)
+- atomic_inc(&osb->alloc_stats.local_data);
++ atomic_inc_unchecked(&osb->alloc_stats.local_data);
+ } else {
+ if (min_clusters > (osb->bitmap_cpg - 1)) {
+ /* The only paths asking for contiguousness
+@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+ res.sr_bg_blkno,
+ res.sr_bit_offset);
+- atomic_inc(&osb->alloc_stats.bitmap_data);
++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
+ *num_clusters = res.sr_bits;
+ }
+ }
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 4994f8b..04a9180 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
+ "%10s => GlobalAllocs: %d LocalAllocs: %d "
+ "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
+ "Stats",
+- atomic_read(&osb->alloc_stats.bitmap_data),
+- atomic_read(&osb->alloc_stats.local_data),
+- atomic_read(&osb->alloc_stats.bg_allocs),
+- atomic_read(&osb->alloc_stats.moves),
+- atomic_read(&osb->alloc_stats.bg_extends));
++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
++ atomic_read_unchecked(&osb->alloc_stats.local_data),
++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
++ atomic_read_unchecked(&osb->alloc_stats.moves),
++ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %u Descriptor: %llu Size: %u bits "
+@@ -1270,6 +1270,7 @@ static struct file_system_type ocfs2_fs_type = {
+ .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
+ .next = NULL
+ };
++MODULE_ALIAS_FS("ocfs2");
+
+ static int ocfs2_check_set_options(struct super_block *sb,
+ struct mount_options *options)
+@@ -2119,11 +2120,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
+ spin_lock_init(&osb->osb_xattr_lock);
+ ocfs2_init_steal_slots(osb);
+
+- atomic_set(&osb->alloc_stats.moves, 0);
+- atomic_set(&osb->alloc_stats.local_data, 0);
+- atomic_set(&osb->alloc_stats.bitmap_data, 0);
+- atomic_set(&osb->alloc_stats.bg_allocs, 0);
+- atomic_set(&osb->alloc_stats.bg_extends, 0);
++ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
+
+ /* Copy the blockcheck stats from the superblock probe */
+ osb->osb_ecc_stats = *stats;
+diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
+index 5d22872..523db20 100644
+--- a/fs/ocfs2/symlink.c
++++ b/fs/ocfs2/symlink.c
+@@ -142,7 +142,7 @@ bail:
+
+ static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *link = nd_get_link(nd);
++ const char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
+ }
+diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
+index e043c4c..f99d456 100644
+--- a/fs/omfs/inode.c
++++ b/fs/omfs/inode.c
+@@ -570,6 +570,7 @@ static struct file_system_type omfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("omfs");
+
+ static int __init init_omfs_fs(void)
+ {
+diff --git a/fs/open.c b/fs/open.c
+index b8485d3..e18561a 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -31,6 +31,8 @@
+ #include <linux/ima.h>
+ #include <linux/dnotify.h>
+
++#define CREATE_TRACE_POINTS
++#include <trace/events/fs.h>
+ #include "internal.h"
+
+ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
+ error = locks_verify_truncate(inode, NULL, length);
+ if (!error)
+ error = security_path_truncate(&path);
++
++ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
++ error = -EACCES;
++
+ if (!error)
+ error = do_truncate(path.dentry, length, 0, NULL);
+
+@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
+ if (__mnt_is_readonly(path.mnt))
+ res = -EROFS;
+
++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
++ res = -EACCES;
++
+ out_path_release:
+ path_put(&path);
+ out:
+@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
+ if (error)
+ goto dput_and_out;
+
++ gr_log_chdir(path.dentry, path.mnt);
++
+ set_fs_pwd(current->fs, &path);
+
+ dput_and_out:
+@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ goto out_putf;
+
+ error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
++
++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
++ error = -EPERM;
++
++ if (!error)
++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
++
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
+ if (error)
+ goto dput_and_out;
+
++ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
++ goto dput_and_out;
++
+ set_fs_root(current->fs, &path);
++
++ gr_handle_chroot_chdir(&path);
++
+ error = 0;
+ dput_and_out:
+ path_put(&path);
+@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
+ if (error)
+ return error;
+ mutex_lock(&inode->i_mutex);
++
++ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
++ error = -EACCES;
++ goto out_unlock;
++ }
++ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
++ error = -EACCES;
++ goto out_unlock;
++ }
++
+ error = security_path_chmod(path->dentry, path->mnt, mode);
+ if (error)
+ goto out_unlock;
+@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ int error;
+ struct iattr newattrs;
+
++ if (!gr_acl_handle_chown(path->dentry, path->mnt))
++ return -EACCES;
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -988,6 +1025,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
+ } else {
+ fsnotify_open(f);
+ fd_install(fd, f);
++ trace_do_sys_open(tmp, flags, mode);
+ }
+ }
+ putname(tmp);
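
fs/open.c repeats the hook pattern sketched after the namei.c section on the metadata syscalls (truncate, faccessat, chdir/fchdir, chroot, chmod, chown), adds chroot-aware handling and logging of directory and root changes, and instruments successful opens with a do_sys_open tracepoint via the newly included trace/events/fs.h, apparently so the patchset can export open events for auditing.
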
+diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
+index e4e0ff7..458929b 100644
+--- a/fs/openpromfs/inode.c
++++ b/fs/openpromfs/inode.c
+@@ -434,6 +434,7 @@ static struct file_system_type openprom_fs_type = {
+ .mount = openprom_mount,
+ .kill_sb = kill_anon_super,
+ };
++MODULE_ALIAS_FS("openpromfs");
+
+ static void op_inode_init_once(void *data)
+ {
+diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
+index 6296b40..417c00f 100644
+--- a/fs/partitions/efi.c
++++ b/fs/partitions/efi.c
+@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+ if (!gpt)
+ return NULL;
+
++ if (!le32_to_cpu(gpt->num_partition_entries))
++ return NULL;
++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
++ if (!pte)
++ return NULL;
++
+ count = le32_to_cpu(gpt->num_partition_entries) *
+ le32_to_cpu(gpt->sizeof_partition_entry);
+- if (!count)
+- return NULL;
+- pte = kzalloc(count, GFP_KERNEL);
+- if (!pte)
+- return NULL;
+-
+ if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+ (u8 *) pte,
+ count) < count) {
+diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
+index bd8ae78..539d250 100644
+--- a/fs/partitions/ldm.c
++++ b/fs/partitions/ldm.c
+@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+ goto found;
+ }
+
+- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
++ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
+ if (!f) {
+ ldm_crit ("Out of memory.");
+ return false;
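
Two allocation-size hardenings sit back to back here. alloc_read_gpt_entries() multiplied two attacker-controlled 32-bit GPT header fields and handed the product to kzalloc(), so a crafted header could wrap the multiplication and under-allocate; kcalloc() performs the same multiply with overflow checking and fails cleanly. The ldm.c hunk rewrites sizeof (*f) + size*num as size*num + sizeof (*f), apparently so the size_overflow gcc plugin can instrument the expression; the value is unchanged. With nr_entries and entry_size standing in for the decoded fields:

    pte = kcalloc(nr_entries, entry_size, GFP_KERNEL); /* checked n*size */
    /* old form -- the product can silently wrap before kzalloc sees it: */
    pte = kzalloc(nr_entries * entry_size, GFP_KERNEL);
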
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 8ca88fc..d1f8b8a 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -437,9 +437,9 @@ redo:
+ }
+ if (bufs) /* More to do? */
+ continue;
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ /* syscall merging: Usually we must not sleep
+ * if O_NONBLOCK is set, or if we got some data.
+ * But if a writer sleeps in kernel space, then
+@@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ goto out;
+@@ -552,7 +552,7 @@ redo1:
+ for (;;) {
+ int bufs;
+
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -643,9 +643,9 @@ redo2:
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+ do_wakeup = 0;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+ out:
+ mutex_unlock(&inode->i_mutex);
+@@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+ mask = 0;
+ if (filp->f_mode & FMODE_READ) {
+ mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
+- if (!pipe->writers && filp->f_version != pipe->w_counter)
++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
+ mask |= POLLHUP;
+ }
+
+@@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+ * Most Unices do not set POLLERR for FIFOs but on Linux they
+ * behave exactly like pipes for poll().
+ */
+- if (!pipe->readers)
++ if (!atomic_read(&pipe->readers))
+ mask |= POLLERR;
+ }
+
+@@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
+
+ mutex_lock(&inode->i_mutex);
+ pipe = inode->i_pipe;
+- pipe->readers -= decr;
+- pipe->writers -= decw;
++ atomic_sub(decr, &pipe->readers);
++ atomic_sub(decw, &pipe->writers);
+
+- if (!pipe->readers && !pipe->writers) {
++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
+ free_pipe_info(inode);
+ } else {
+ wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
+@@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
+
+ if (inode->i_pipe) {
+ ret = 0;
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -867,9 +867,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
+ if (inode->i_pipe) {
+ ret = 0;
+ if (filp->f_mode & FMODE_READ)
+- inode->i_pipe->readers++;
++ atomic_inc(&inode->i_pipe->readers);
+ if (filp->f_mode & FMODE_WRITE)
+- inode->i_pipe->writers++;
++ atomic_inc(&inode->i_pipe->writers);
+ }
+
+ mutex_unlock(&inode->i_mutex);
+@@ -961,7 +961,7 @@ void free_pipe_info(struct inode *inode)
+ inode->i_pipe = NULL;
+ }
+
+-static struct vfsmount *pipe_mnt __read_mostly;
++struct vfsmount *pipe_mnt __read_mostly;
+
+ /*
+ * pipefs_dname() is called from d_path().
+@@ -991,7 +991,8 @@ static struct inode * get_pipe_inode(void)
+ goto fail_iput;
+ inode->i_pipe = pipe;
+
+- pipe->readers = pipe->writers = 1;
++ atomic_set(&pipe->readers, 1);
++ atomic_set(&pipe->writers, 1);
+ inode->i_fop = &rdwr_pipefifo_fops;
+
+ /*
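
fs/pipe.c converts the readers/writers/waiting_writers counts from plain ints to atomics. In the stock kernel these fields are serialized by i_mutex; the conversion is defensive, keeping the counts coherent even if a path ever touches them outside the mutex, and it matches the atomic discipline the rest of the patch enforces. The open/release accounting then reads:

    atomic_inc(&pipe->readers);                     /* pipe_read_open() */
    /* ... on release ... */
    if (!atomic_read(&pipe->readers) &&
        !atomic_read(&pipe->writers))
            free_pipe_info(inode);                  /* last user gone */
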
+diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
+index 15af622..0e9f4467 100644
+--- a/fs/proc/Kconfig
++++ b/fs/proc/Kconfig
+@@ -30,12 +30,12 @@ config PROC_FS
+
+ config PROC_KCORE
+ bool "/proc/kcore support" if !ARM
+- depends on PROC_FS && MMU
++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
+
+ config PROC_VMCORE
+ bool "/proc/vmcore support"
+- depends on PROC_FS && CRASH_DUMP
+- default y
++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
++ default n
+ help
+ Exports the dump image of crashed kernel in ELF format.
+
+@@ -59,8 +59,8 @@ config PROC_SYSCTL
+ limited in memory.
+
+ config PROC_PAGE_MONITOR
+- default y
+- depends on PROC_FS && MMU
++ default n
++ depends on PROC_FS && MMU && !GRKERNSEC
+ bool "Enable /proc page monitoring" if EXPERT
+ help
+ Various /proc files exist to monitor process memory utilization:
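
The Kconfig hunk makes the most revealing /proc interfaces mutually exclusive with grsecurity: /proc/kcore is hidden behind !GRKERNSEC_PROC_ADD, while /proc/vmcore and the page-monitoring files default off and conflict with GRKERNSEC outright, since they expose kernel memory contents and physical-page layout that would defeat the patchset's information hiding and ASLR.
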
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 439b5a1..61db155 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -60,6 +60,7 @@
+ #include <linux/tty.h>
+ #include <linux/string.h>
+ #include <linux/mman.h>
++#include <linux/grsecurity.h>
+ #include <linux/proc_fs.h>
+ #include <linux/ioport.h>
+ #include <linux/uaccess.h>
+@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+ seq_putc(m, '\n');
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline void task_pax(struct seq_file *m, struct task_struct *p)
++{
++ if (p->mm)
++ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
++ else
++ seq_printf(m, "PaX:\t-----\n");
++}
++#endif
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ task_cpus_allowed(m, task);
+ cpuset_task_status_allowed(m, task);
+ task_context_switch_counts(m, task);
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ task_pax(m, task);
++#endif
++
++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
++ task_grsec_rbac(m, task);
++#endif
++
+ return 0;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task, int whole)
+ {
+@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ char tcomm[sizeof(task->comm)];
+ unsigned long flags;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (current->exec_id != m->exec_id) {
++ gr_log_badprocpid("stat");
++ return 0;
++ }
++#endif
++
+ state = *get_task_state(task);
+ vsize = eip = esp = 0;
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ);
+@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ gtime = task->gtime;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS(mm)) {
++ eip = 0;
++ esp = 0;
++ wchan = 0;
++ }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ wchan = 0;
++	eip = 0;
++	esp = 0;
++#endif
++
+ /* scale priority and nice values from timeslices to -20..20 */
+ /* to make it look like a "normal" Unix priority/nice value */
+ priority = task_prio(task);
+@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ vsize,
+ mm ? get_mm_rss(mm) : 0,
+ rsslim,
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
++#else
+ mm ? (permitted ? mm->start_code : 1) : 0,
+ mm ? (permitted ? mm->end_code : 1) : 0,
+ (permitted && mm) ? mm->start_stack : 0,
++#endif
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+@@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+ {
+ unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
+- struct mm_struct *mm = get_task_mm(task);
++ struct mm_struct *mm;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (current->exec_id != m->exec_id) {
++ gr_log_badprocpid("statm");
++ return 0;
++ }
++#endif
++ mm = get_task_mm(task);
+ if (mm) {
+ size = task_statm(mm, &shared, &text, &data, &resident);
+ mmput(mm);
+@@ -544,3 +608,10 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+
+ return 0;
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char *buffer)
++{
++ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
++}
++#endif
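
The task_pax() helper added above prints a five-letter flag line into /proc/<pid>/status on PaX-enabled kernels: uppercase means the flag (PAGEEXEC, EMUTRAMP, MPROTECT, RANDMMAP, SEGMEXEC, in that order) is active for the task's mm, lowercase means it is not, and "-----" is shown for kernel threads with no mm. A sketch of a userspace reader for that line (it prints nothing on a vanilla kernel, where the line is absent):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* e.g. "PaX:\tPeMRs" on a grsec/PaX kernel */
		if (!strncmp(line, "PaX:", 4)) {
			fputs(line, stdout);
			break;
		}
	}
	fclose(f);
	return 0;
}
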
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 1ace83d..83b9247 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -107,6 +107,22 @@ struct pid_entry {
+ union proc_op op;
+ };
+
++struct getdents_callback {
++ struct linux_dirent __user * current_dir;
++ struct linux_dirent __user * previous;
++ struct file * file;
++ int count;
++ int error;
++};
++
++static int gr_fake_filldir(void * __buf, const char *name, int namlen,
++ loff_t offset, u64 ino, unsigned int d_type)
++{
++ struct getdents_callback * buf = (struct getdents_callback *) __buf;
++ buf->error = -EINVAL;
++ return 0;
++}
++
+ #define NOD(NAME, MODE, IOP, FOP, OP) { \
+ .name = (NAME), \
+ .len = sizeof(NAME) - 1, \
+@@ -194,31 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
+ return result;
+ }
+
+-static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+-{
+- struct mm_struct *mm;
+- int err;
+-
+- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+- if (err)
+- return ERR_PTR(err);
+-
+- mm = get_task_mm(task);
+- if (mm && mm != current->mm &&
+- !ptrace_may_access(task, mode)) {
+- mmput(mm);
+- mm = ERR_PTR(-EACCES);
+- }
+- mutex_unlock(&task->signal->cred_guard_mutex);
+-
+- return mm;
+-}
+-
+-struct mm_struct *mm_for_maps(struct task_struct *task)
+-{
+- return mm_access(task, PTRACE_MODE_READ);
+-}
+-
+ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ {
+ int res = 0;
+@@ -229,6 +220,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ if (!mm->arg_end)
+ goto out_mm; /* Shh! No looking before we're done */
+
++ if (gr_acl_handle_procpidmem(task))
++ goto out_mm;
++
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+@@ -256,12 +250,28 @@ out:
+ return res;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ {
+- struct mm_struct *mm = mm_for_maps(task);
++ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+ int res = PTR_ERR(mm);
+ if (mm && !IS_ERR(mm)) {
+ unsigned int nwords = 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ /* allow if we're currently ptracing this task */
++ if (PAX_RAND_FLAGS(mm) &&
++ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
++ mmput(mm);
++ return 0;
++ }
++#endif
++
+ do {
+ nwords += 2;
+ } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+@@ -275,7 +285,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
+ }
+
+
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /*
+ * Provides a wchan file via kallsyms in a proper one-value-per-file format.
+ * Returns the resolved symbol. If that fails, simply return the address.
+@@ -314,7 +324,7 @@ static void unlock_trace(struct task_struct *task)
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ }
+
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+
+ #define MAX_STACK_TRACE_DEPTH 64
+
+@@ -505,7 +515,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
+ return count;
+ }
+
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ {
+ long nr;
+@@ -534,7 +544,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
+ /************************************************************************/
+
+ /* permission checks */
+-static int proc_fd_access_allowed(struct inode *inode)
++static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
+ {
+ struct task_struct *task;
+ int allowed = 0;
+@@ -544,7 +554,10 @@ static int proc_fd_access_allowed(struct inode *inode)
+ */
+ task = get_proc_task(inode);
+ if (task) {
+- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++ if (log)
++ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
++ else
++ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+ put_task_struct(task);
+ }
+ return allowed;
+@@ -761,7 +774,7 @@ static const struct file_operations proc_single_file_operations = {
+ .release = single_release,
+ };
+
+-static int mem_open(struct inode* inode, struct file* file)
++static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
+ {
+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct mm_struct *mm;
+@@ -769,7 +782,12 @@ static int mem_open(struct inode* inode, struct file* file)
+ if (!task)
+ return -ESRCH;
+
+- mm = mm_access(task, PTRACE_MODE_ATTACH);
++ if (gr_acl_handle_procpidmem(task)) {
++ put_task_struct(task);
++ return -EPERM;
++ }
++
++ mm = mm_access(task, mode);
+ put_task_struct(task);
+
+ if (IS_ERR(mm))
+@@ -782,11 +800,24 @@ static int mem_open(struct inode* inode, struct file* file)
+ mmput(mm);
+ }
+
++ file->private_data = mm;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ file->f_version = current->exec_id;
++#endif
++
++ return 0;
++}
++
++static int mem_open(struct inode *inode, struct file *file)
++{
++ int ret;
++ ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
++
+ /* OK to pass negative loff_t, we can catch out-of-range */
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
+- file->private_data = mm;
+
+- return 0;
++ return ret;
+ }
+
+ static ssize_t mem_rw(struct file *file, char __user *buf,
+@@ -797,6 +828,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+ ssize_t copied;
+ char *page;
+
++#ifdef CONFIG_GRKERNSEC
++ if (write)
++ return -EPERM;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (file->f_version != current->exec_id) {
++ gr_log_badprocpid("mem");
++ return 0;
++ }
++#endif
++
+ if (!mm)
+ return 0;
+
+@@ -809,7 +851,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
+ goto free;
+
+ while (count > 0) {
+- int this_len = min_t(int, count, PAGE_SIZE);
++ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
+
+ if (write && copy_from_user(page, buf, this_len)) {
+ copied = -EFAULT;
+@@ -885,42 +927,49 @@ static const struct file_operations proc_mem_operations = {
+ .release = mem_release,
+ };
+
++static int environ_open(struct inode *inode, struct file *file)
++{
++ return __mem_open(inode, file, PTRACE_MODE_READ);
++}
++
+ static ssize_t environ_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ char *page;
+ unsigned long src = *ppos;
+- int ret = -ESRCH;
+- struct mm_struct *mm;
++ ssize_t ret = -ESRCH;
++ struct mm_struct *mm = file->private_data;
+
+- if (!task)
+- goto out_no_task;
++ if (!mm)
++ return 0;
++
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (file->f_version != current->exec_id) {
++ gr_log_badprocpid("environ");
++ return 0;
++ }
++#endif
+
+- ret = -ENOMEM;
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!page)
+- goto out;
+-
+-
+- mm = mm_for_maps(task);
+- ret = PTR_ERR(mm);
+- if (!mm || IS_ERR(mm))
+- goto out_free;
++ return -ENOMEM;
+
+ ret = 0;
++ if (!atomic_inc_not_zero(&mm->mm_users))
++ goto free;
+ while (count > 0) {
+- int this_len, retval, max_len;
++ size_t this_len, max_len;
++ int retval;
++
++ if (src >= (mm->env_end - mm->env_start))
++ break;
+
+ this_len = mm->env_end - (mm->env_start + src);
+
+- if (this_len <= 0)
+- break;
++ max_len = min_t(size_t, PAGE_SIZE, count);
++ this_len = min(max_len, this_len);
+
+- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+- this_len = (this_len > max_len) ? max_len : this_len;
+-
+- retval = access_process_vm(task, (mm->env_start + src),
++ retval = access_remote_vm(mm, (mm->env_start + src),
+ page, this_len, 0);
+
+ if (retval <= 0) {
+@@ -939,19 +988,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ count -= retval;
+ }
+ *ppos = src;
+-
+ mmput(mm);
+-out_free:
++
++free:
+ free_page((unsigned long) page);
+-out:
+- put_task_struct(task);
+-out_no_task:
+ return ret;
+ }
+
+ static const struct file_operations proc_environ_operations = {
++ .open = environ_open,
+ .read = environ_read,
+ .llseek = generic_file_llseek,
++ .release = mem_release,
+ };
+
+ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
+@@ -1519,7 +1567,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+ path_put(&nd->path);
+
+ /* Are we allowed to snoop on the tasks file descriptors? */
+- if (!proc_fd_access_allowed(inode))
++	if (!proc_fd_access_allowed(inode, 0))
+ goto out;
+
+ error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
+@@ -1558,8 +1606,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
+ struct path path;
+
+ /* Are we allowed to snoop on the tasks file descriptors? */
+- if (!proc_fd_access_allowed(inode))
+- goto out;
++ /* logging this is needed for learning on chromium to work properly,
++ but we don't want to flood the logs from 'ps' which does a readlink
++ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
++ CAP_SYS_PTRACE as it's not necessary for its basic functionality
++ */
++ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
++		if (!proc_fd_access_allowed(inode, 0))
++ goto out;
++ } else {
++		if (!proc_fd_access_allowed(inode, 1))
++ goto out;
++ }
+
+ error = PROC_I(inode)->op.proc_get_link(inode, &path);
+ if (error)
+@@ -1624,7 +1682,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = grsec_proc_gid;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ }
+ security_task_to_inode(task, inode);
+@@ -1642,6 +1704,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ struct inode *inode = dentry->d_inode;
+ struct task_struct *task;
+ const struct cred *cred;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++#endif
+
+ generic_fillattr(inode, stat);
+
+@@ -1649,13 +1714,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ stat->uid = 0;
+ stat->gid = 0;
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
++
++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
++ rcu_read_unlock();
++ return -ENOENT;
++ }
++
+ if (task) {
++ cred = __task_cred(task);
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ || in_group_p(grsec_proc_gid)
++#endif
++ ) {
++#endif
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+- cred = __task_cred(task);
+ stat->uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ stat->gid = grsec_proc_gid;
++#else
+ stat->gid = cred->egid;
++#endif
+ }
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ } else {
++ rcu_read_unlock();
++ return -ENOENT;
++ }
++#endif
+ }
+ rcu_read_unlock();
+ return 0;
+@@ -1692,11 +1785,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+
+ if (task) {
+ if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
++#endif
+ task_dumpable(task)) {
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = grsec_proc_gid;
++#else
+ inode->i_gid = cred->egid;
++#endif
+ rcu_read_unlock();
+ } else {
+ inode->i_uid = 0;
+@@ -1814,7 +1916,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
+ int fd = proc_fd(inode);
+
+ if (task) {
+- files = get_files_struct(task);
++ if (!gr_acl_handle_procpidmem(task))
++ files = get_files_struct(task);
+ put_task_struct(task);
+ }
+ if (files) {
+@@ -2082,11 +2185,21 @@ static const struct file_operations proc_fd_operations = {
+ */
+ static int proc_fd_permission(struct inode *inode, int mask)
+ {
++ struct task_struct *task;
+ int rv = generic_permission(inode, mask);
+- if (rv == 0)
+- return 0;
++
+ if (task_pid(current) == proc_pid(inode))
+ rv = 0;
++
++ task = get_proc_task(inode);
++ if (task == NULL)
++ return rv;
++
++ if (gr_acl_handle_procpidmem(task))
++ rv = -EACCES;
++
++ put_task_struct(task);
++
+ return rv;
+ }
+
+@@ -2196,6 +2309,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ /*
+ * Yes, it does not scale. And it should not. Don't add
+ * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2240,6 +2356,9 @@ static int proc_pident_readdir(struct file *filp,
+ if (!task)
+ goto out_no_task;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out;
++
+ ret = 0;
+ i = filp->f_pos;
+ switch (i) {
+@@ -2510,7 +2629,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+ static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
+ }
+@@ -2708,7 +2827,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+ REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
+ #endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUGO, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -2733,10 +2852,10 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+ DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
+@@ -2770,6 +2889,9 @@ static const struct pid_entry tgid_base_stuff[] = {
+ #ifdef CONFIG_HARDWALL
+ INF("hardwall", S_IRUGO, proc_pid_hardwall),
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
++#endif
+ };
+
+ static int proc_tgid_base_readdir(struct file * filp,
+@@ -2895,7 +3017,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
+ if (!inode)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ inode->i_gid = grsec_proc_gid;
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
++#else
+ inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+ inode->i_op = &proc_tgid_base_inode_operations;
+ inode->i_fop = &proc_tgid_base_operations;
+ inode->i_flags|=S_IMMUTABLE;
+@@ -2937,7 +3066,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+ if (!task)
+ goto out;
+
++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
++ goto out_put_task;
++
+ result = proc_pid_instantiate(dir, dentry, task, NULL);
++out_put_task:
+ put_task_struct(task);
+ out:
+ return result;
+@@ -3002,6 +3135,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ {
+ unsigned int nr;
+ struct task_struct *reaper;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *tmpcred = current_cred();
++ const struct cred *itercred;
++#endif
++ filldir_t __filldir = filldir;
+ struct tgid_iter iter;
+ struct pid_namespace *ns;
+
+@@ -3025,8 +3163,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ for (iter = next_tgid(ns, iter);
+ iter.task;
+ iter.tgid += 1, iter = next_tgid(ns, iter)) {
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ rcu_read_lock();
++ itercred = __task_cred(iter.task);
++#endif
++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(grsec_proc_gid)
++#endif
++ )
++#endif
++ )
++ __filldir = &gr_fake_filldir;
++ else
++ __filldir = filldir;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ rcu_read_unlock();
++#endif
+ filp->f_pos = iter.tgid + TGID_OFFSET;
+- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
+ put_task_struct(iter.task);
+ goto out;
+ }
+@@ -3054,7 +3211,7 @@ static const struct pid_entry tid_base_stuff[] = {
+ REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
+ #endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+ INF("syscall", S_IRUGO, proc_pid_syscall),
+ #endif
+ INF("cmdline", S_IRUGO, proc_pid_cmdline),
+@@ -3078,10 +3235,10 @@ static const struct pid_entry tid_base_stuff[] = {
+ #ifdef CONFIG_SECURITY
+ DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
+ #endif
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ INF("wchan", S_IRUGO, proc_pid_wchan),
+ #endif
+-#ifdef CONFIG_STACKTRACE
++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ ONE("stack", S_IRUGO, proc_pid_stack),
+ #endif
+ #ifdef CONFIG_SCHEDSTATS
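
Note how gr_fake_filldir (defined near the top of this fs/proc/base.c hunk) is used by proc_pid_readdir above: for each tgid the iterator visits, if the task should be hidden from the caller (chrooted, hidden by RBAC, or owned by another user under GRKERNSEC_PROC_USER/USERGROUP), the real filldir callback is swapped for the fake one, which accepts the entry but emits nothing, so hidden PIDs simply never appear in a /proc listing. A userspace model of the callback-swapping idea (names are illustrative):

#include <stdio.h>

typedef int (*filldir_t)(const char *name);

static int real_filldir(const char *name)
{
	printf("%s\n", name);
	return 0;
}

static int fake_filldir(const char *name)
{
	(void)name;	/* swallow the entry but report success */
	return 0;
}

struct task_model { int pid; int uid; };

static int visible(int caller_uid, int owner_uid)
{
	return caller_uid == 0 || caller_uid == owner_uid;
}

static void list_pids(filldir_t filldir, int caller_uid)
{
	static const struct task_model tasks[] = { {1, 0}, {4242, 1000}, {4243, 1001} };
	char name[16];
	unsigned int i;

	for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
		filldir_t cb = visible(caller_uid, tasks[i].uid) ? filldir : fake_filldir;
		snprintf(name, sizeof(name), "%d", tasks[i].pid);
		cb(name);
	}
}

int main(void)
{
	list_pids(real_filldir, 1000);	/* uid 1000 sees only pid 4242 */
	return 0;
}
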
+diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
+index 82676e3..5f8518a 100644
+--- a/fs/proc/cmdline.c
++++ b/fs/proc/cmdline.c
+@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
+
+ static int __init proc_cmdline_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
++#else
+ proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
++#endif
+ return 0;
+ }
+ module_init(proc_cmdline_init);
+diff --git a/fs/proc/devices.c b/fs/proc/devices.c
+index b143471..bb105e5 100644
+--- a/fs/proc/devices.c
++++ b/fs/proc/devices.c
+@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
+
+ static int __init proc_devices_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
++#else
+ proc_create("devices", 0, NULL, &proc_devinfo_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_devices_init);
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 00f08b3..2f14f30 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -18,12 +18,18 @@
+ #include <linux/module.h>
+ #include <linux/sysctl.h>
+ #include <linux/slab.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+
+ #include "internal.h"
+
++#ifdef CONFIG_PROC_SYSCTL
++extern const struct inode_operations proc_sys_inode_operations;
++extern const struct inode_operations proc_sys_dir_operations;
++#endif
++
+ static void proc_evict_inode(struct inode *inode)
+ {
+ struct proc_dir_entry *de;
+@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
+ ns_ops = PROC_I(inode)->ns_ops;
+ if (ns_ops && ns_ops->put)
+ ns_ops->put(PROC_I(inode)->ns);
++
++#ifdef CONFIG_PROC_SYSCTL
++ if (inode->i_op == &proc_sys_inode_operations ||
++ inode->i_op == &proc_sys_dir_operations)
++ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
++#endif
++
+ }
+
+ static struct kmem_cache * proc_inode_cachep;
+@@ -438,7 +451,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = grsec_proc_gid;
++#else
+ inode->i_gid = de->gid;
++#endif
+ }
+ if (de->size)
+ inode->i_size = de->size;
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 7838e5c..29697de 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -28,8 +28,6 @@ struct vmalloc_info {
+ unsigned long largest_chunk;
+ };
+
+-extern struct mm_struct *mm_for_maps(struct task_struct *);
+-
+ #ifdef CONFIG_MMU
+ #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+ extern void get_vmalloc_info(struct vmalloc_info *vmi);
+@@ -51,6 +49,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
++#endif
+ extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+
+ extern const struct file_operations proc_maps_operations;
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index d245cb2..f4e8498 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+ * the addresses in the elf_phdr on our list.
+ */
+ start = kc_offset_to_vaddr(*fpos - elf_buflen);
+- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
++ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
++ if (tsz > buflen)
+ tsz = buflen;
+-
++
+ while (buflen) {
+ struct kcore_list *m;
+
+@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+ kfree(elf_buf);
+ } else {
+ if (kern_addr_valid(start)) {
+- unsigned long n;
++ char *elf_buf;
++ mm_segment_t oldfs;
+
+- n = copy_to_user(buffer, (char *)start, tsz);
+- /*
+- * We cannot distingush between fault on source
+- * and fault on destination. When this happens
+- * we clear too and hope it will trigger the
+- * EFAULT again.
+- */
+- if (n) {
+- if (clear_user(buffer + tsz - n,
+- n))
++ elf_buf = kmalloc(tsz, GFP_KERNEL);
++ if (!elf_buf)
++ return -ENOMEM;
++ oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
++ set_fs(oldfs);
++ if (copy_to_user(buffer, elf_buf, tsz)) {
++ kfree(elf_buf);
+ return -EFAULT;
++ }
+ }
++ set_fs(oldfs);
++ kfree(elf_buf);
+ } else {
+ if (clear_user(buffer, tsz))
+ return -EFAULT;
+@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+
+ static int open_kcore(struct inode *inode, struct file *filp)
+ {
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++ return -EPERM;
++#endif
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (kcore_need_update)
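
The read_kcore() rewrite above replaces a direct copy_to_user() from an arbitrary kernel virtual address with a two-step copy: the data is first pulled into a kmalloc'd bounce buffer via __copy_from_user() under set_fs(KERNEL_DS), then pushed to userspace with copy_to_user(). A fault while reading the kernel side is thereby caught cleanly instead of being conflated with a fault on the user destination, which is exactly the problem the deleted comment describes; open_kcore() is additionally made to fail with -EPERM outright under GRKERNSEC_PROC_ADD/HIDESYM. The shape of the bounce-buffer idea, as a userspace sketch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_via_bounce(char *dst, const char *src, size_t len)
{
	char *bounce = malloc(len);

	if (!bounce)
		return -ENOMEM;
	memcpy(bounce, src, len);	/* kernel: __copy_from_user under KERNEL_DS */
	memcpy(dst, bounce, len);	/* kernel: copy_to_user to the real buffer */
	free(bounce);
	return 0;
}

int main(void)
{
	char out[6];

	if (copy_via_bounce(out, "kcore", sizeof(out)) == 0)
		puts(out);
	return 0;
}
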
+diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
+index 80e4645..53e5fcf 100644
+--- a/fs/proc/meminfo.c
++++ b/fs/proc/meminfo.c
+@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
+ vmi.used >> 10,
+ vmi.largest_chunk >> 10
+ #ifdef CONFIG_MEMORY_FAILURE
+- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
+ #endif
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
+index b1822dd..df622cb 100644
+--- a/fs/proc/nommu.c
++++ b/fs/proc/nommu.c
+@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ }
+
+ seq_putc(m, '\n');
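
The seq_path() calls in this and the other /proc show routines now pass "\n\\" as the escape set instead of "" or "\n": any newline or backslash in a file name is emitted as an octal escape, so a crafted d_name cannot inject what looks like an extra record into line-oriented /proc output (maps, nommu regions, numa_maps). A userspace model of the escaping, mirroring the kernel's mangle_path(), which replaces escape-set bytes with \ooo:

#include <stdio.h>
#include <string.h>

static void seq_path_model(const char *path, const char *esc)
{
	for (; *path; path++) {
		if (strchr(esc, *path))
			printf("\\%03o", (unsigned char)*path);
		else
			putchar(*path);
	}
	putchar('\n');
}

int main(void)
{
	const char *evil = "/tmp/x\n00000000-00000000 ---p forged";

	seq_path_model(evil, "");	/* old escape set: forged line gets through */
	seq_path_model(evil, "\n\\");	/* patched set: newline becomes \012 */
	return 0;
}
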
+diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
+index f738024..226e98e 100644
+--- a/fs/proc/proc_net.c
++++ b/fs/proc/proc_net.c
+@@ -23,6 +23,7 @@
+ #include <linux/nsproxy.h>
+ #include <net/net_namespace.h>
+ #include <linux/seq_file.h>
++#include <linux/grsecurity.h>
+
+ #include "internal.h"
+
+@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
+ struct task_struct *task;
+ struct nsproxy *ns;
+ struct net *net = NULL;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred = current_cred();
++#endif
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->fsuid)
++ return net;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->fsuid && !in_group_p(grsec_proc_gid))
++ return net;
++#endif
+
+ rcu_read_lock();
+ task = pid_task(proc_pid(dir), PIDTYPE_PID);
+@@ -228,7 +240,7 @@ static __net_exit void proc_net_ns_exit(struct net *net)
+ kfree(net->proc_net);
+ }
+
+-static struct pernet_operations __net_initdata proc_net_ns_ops = {
++static struct pernet_operations __net_initconst proc_net_ns_ops = {
+ .init = proc_net_ns_init,
+ .exit = proc_net_ns_exit,
+ };
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 0be1aa4..21298e5 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -7,13 +7,16 @@
+ #include <linux/proc_fs.h>
+ #include <linux/security.h>
+ #include <linux/namei.h>
++#include <linux/nsproxy.h>
+ #include "internal.h"
+
++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
++
+ static const struct dentry_operations proc_sys_dentry_operations;
+ static const struct file_operations proc_sys_file_operations;
+-static const struct inode_operations proc_sys_inode_operations;
++const struct inode_operations proc_sys_inode_operations;
+ static const struct file_operations proc_sys_dir_file_operations;
+-static const struct inode_operations proc_sys_dir_operations;
++const struct inode_operations proc_sys_dir_operations;
+
+ void proc_sys_poll_notify(struct ctl_table_poll *poll)
+ {
+@@ -128,8 +131,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+
+ err = NULL;
+ d_set_d_op(dentry, &proc_sys_dentry_operations);
++
++ gr_handle_proc_create(dentry, inode);
++
+ d_add(dentry, inode);
+
++ if (gr_handle_sysctl(p, MAY_EXEC))
++ err = ERR_PTR(-ENOENT);
++
+ out:
+ if (h)
+ sysctl_head_finish(h);
+@@ -162,6 +171,17 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+ if (!table->proc_handler)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ error = -EPERM;
++ if (write) {
++ if (current->nsproxy->net_ns != table->extra2) {
++ if (!capable(CAP_SYS_ADMIN))
++ goto out;
++ } else if (!nsown_capable(CAP_NET_ADMIN))
++ goto out;
++ }
++#endif
++
+ /* careful: calling conventions are nasty here */
+ res = count;
+ error = table->proc_handler(table, write, buf, &res, ppos);
+@@ -259,6 +279,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
+ return -ENOMEM;
+ } else {
+ d_set_d_op(child, &proc_sys_dentry_operations);
++
++ gr_handle_proc_create(child, inode);
++
+ d_add(child, inode);
+ }
+ } else {
+@@ -287,6 +310,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
+ if (*pos < file->f_pos)
+ continue;
+
++ if (gr_handle_sysctl(table, 0))
++ continue;
++
+ res = proc_sys_fill_cache(file, dirent, filldir, head, table);
+ if (res)
+ return res;
+@@ -412,6 +438,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
++ if (table && gr_handle_sysctl(table, MAY_EXEC))
++ return -ENOENT;
++
+ generic_fillattr(inode, stat);
+ if (table)
+ stat->mode = (stat->mode & S_IFMT) | table->mode;
+@@ -434,13 +463,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
+ .llseek = generic_file_llseek,
+ };
+
+-static const struct inode_operations proc_sys_inode_operations = {
++const struct inode_operations proc_sys_inode_operations = {
+ .permission = proc_sys_permission,
+ .setattr = proc_sys_setattr,
+ .getattr = proc_sys_getattr,
+ };
+
+-static const struct inode_operations proc_sys_dir_operations = {
++const struct inode_operations proc_sys_dir_operations = {
+ .lookup = proc_sys_lookup,
+ .permission = proc_sys_permission,
+ .setattr = proc_sys_setattr,
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index 03102d9..4ae347e 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -121,7 +121,15 @@ void __init proc_root_init(void)
+ #ifdef CONFIG_PROC_DEVICETREE
+ proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
++#endif
++#else
+ proc_mkdir("bus", NULL);
++#endif
+ proc_sys_init();
+ }
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index ef1740d..9a18b87 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -11,12 +11,19 @@
+ #include <linux/rmap.h>
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/elf.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include "internal.h"
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
++ (_mm->pax_flags & MF_PAX_RANDMMAP || \
++ _mm->pax_flags & MF_PAX_SEGMEXEC))
++#endif
++
+ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ {
+ unsigned long data, text, lib, swap;
+@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ "VmExe:\t%8lu kB\n"
+ "VmLib:\t%8lu kB\n"
+ "VmPTE:\t%8lu kB\n"
+- "VmSwap:\t%8lu kB\n",
+- hiwater_vm << (PAGE_SHIFT-10),
++ "VmSwap:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++ ,hiwater_vm << (PAGE_SHIFT-10),
+ (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+ mm->locked_vm << (PAGE_SHIFT-10),
+ mm->pinned_vm << (PAGE_SHIFT-10),
+@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ data << (PAGE_SHIFT-10),
+ mm->stack_vm << (PAGE_SHIFT-10), text, lib,
+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
+- swap << (PAGE_SHIFT-10));
++ swap << (PAGE_SHIFT-10)
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
++ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
++#else
++ , mm->context.user_cs_base
++ , mm->context.user_cs_limit
++#endif
++#endif
++
++ );
+ }
+
+ unsigned long task_vsize(struct mm_struct *mm)
+@@ -125,7 +149,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ if (!priv->task)
+ return ERR_PTR(-ESRCH);
+
+- mm = mm_for_maps(priv->task);
++ mm = mm_access(priv->task, PTRACE_MODE_READ);
+ if (!mm || IS_ERR(mm))
+ return mm;
+ down_read(&mm->mmap_sem);
+@@ -227,13 +251,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
++ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
++#else
+ start = vma->vm_start;
+- if (stack_guard_page_start(vma, start))
+- start += PAGE_SIZE;
+ end = vma->vm_end;
+- if (stack_guard_page_end(vma, end))
+- end -= PAGE_SIZE;
++#endif
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+ start,
+@@ -242,7 +266,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+ flags & VM_MAYSHARE ? 's' : 'p',
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
++#else
+ pgoff,
++#endif
+ MAJOR(dev), MINOR(dev), ino, &len);
+
+ /*
+@@ -251,7 +279,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ */
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "\n");
++ seq_path(m, &file->f_path, "\n\\");
+ } else {
+ const char *name = arch_vma_name(vma);
+ if (!name) {
+@@ -259,8 +287,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
+ if (vma->vm_start <= mm->brk &&
+ vma->vm_end >= mm->start_brk) {
+ name = "[heap]";
+- } else if (vma->vm_start <= mm->start_stack &&
+- vma->vm_end >= mm->start_stack) {
++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
++ (vma->vm_start <= mm->start_stack &&
++ vma->vm_end >= mm->start_stack)) {
+ name = "[stack]";
+ }
+ } else {
+@@ -281,6 +310,13 @@ static int show_map(struct seq_file *m, void *v)
+ struct proc_maps_private *priv = m->private;
+ struct task_struct *task = priv->task;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (current->exec_id != m->exec_id) {
++ gr_log_badprocpid("maps");
++ return 0;
++ }
++#endif
++
+ show_map_vma(m, vma);
+
+ if (m->count < m->size) /* vma is copied successfully */
+@@ -437,12 +473,23 @@ static int show_smap(struct seq_file *m, void *v)
+ .private = &mss,
+ };
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (current->exec_id != m->exec_id) {
++ gr_log_badprocpid("smaps");
++ return 0;
++ }
++#endif
+ memset(&mss, 0, sizeof mss);
+- mss.vma = vma;
+- /* mmap_sem is held in m_start */
+- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+-
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
++#endif
++ mss.vma = vma;
++ /* mmap_sem is held in m_start */
++ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ }
++#endif
+ show_map_vma(m, vma);
+
+ seq_printf(m,
+@@ -460,7 +507,11 @@ static int show_smap(struct seq_file *m, void *v)
+ "KernelPageSize: %8lu kB\n"
+ "MMUPageSize: %8lu kB\n"
+ "Locked: %8lu kB\n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
++#else
+ (vma->vm_end - vma->vm_start) >> 10,
++#endif
+ mss.resident >> 10,
+ (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+ mss.shared_clean >> 10,
+@@ -798,7 +849,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ if (!pm.buffer)
+ goto out_task;
+
+- mm = mm_for_maps(task);
++ mm = mm_access(task, PTRACE_MODE_READ);
+ ret = PTR_ERR(mm);
+ if (!mm || IS_ERR(mm))
+ goto out_free;
+@@ -1024,6 +1075,13 @@ static int show_numa_map(struct seq_file *m, void *v)
+ int n;
+ char buffer[50];
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (current->exec_id != m->exec_id) {
++ gr_log_badprocpid("numa_maps");
++ return 0;
++ }
++#endif
++
+ if (!mm)
+ return 0;
+
+@@ -1041,11 +1099,15 @@ static int show_numa_map(struct seq_file *m, void *v)
+ mpol_to_str(buffer, sizeof(buffer), pol, 0);
+ mpol_cond_put(pol);
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
++#else
+ seq_printf(m, "%08lx %s", vma->vm_start, buffer);
++#endif
+
+ if (file) {
+ seq_printf(m, " file=");
+- seq_path(m, &file->f_path, "\n\t= ");
++ seq_path(m, &file->f_path, "\n\t\\= ");
+ } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ seq_printf(m, " heap");
+ } else if (vma->vm_start <= mm->start_stack &&
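
The PAX_RAND_FLAGS() checks woven through show_map_vma(), show_smap() and show_numa_map() above zero out vma start/end addresses and file offsets whenever the mm belongs to another process and was randomized (RANDMMAP/SEGMEXEC), so /proc/<pid>/maps cannot be used to leak another task's ASLR layout; a task's own mappings are still shown with real addresses. A userspace model of the scrubbing decision (all names illustrative):

#include <stdio.h>

struct vma_model { unsigned long start, end, pgoff; };

static int hide_addresses(int is_own_mm, int randomized)
{
	return randomized && !is_own_mm;	/* PAX_RAND_FLAGS(mm) in the patch */
}

static void show_map_line(const struct vma_model *v, int is_own_mm, int randomized)
{
	int hide = hide_addresses(is_own_mm, randomized);

	printf("%08lx-%08lx %08lx\n",
	       hide ? 0UL : v->start,
	       hide ? 0UL : v->end,
	       hide ? 0UL : v->pgoff);
}

int main(void)
{
	struct vma_model v = { 0x7f120000UL, 0x7f141000UL, 0UL };

	show_map_line(&v, 0, 1);	/* someone else's randomized vma: zeros */
	show_map_line(&v, 1, 1);	/* your own vma: real addresses */
	return 0;
}
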
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 980de54..78b2faa 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
+ else
+ bytes += kobjsize(mm);
+
+- if (current->fs && current->fs->users > 1)
++ if (current->fs && atomic_read(&current->fs->users) > 1)
+ sbytes += kobjsize(current->fs);
+ else
+ bytes += kobjsize(current->fs);
+@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+
+ if (file) {
+ pad_len_spaces(m, len);
+- seq_path(m, &file->f_path, "");
++ seq_path(m, &file->f_path, "\n\\");
+ } else if (mm) {
+ if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+@@ -201,7 +201,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ if (!priv->task)
+ return ERR_PTR(-ESRCH);
+
+- mm = mm_for_maps(priv->task);
++ mm = mm_access(priv->task, PTRACE_MODE_READ);
+ if (!mm || IS_ERR(mm)) {
+ put_task_struct(priv->task);
+ priv->task = NULL;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index b0f450a..8ba3e5d 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -97,9 +97,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
+ nr_bytes = count;
+
+ /* If pfn is not ram, return zeros for sparse dump files */
+- if (pfn_is_ram(pfn) == 0)
+- memset(buf, 0, nr_bytes);
+- else {
++ if (pfn_is_ram(pfn) == 0) {
++ if (userbuf) {
++ if (clear_user((char __force_user *)buf, nr_bytes))
++ return -EFAULT;
++ } else
++ memset(buf, 0, nr_bytes);
++ } else {
+ tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+ offset, userbuf);
+ if (tmp < 0)
+@@ -184,7 +188,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
+ tsz = nr_bytes;
+
+ while (buflen) {
+- tmp = read_from_oldmem(buffer, tsz, &start, 1);
++ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
+ if (tmp < 0)
+ return tmp;
+ buflen -= tsz;
+diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
+index 3bdd214..e570832 100644
+--- a/fs/qnx4/inode.c
++++ b/fs/qnx4/inode.c
+@@ -473,6 +473,7 @@ static struct file_system_type qnx4_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("qnx4");
+
+ static int __init init_qnx4_fs(void)
+ {
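
The MODULE_ALIAS_FS("qnx4") line (and the matching additions to reiserfs, romfs and squashfs below) comes from the upstream "fs: Limit sys_mount to only request filesystem modules" series carried in 3.2 stable: mounting a not-yet-loaded filesystem makes the kernel request module "fs-<name>", and the alias lets modprobe map that to the right .ko. Upstream defines the macro as MODULE_ALIAS("fs-" NAME); a trivial userspace model of the string it produces:

#include <stdio.h>

#define MODULE_ALIAS_FS(NAME) ("fs-" NAME)	/* upstream: MODULE_ALIAS("fs-" NAME) */

int main(void)
{
	/* mount -t qnx4 ... => request_module("fs-qnx4") => modprobe
	 * resolves the alias to qnx4.ko */
	printf("request_module(\"%s\")\n", MODULE_ALIAS_FS("qnx4"));
	return 0;
}
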
+diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
+index d67908b..d13f6a6 100644
+--- a/fs/quota/netlink.c
++++ b/fs/quota/netlink.c
+@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
+ void quota_send_warning(short type, unsigned int id, dev_t dev,
+ const char warntype)
+ {
+- static atomic_t seq;
++ static atomic_unchecked_t seq;
+ struct sk_buff *skb;
+ void *msg_head;
+ int ret;
+@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
+ "VFS: Not enough memory to send quota warning.\n");
+ return;
+ }
+- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
++ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
+ &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+ if (!msg_head) {
+ printk(KERN_ERR
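
quota_send_warning()'s sequence number is switched to atomic_unchecked_t with atomic_add_return_unchecked(). Under PaX's REFCOUNT feature, plain atomic_t operations trap on overflow to stop reference-count wraps; counters like this netlink sequence number are allowed to wrap by design, so they opt out through the _unchecked variants. A userspace model of a deliberately wrap-tolerant sequence counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;	/* wraps modulo 2^32 by design; must never trap */

static unsigned int next_seq(void)
{
	/* kernel: atomic_add_return_unchecked(1, &seq) */
	return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
	atomic_store(&seq, 0xffffffffu);	/* force the wrap */
	printf("%u\n", next_seq());		/* prints 0: a harmless wrap */
	return 0;
}
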
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 5ad4248..492b277 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -956,6 +956,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
+ if (retval > 0) {
+ add_rchar(current, retval);
+ add_wchar(current, retval);
++ fsnotify_access(in_file);
++ fsnotify_modify(out_file);
+ }
+
+ inc_syscr(current);
+diff --git a/fs/readdir.c b/fs/readdir.c
+index 356f715..c918d38 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -17,6 +17,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
+
+ #include <asm/uaccess.h>
+
+@@ -67,6 +68,7 @@ struct old_linux_dirent {
+
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
++ struct file * file;
+ int result;
+ };
+
+@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
+ buf->result = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+
+ buf.result = 0;
+ buf.dirent = dirent;
++ buf.file = file;
+
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (buf.result)
+@@ -142,6 +149,7 @@ struct linux_dirent {
+ struct getdents_callback {
+ struct linux_dirent __user * current_dir;
+ struct linux_dirent __user * previous;
++ struct file * file;
+ int count;
+ int error;
+ };
+@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+ buf->error = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
++ buf.file = file;
+
+ error = vfs_readdir(file, filldir, &buf);
+ if (error >= 0)
+@@ -229,6 +242,7 @@ out:
+ struct getdents_callback64 {
+ struct linux_dirent64 __user * current_dir;
+ struct linux_dirent64 __user * previous;
++ struct file *file;
+ int count;
+ int error;
+ };
+@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+
+ buf.current_dir = dirent;
+ buf.previous = NULL;
++ buf.file = file;
+ buf.count = count;
+ buf.error = 0;
+
+@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+- typeof(lastdirent->d_off) d_off = file->f_pos;
++ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
+ if (__put_user(d_off, &lastdirent->d_off))
+ error = -EFAULT;
+ else
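
Two things happen in the fs/readdir.c hunks above: each getdents callback buffer gains a struct file * so gr_acl_handle_filldir() can consult RBAC policy for the directory being listed (entries it rejects are silently skipped), and the d_off temporary in getdents64 switches to typeof(((struct linux_dirent64 *)0)->d_off), which names the member's type without needing an object of the struct. That null-pointer-member typeof idiom (a GNU C extension, also used by the kernel's container_of()) in isolation:

#include <stdio.h>

struct dirent64_model {
	long long d_off;
};

int main(void)
{
	/* Only the type is taken; the null pointer is never dereferenced. */
	typeof(((struct dirent64_model *)0)->d_off) off = 1234;

	printf("%lld (%zu bytes)\n", off, sizeof(off));
	return 0;
}
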
+diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
+index 133e935..77359db 100644
+--- a/fs/reiserfs/dir.c
++++ b/fs/reiserfs/dir.c
+@@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ next_pos = deh_offset(deh) + 1;
+
+ if (item_moved(&tmp_ih, &path_to_entry)) {
++ set_cpu_key_k_offset(&pos_key,
++ next_pos);
+ goto research;
+ }
+ } /* for */
+diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
+index 60c0804..d814f98 100644
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
+ return;
+ }
+
+- atomic_inc(&(fs_generation(tb->tb_sb)));
++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
+ do_balance_starts(tb);
+
+ /* balance leaf returns 0 except if combining L R and S into
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index fe677c0..2a15fb2 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1816,11 +1816,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+ TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
+ memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
+ args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
+- if (insert_inode_locked4(inode, args.objectid,
+- reiserfs_find_actor, &args) < 0) {
++
++ reiserfs_write_unlock(inode->i_sb);
++ err = insert_inode_locked4(inode, args.objectid,
++ reiserfs_find_actor, &args);
++ reiserfs_write_lock(inode->i_sb);
++ if (err) {
+ err = -EINVAL;
+ goto out_bad_inode;
+ }
++
+ if (old_format_only(sb))
+ /* not a perfect generation count, as object ids can be reused, but
+ ** this is as good as reiserfs can do right now.
+diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
+index 7a99811..a7c96c4 100644
+--- a/fs/reiserfs/procfs.c
++++ b/fs/reiserfs/procfs.c
+@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
+ "SMALL_TAILS " : "NO_TAILS ",
+ replay_only(sb) ? "REPLAY_ONLY " : "",
+ convert_reiserfs(sb) ? "CONV " : "",
+- atomic_read(&r->s_generation_counter),
++ atomic_read_unchecked(&r->s_generation_counter),
+ SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
+ SF(s_do_balance), SF(s_unneeded_left_neighbor),
+ SF(s_good_search_by_key_reada), SF(s_bmaps),
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 569498a..0886e50f 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -2295,6 +2295,7 @@ struct file_system_type reiserfs_fs_type = {
+ .kill_sb = reiserfs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("reiserfs");
+
+ MODULE_DESCRIPTION("ReiserFS journaled filesystem");
+ MODULE_AUTHOR("Hans Reiser <reiser@namesys.com>");
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 04eecc4..33f74d0 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
+ static int chown_one_xattr(struct dentry *dentry, void *data)
+ {
+ struct iattr *attrs = data;
+- return reiserfs_setattr(dentry, attrs);
++ int ia_valid = attrs->ia_valid;
++ int err;
++
++ /*
++ * We only want the ownership bits. Otherwise, we'll do
++ * things like change a directory to a regular file if
++ * ATTR_MODE is set.
++ */
++ attrs->ia_valid &= (ATTR_UID|ATTR_GID);
++ err = reiserfs_setattr(dentry, attrs);
++ attrs->ia_valid = ia_valid;
++
++ return err;
+ }
+
+ /* No i_mutex, but the inode is unconnected. */
+diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
+index 6da0396..fc338f3 100644
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -429,6 +429,9 @@ int reiserfs_acl_chmod(struct inode *inode)
+ int depth;
+ int error;
+
++ if (IS_PRIVATE(inode))
++ return 0;
++
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+diff --git a/fs/romfs/super.c b/fs/romfs/super.c
+index 8b4089f..2575128 100644
+--- a/fs/romfs/super.c
++++ b/fs/romfs/super.c
+@@ -602,6 +602,7 @@ static struct file_system_type romfs_fs_type = {
+ .kill_sb = romfs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("romfs");
+
+ /*
+ * inode storage initialiser
+diff --git a/fs/select.c b/fs/select.c
+index d33418f..2a5345e 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -20,6 +20,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
++#include <linux/security.h>
+ #include <linux/personality.h> /* for STICKY_TIMEOUTS */
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+ struct poll_list *walk = head;
+ unsigned long todo = nfds;
+
++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
+ if (nfds > rlimit(RLIMIT_NOFILE))
+ return -EINVAL;
+
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index dba43c3..4e25536 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -9,6 +9,7 @@
+ #include <linux/module.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
+ memset(p, 0, sizeof(*p));
+ mutex_init(&p->lock);
+ p->op = op;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ p->exec_id = current->exec_id;
++#endif
+
+ /*
+ * Wrappers around seq_open(e.g. swaps_open) need to be
+@@ -76,7 +80,11 @@ static int traverse(struct seq_file *m, loff_t offset)
+ return 0;
+ }
+ if (!m->buf) {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
++#else
+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++#endif
+ if (!m->buf)
+ return -ENOMEM;
+ }
+@@ -116,7 +124,11 @@ static int traverse(struct seq_file *m, loff_t offset)
+ Eoverflow:
+ m->op->stop(m, p);
+ kfree(m->buf);
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
++#else
+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++#endif
+ return !m->buf ? -ENOMEM : -EAGAIN;
+ }
+
+@@ -132,7 +144,7 @@ Eoverflow:
+ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ {
+ struct seq_file *m = file->private_data;
+- size_t copied = 0;
++ ssize_t copied = 0;
+ loff_t pos;
+ size_t n;
+ void *p;
+@@ -169,7 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ m->version = file->f_version;
+ /* grab buffer if we didn't have one */
+ if (!m->buf) {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
++#else
+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
++#endif
+ if (!m->buf)
+ goto Enomem;
+ }
+@@ -210,7 +226,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ goto Fill;
+ m->op->stop(m, p);
+ kfree(m->buf);
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
++#else
+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
++#endif
+ if (!m->buf)
+ goto Enomem;
+ m->count = 0;
+@@ -549,7 +569,7 @@ static void single_stop(struct seq_file *p, void *v)
+ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
+ void *data)
+ {
+- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
++ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
+ int res = -ENOMEM;
+
+ if (op) {
+diff --git a/fs/splice.c b/fs/splice.c
+index 58ab918..24425c2 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ pipe_lock(pipe);
+
+ for (;;) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ do_wakeup = 0;
+ }
+
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -564,7 +564,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
++ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -579,7 +579,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+ old_fs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- res = vfs_write(file, (const char __user *)buf, count, &pos);
++ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
+ set_fs(old_fs);
+
+ return res;
+@@ -631,7 +631,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ goto err;
+
+ this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+- vec[i].iov_base = (void __user *) page_address(page);
++ vec[i].iov_base = (void __force_user *) page_address(page);
+ vec[i].iov_len = this_len;
+ spd.pages[i] = page;
+ spd.nr_pages++;
+@@ -855,10 +855,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+ int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
+ {
+ while (!pipe->nrbufs) {
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ return 0;
+
+- if (!pipe->waiting_writers && sd->num_spliced)
++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
+ return 0;
+
+ if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1191,7 +1191,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ * out of the pipe right after the splice_to_pipe(). So set
+ * PIPE_READERS appropriately.
+ */
+- pipe->readers = 1;
++ atomic_set(&pipe->readers, 1);
+
+ current->splice_pipe = pipe;
+ }
+@@ -1459,6 +1459,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+
+ partial[buffers].offset = off;
+ partial[buffers].len = plen;
++ partial[buffers].private = 0;
+
+ off = 0;
+ len -= plen;
+@@ -1744,9 +1745,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+ ret = -ERESTARTSYS;
+ break;
+ }
+- if (!pipe->writers)
++ if (!atomic_read(&pipe->writers))
+ break;
+- if (!pipe->waiting_writers) {
++ if (!atomic_read(&pipe->waiting_writers)) {
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+@@ -1778,7 +1779,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+ pipe_lock(pipe);
+
+ while (pipe->nrbufs >= pipe->buffers) {
+- if (!pipe->readers) {
++ if (!atomic_read(&pipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ ret = -EPIPE;
+ break;
+@@ -1791,9 +1792,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+ ret = -ERESTARTSYS;
+ break;
+ }
+- pipe->waiting_writers++;
++ atomic_inc(&pipe->waiting_writers);
+ pipe_wait(pipe);
+- pipe->waiting_writers--;
++ atomic_dec(&pipe->waiting_writers);
+ }
+
+ pipe_unlock(pipe);
+@@ -1829,14 +1830,14 @@ retry:
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+ break;
+ }
+
+- if (!ipipe->nrbufs && !ipipe->writers)
++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
+ break;
+
+ /*
+@@ -1933,7 +1934,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+ pipe_double_lock(ipipe, opipe);
+
+ do {
+- if (!opipe->readers) {
++ if (!atomic_read(&opipe->readers)) {
+ send_sig(SIGPIPE, current, 0);
+ if (!ret)
+ ret = -EPIPE;
+@@ -1978,7 +1979,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+ * return EAGAIN if we have the potential of some data in the
+ * future, otherwise just return 0
+ */
+- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
+ ret = -EAGAIN;
+
+ pipe_unlock(ipipe);
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 4619247..e3910db 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -481,6 +481,7 @@ static struct file_system_type squashfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV
+ };
++MODULE_ALIAS_FS("squashfs");
+
+ static const struct super_operations squashfs_super_ops = {
+ .alloc_inode = squashfs_alloc_inode,
+diff --git a/fs/stat.c b/fs/stat.c
+index 7b21801..ee8fe9b 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ stat->gid = inode->i_gid;
+ stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
+- stat->atime = inode->i_atime;
+- stat->mtime = inode->i_mtime;
++ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++ stat->atime = inode->i_ctime;
++ stat->mtime = inode->i_ctime;
++ } else {
++ stat->atime = inode->i_atime;
++ stat->mtime = inode->i_mtime;
++ }
+ stat->ctime = inode->i_ctime;
+ stat->blksize = (1 << inode->i_blkbits);
+ stat->blocks = inode->i_blocks;
+@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ if (retval)
+ return retval;
+
+- if (inode->i_op->getattr)
+- return inode->i_op->getattr(mnt, dentry, stat);
++ if (inode->i_op->getattr) {
++ retval = inode->i_op->getattr(mnt, dentry, stat);
++ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++ stat->atime = stat->ctime;
++ stat->mtime = stat->ctime;
++ }
++ return retval;
++ }
+
+ generic_fillattr(inode, stat);
+ return 0;
+diff --git a/fs/super.c b/fs/super.c
+index 2a698f6..056eff7 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -295,19 +295,19 @@ EXPORT_SYMBOL(deactivate_super);
+ * and want to turn it into a full-blown active reference. grab_super()
+ * is called with sb_lock held and drops it. Returns 1 in case of
+ * success, 0 if we had failed (superblock contents was already dead or
+- * dying when grab_super() had been called).
++ * dying when grab_super() had been called). Note that this is only
++ * called for superblocks not in rundown mode (== ones still on ->fs_supers
++ * of their type), so increment of ->s_count is OK here.
+ */
+ static int grab_super(struct super_block *s) __releases(sb_lock)
+ {
+- if (atomic_inc_not_zero(&s->s_active)) {
+- spin_unlock(&sb_lock);
+- return 1;
+- }
+- /* it's going away */
+ s->s_count++;
+ spin_unlock(&sb_lock);
+- /* wait for it to die */
+ down_write(&s->s_umount);
++ if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
++ put_super(s);
++ return 1;
++ }
+ up_write(&s->s_umount);
+ put_super(s);
+ return 0;
+@@ -436,11 +436,6 @@ retry:
+ destroy_super(s);
+ s = NULL;
+ }
+- down_write(&old->s_umount);
+- if (unlikely(!(old->s_flags & MS_BORN))) {
+- deactivate_locked_super(old);
+- goto retry;
+- }
+ return old;
+ }
+ }
+@@ -650,10 +645,10 @@ restart:
+ if (list_empty(&sb->s_instances))
+ continue;
+ if (sb->s_bdev == bdev) {
+- if (grab_super(sb)) /* drops sb_lock */
+- return sb;
+- else
++ if (!grab_super(sb))
+ goto restart;
++ up_write(&sb->s_umount);
++ return sb;
+ }
+ }
+ spin_unlock(&sb_lock);
+diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
+index a475983..3aab767 100644
+--- a/fs/sysfs/bin.c
++++ b/fs/sysfs/bin.c
+@@ -233,13 +233,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return ret;
+ }
+
+-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write)
++static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
++ void *buf, size_t len, int write)
+ {
+ struct file *file = vma->vm_file;
+ struct bin_buffer *bb = file->private_data;
+ struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+- int ret;
++ ssize_t ret;
+
+ if (!bb->vm_ops)
+ return -EINVAL;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index e756bc4..684ab5b71 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
+ struct sysfs_dirent *sd;
+ int rc;
+
++#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
++ const char *parent_name = parent_sd->s_name;
++
++ mode = S_IFDIR | S_IRWXU;
++
++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
++ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
++ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++#endif
++
+ /* allocate */
+ sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
+ if (!sd)
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 779789a..f58193c 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
+
+ struct sysfs_open_dirent {
+ atomic_t refcnt;
+- atomic_t event;
++ atomic_unchecked_t event;
+ wait_queue_head_t poll;
+ struct list_head buffers; /* goes through sysfs_buffer.list */
+ };
+@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
+ if (!sysfs_get_active(attr_sd))
+ return -ENODEV;
+
+- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
++ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
+ count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
+
+ sysfs_put_active(attr_sd);
+@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
+ return -ENOMEM;
+
+ atomic_set(&new_od->refcnt, 0);
+- atomic_set(&new_od->event, 1);
++ atomic_set_unchecked(&new_od->event, 1);
+ init_waitqueue_head(&new_od->poll);
+ INIT_LIST_HEAD(&new_od->buffers);
+ goto retry;
+@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
+
+ sysfs_put_active(attr_sd);
+
+- if (buffer->event != atomic_read(&od->event))
++ if (buffer->event != atomic_read_unchecked(&od->event))
+ goto trigger;
+
+ return DEFAULT_POLLMASK;
+@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
+
+ od = sd->s_attr.open;
+ if (od) {
+- atomic_inc(&od->event);
++ atomic_inc_unchecked(&od->event);
+ wake_up_interruptible(&od->poll);
+ }
+
+diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
+index a7ac78f..02158e1 100644
+--- a/fs/sysfs/symlink.c
++++ b/fs/sysfs/symlink.c
+@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+
+ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+ {
+- char *page = nd_get_link(nd);
++ const char *page = nd_get_link(nd);
+ if (!IS_ERR(page))
+ free_page((unsigned long)page);
+ }
+diff --git a/fs/sysv/super.c b/fs/sysv/super.c
+index f60c196..b2d8fdc 100644
+--- a/fs/sysv/super.c
++++ b/fs/sysv/super.c
+@@ -545,6 +545,7 @@ static struct file_system_type sysv_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("sysv");
+
+ static struct file_system_type v7_fs_type = {
+ .owner = THIS_MODULE,
+@@ -553,6 +554,8 @@ static struct file_system_type v7_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("v7");
++MODULE_ALIAS("v7");
+
+ static int __init init_sysv_fs(void)
+ {
+@@ -586,5 +589,4 @@ static void __exit exit_sysv_fs(void)
+
+ module_init(init_sysv_fs)
+ module_exit(exit_sysv_fs)
+-MODULE_ALIAS("v7");
+ MODULE_LICENSE("GPL");
+diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
+index bb55cdb..e9ebb8a 100644
+--- a/fs/sysv/sysv.h
++++ b/fs/sysv/sysv.h
+@@ -189,7 +189,7 @@ static inline u32 PDP_swab(u32 x)
+ #endif
+ }
+
+-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
++static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+ {
+ if (sbi->s_bytesex == BYTESEX_PDP)
+ return PDP_swab((__force __u32)n);
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 9228950..bbad895 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -156,7 +156,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
+ return err;
+ }
+
+-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
++int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
+ {
+ int err;
+
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 2f467e5..3222f9b 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -2192,6 +2192,7 @@ static struct file_system_type ubifs_fs_type = {
+ .mount = ubifs_mount,
+ .kill_sb = kill_ubifs_super,
+ };
++MODULE_ALIAS_FS("ubifs");
+
+ /*
+ * Inode slab cache constructor.
+diff --git a/fs/udf/misc.c b/fs/udf/misc.c
+index c175b4d..8f36a16 100644
+--- a/fs/udf/misc.c
++++ b/fs/udf/misc.c
+@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
+
+ u8 udf_tag_checksum(const struct tag *t)
+ {
+- u8 *data = (u8 *)t;
++ const u8 *data = (const u8 *)t;
+ u8 checksum = 0;
+ int i;
+ for (i = 0; i < sizeof(struct tag); ++i)
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index f66439e..9af489f 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -116,6 +116,7 @@ static struct file_system_type udf_fstype = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("udf");
+
+ static struct kmem_cache *udf_inode_cachep;
+
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index 3915ade..00fcbf4 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -1484,6 +1484,7 @@ static struct file_system_type ufs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("ufs");
+
+ static int __init init_ufs_fs(void)
+ {
+diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
+index 8d974c4..4c19a7b 100644
+--- a/fs/ufs/swab.h
++++ b/fs/ufs/swab.h
+@@ -22,7 +22,7 @@ enum {
+ BYTESEX_BE
+ };
+
+-static inline u64
++static inline u64 __intentional_overflow(-1)
+ fs64_to_cpu(struct super_block *sbp, __fs64 n)
+ {
+ if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
+ return (__force __fs64)cpu_to_be64(n);
+ }
+
+-static inline u32
++static inline u32 __intentional_overflow(-1)
+ fs32_to_cpu(struct super_block *sbp, __fs32 n)
+ {
+ if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+@@ -76,7 +76,7 @@ fs32_sub(struct super_block *sbp, __fs32 *n, int d)
+ be32_add_cpu((__be32 *)n, -d);
+ }
+
+-static inline u16
++static inline u16 __intentional_overflow(-1)
+ fs16_to_cpu(struct super_block *sbp, __fs16 n)
+ {
+ if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
+diff --git a/fs/utimes.c b/fs/utimes.c
+index ba653f3..06ea4b1 100644
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -1,6 +1,7 @@
+ #include <linux/compiler.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/security.h>
+ #include <linux/linkage.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
+ goto mnt_drop_write_and_out;
+ }
+ }
++
++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
++ error = -EACCES;
++ goto mnt_drop_write_and_out;
++ }
++
+ mutex_lock(&inode->i_mutex);
+ error = notify_change(path->dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 67583de..328e065 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -225,6 +225,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ return rc;
+ }
+
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t
++pax_getxattr(struct dentry *dentry, void *value, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ ssize_t error;
++
++ error = inode_permission(inode, MAY_EXEC);
++ if (error)
++ return error;
++
++ if (inode->i_op->getxattr)
++ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
++ else
++ error = -EOPNOTSUPP;
++
++ return error;
++}
++EXPORT_SYMBOL(pax_getxattr);
++#endif
++
+ ssize_t
+ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+ {
+@@ -315,7 +336,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+ * Extended attribute SET operations
+ */
+ static long
+-setxattr(struct dentry *d, const char __user *name, const void __user *value,
++setxattr(struct path *path, const char __user *name, const void __user *value,
+ size_t size, int flags)
+ {
+ int error;
+@@ -339,7 +360,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
+ return PTR_ERR(kvalue);
+ }
+
+- error = vfs_setxattr(d, kname, kvalue, size, flags);
++ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
++ error = -EACCES;
++ goto out;
++ }
++
++ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
++out:
+ kfree(kvalue);
+ return error;
+ }
+@@ -356,7 +383,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(&path, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -375,7 +402,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(&path, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -386,17 +413,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+ const void __user *,value, size_t, size, int, flags)
+ {
+ struct file *f;
+- struct dentry *dentry;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+- dentry = f->f_path.dentry;
+- audit_inode(NULL, dentry);
++ audit_inode(NULL, f->f_path.dentry);
+ error = mnt_want_write_file(f);
+ if (!error) {
+- error = setxattr(dentry, name, value, size, flags);
++ error = setxattr(&f->f_path, name, value, size, flags);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+@@ -560,7 +585,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
+ * Extended attribute REMOVE operations
+ */
+ static long
+-removexattr(struct dentry *d, const char __user *name)
++removexattr(struct path *path, const char __user *name)
+ {
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+@@ -571,7 +596,10 @@ removexattr(struct dentry *d, const char __user *name)
+ if (error < 0)
+ return error;
+
+- return vfs_removexattr(d, kname);
++ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
++ return -EACCES;
++
++ return vfs_removexattr(path->dentry, kname);
+ }
+
+ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+@@ -585,7 +613,7 @@ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, name);
++ error = removexattr(&path, name);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -603,7 +631,7 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, name);
++ error = removexattr(&path, name);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -613,17 +641,17 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
+ {
+ struct file *f;
+- struct dentry *dentry;
++ struct path *path;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+- dentry = f->f_path.dentry;
+- audit_inode(NULL, dentry);
++ path = &f->f_path;
++ audit_inode(NULL, path->dentry);
+ error = mnt_want_write_file(f);
+ if (!error) {
+- error = removexattr(dentry, name);
++ error = removexattr(path, name);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
+index 8d5a506..7f62712 100644
+--- a/fs/xattr_acl.c
++++ b/fs/xattr_acl.c
+@@ -17,8 +17,8 @@
+ struct posix_acl *
+ posix_acl_from_xattr(const void *value, size_t size)
+ {
+- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
++ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
++ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
+ int count;
+ struct posix_acl *acl;
+ struct posix_acl_entry *acl_e;
+diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
+index d0ab788..827999b 100644
+--- a/fs/xfs/xfs_bmap.c
++++ b/fs/xfs/xfs_bmap.c
+@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
+ int nmap,
+ int ret_nmap);
+ #else
+-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
+ #endif /* DEBUG */
+
+ STATIC int
+diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
+index 79d05e8..e3e5861 100644
+--- a/fs/xfs/xfs_dir2_sf.c
++++ b/fs/xfs/xfs_dir2_sf.c
+@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
+ }
+
+ ino = xfs_dir2_sfe_get_ino(sfp, sfep);
+- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
++ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
++ char name[sfep->namelen];
++ memcpy(name, sfep->name, sfep->namelen);
++ if (filldir(dirent, name, sfep->namelen,
++ off & 0x7fffffff, ino, DT_UNKNOWN)) {
++ *offset = off & 0x7fffffff;
++ return 0;
++ }
++ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
+ *offset = off & 0x7fffffff;
+ return 0;
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index eb519de..a7569b5 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -128,7 +128,7 @@ xfs_find_handle(
+ }
+
+ error = -EFAULT;
+- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
++ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
+ copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
+ goto out_put;
+
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index bd2fb43..86fd3e8d 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -447,7 +447,7 @@ xfs_vn_put_link(
+ struct nameidata *nd,
+ void *p)
+ {
+- char *s = nd_get_link(nd);
++ const char *s = nd_get_link(nd);
+
+ if (!IS_ERR(s))
+ kfree(s);
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 87323f1..dab9d00 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -858,7 +858,7 @@ xfs_rtbuf_get(
+ xfs_buf_t *bp; /* block buffer, result */
+ xfs_inode_t *ip; /* bitmap or summary inode */
+ xfs_bmbt_irec_t map;
+- int nmap;
++ int nmap = 1;
+ int error; /* error value */
+
+ ip = issum ? mp->m_rsumip : mp->m_rbmip;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 8a89949..6776861 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1474,6 +1474,7 @@ static struct file_system_type xfs_fs_type = {
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ };
++MODULE_ALIAS_FS("xfs");
+
+ STATIC int __init
+ xfs_init_zones(void)
+diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
+new file mode 100644
+index 0000000..c4717f9
+--- /dev/null
++++ b/grsecurity/Kconfig
+@@ -0,0 +1,1085 @@
++#
++# grsecurity configuration
++#
++menu "Memory Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
++ default y if GRKERNSEC_CONFIG_AUTO
++ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
++ help
++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++ be written to or read from to modify or leak the contents of the running
++ kernel. /dev/port will also not be allowed to be opened and support
++ for /dev/cpu/*/msr will be removed. If you have module
++ support disabled, enabling this will close up five ways that are
++ currently used to insert malicious code into the running kernel.
++
++ Even with all these features enabled, we still highly recommend that
++ you use the RBAC system, as it is still possible for an attacker to
++ modify the running kernel through privileged I/O granted by ioperm/iopl.
++
++ If you are not using XFree86, you may be able to stop this additional
++ case by enabling the 'Disable privileged I/O' option. Though nothing
++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++ but only to video memory, which is the only writing we allow in this
++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
++ not be allowed to be mprotected with PROT_WRITE later.
++ Enabling this feature will prevent the "cpupower" and "powertop" tools
++ from working.
++
++ It is highly recommended that you say Y here if you meet all the
++ conditions above.
++
++config GRKERNSEC_VM86
++ bool "Restrict VM86 mode"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++ depends on X86_32
++
++ help
++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
++ make use of a special execution mode on 32bit x86 processors called
++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
++ video cards and will still work with this option enabled. The purpose
++ of the option is to prevent exploitation of emulation errors in
++ virtualization of vm86 mode like the one discovered in VMWare in 2009.
++ Nearly all users should be able to enable this option.
++
++config GRKERNSEC_IO
++ bool "Disable privileged I/O"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++ depends on X86
++ select RTC_CLASS
++ select RTC_INTF_DEV
++ select RTC_DRV_CMOS
++
++ help
++ If you say Y here, all ioperm and iopl calls will return an error.
++ Ioperm and iopl can be used to modify the running kernel.
++ Unfortunately, some programs need this access to operate properly,
++ the most notable of which are XFree86 and hwclock. hwclock can be
++ remedied by having RTC support in the kernel, so real-time
++ clock support is enabled if this option is enabled, to ensure
++ that hwclock operates correctly. XFree86 still will not
++ operate correctly with this option enabled, so DO NOT CHOOSE Y
++ IF YOU USE XFree86. If you use XFree86 and you still want to
++ protect your kernel against modification, use the RBAC system.
++
++config GRKERNSEC_JIT_HARDEN
++ bool "Harden BPF JIT against spray attacks"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on BPF_JIT
++ help
++ If you say Y here, the native code generated by the kernel's Berkeley
++ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
++ attacks that attempt to fit attacker-beneficial instructions in
++ 32bit immediate fields of JIT-generated native instructions. The
++ attacker will generally aim to cause an unintended instruction sequence
++ of JIT-generated native code to execute by jumping into the middle of
++ a generated instruction. This feature effectively randomizes the 32bit
++ immediate constants present in the generated code to thwart such attacks.
++
++ If you're using KERNEXEC, it's recommended that you enable this option
++ to supplement the hardening of the kernel.
++
++config GRKERNSEC_PERF_HARDEN
++ bool "Disable unprivileged PERF_EVENTS usage by default"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PERF_EVENTS
++ help
++ If you say Y here, the range of acceptable values for the
++ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
++ default to a new value: 3. When the sysctl is set to this value, no
++ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
++
++ Though PERF_EVENTS can be used legitimately for performance monitoring
++ and low-level application profiling, it is forced on regardless of
++ configuration, has been at fault for several vulnerabilities, and
++ creates new opportunities for side channels and other information leaks.
++
++ This feature puts PERF_EVENTS into a secure default state and permits
++ the administrator to change out of it temporarily if unprivileged
++ application profiling is needed.
++
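++# A minimal usage sketch (illustrative shell commands, kept as Kconfig
++# comments so the file stays valid): kernel.perf_event_paranoid is the
++# sysctl documented above, and 3 is the locked-down value this option adds.
++#   sysctl kernel.perf_event_paranoid          # query the current setting
++#   sysctl -w kernel.perf_event_paranoid=1     # temporarily allow profiling
++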
++config GRKERNSEC_RAND_THREADSTACK
++ bool "Insert random gaps between thread stacks"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_RANDMMAP && !PPC
++ help
++ If you say Y here, a random-sized gap will be enforced between allocated
++ thread stacks. Glibc's NPTL and other threading libraries that
++ pass MAP_STACK to the kernel for thread stack allocation are supported.
++ The implementation currently provides 8 bits of entropy for the gap.
++
++ Many distributions do not compile threaded remote services with the
++ -fstack-check argument to GCC, causing the variable-sized stack-based
++ allocator, alloca(), to not probe the stack on allocation. This
++ permits an unbounded alloca() to skip over any guard page and potentially
++ modify another thread's stack reliably. An enforced random gap
++ reduces the reliability of such an attack and increases the chance
++ that such a read/write to another thread's stack instead lands in
++ an unmapped area, causing a crash and triggering grsecurity's
++ anti-bruteforcing logic.
++
++config GRKERNSEC_PROC_MEMMAP
++ bool "Harden ASLR against information leaks and entropy reduction"
++ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
++ depends on PAX_NOEXEC || PAX_ASLR
++ help
++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++ give no information about the addresses of its mappings if
++ PaX features that rely on random addresses are enabled on the task.
++ In addition to sanitizing this information and disabling other
++ dangerous sources of information, this option causes reads of sensitive
++ /proc/<pid> entries to fail when the file descriptor was opened in a
++ different task than the one performing the read. Such attempts are logged.
++ This option also limits argv/env strings for suid/sgid binaries
++ to 512KB to prevent a complete exhaustion of the stack entropy provided
++ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
++ binaries to prevent alternative mmap layouts from being abused.
++
++ If you use PaX it is essential that you say Y here as it closes up
++ several holes that make full ASLR useless locally.
++
++config GRKERNSEC_BRUTE
++ bool "Deter exploit bruteforcing"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, attempts to bruteforce exploits against forking
++ daemons such as apache or sshd, as well as against suid/sgid binaries
++ will be deterred. When a child of a forking daemon is killed by PaX
++ or crashes due to an illegal instruction or other suspicious signal,
++ the parent process will be delayed 30 seconds upon every subsequent
++ fork until the administrator is able to assess the situation and
++ restart the daemon.
++ In the suid/sgid case, the attempt is logged, the user has all their
++ existing instances of the suid/sgid binary terminated and will
++ be unable to execute any suid/sgid binaries for 15 minutes.
++
++ It is recommended that you also enable signal logging in the auditing
++ section so that logs are generated when a process triggers a suspicious
++ signal.
++ If the sysctl option is enabled, a sysctl option with name
++ "deter_bruteforce" is created.
++
++
++config GRKERNSEC_MODHARDEN
++ bool "Harden module auto-loading"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on MODULES
++ help
++ If you say Y here, module auto-loading in response to use of some
++ feature implemented by an unloaded module will be restricted to
++ root users. Enabling this option helps defend against attacks
++ by unprivileged users who abuse the auto-loading behavior to
++ cause a vulnerable module to load that is then exploited.
++
++ If this option prevents a legitimate use of auto-loading for a
++ non-root user, the administrator can execute modprobe manually
++ with the exact name of the module mentioned in the alert log.
++ Alternatively, the administrator can add the module to the list
++ of modules loaded at boot by modifying init scripts.
++
++ Modification of init scripts will most likely be needed on
++ Ubuntu servers with encrypted home directory support enabled,
++ as the first non-root user logging in will cause the ecb(aes),
++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
++
++config GRKERNSEC_HIDESYM
++ bool "Hide kernel symbols"
++ select PAX_USERCOPY_SLABS
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, getting information on loaded modules, and
++ displaying all kernel symbols through a syscall will be restricted
++ to users with CAP_SYS_MODULE. For software compatibility reasons,
++ /proc/kallsyms will be restricted to the root user. The RBAC
++ system can hide that entry even from root.
++
++ This option also prevents leaking of kernel addresses through
++ several /proc entries.
++
++ Note that this option is only effective provided the following
++ conditions are met:
++ 1) The kernel using grsecurity is not precompiled by some distribution
++ 2) You have also enabled GRKERNSEC_DMESG
++ 3) You are using the RBAC system and hiding other files such as your
++ kernel image and System.map. Alternatively, enabling this option
++ causes the permissions on /boot, /lib/modules, and the kernel
++ source directory to change at compile time to prevent
++ reading by non-root users.
++ If the above conditions are met, this option will aid in providing a
++ useful protection against local kernel exploitation of overflows
++ and arbitrary read/write vulnerabilities.
++
++ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
++ in addition to this feature.
++
++config GRKERNSEC_KERN_LOCKOUT
++ bool "Active kernel exploit response"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on X86 || ARM || PPC || SPARC
++ help
++ If you say Y here, when a PaX alert is triggered due to suspicious
++ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
++ or an OOPS occurs due to bad memory accesses, instead of just
++ terminating the offending process (and potentially allowing
++ a subsequent exploit from the same user), we will take one of two
++ actions:
++ If the user was root, we will panic the system
++ If the user was non-root, we will log the attempt, terminate
++ all processes owned by the user, then prevent them from creating
++ any new processes until the system is restarted
++ This deters repeated kernel exploitation/bruteforcing attempts
++ and is useful for later forensics.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_RBAC_DEBUG
++ bool
++
++config GRKERNSEC_NO_RBAC
++ bool "Disable RBAC system"
++ help
++ If you say Y here, the /dev/grsec device will be removed from the kernel,
++ preventing the RBAC system from being enabled. You should only say Y
++ here if you have no intention of using the RBAC system, so as to prevent
++ an attacker with root access from misusing the RBAC system to hide files
++ and processes when loadable module support and /dev/[k]mem have been
++ locked down.
++
++config GRKERNSEC_ACL_HIDEKERN
++ bool "Hide kernel processes"
++ help
++ If you say Y here, all kernel threads will be hidden to all
++ processes but those whose subject has the "view hidden processes"
++ flag.
++
++config GRKERNSEC_ACL_MAXTRIES
++ int "Maximum tries before password lockout"
++ default 3
++ help
++ This option enforces the maximum number of times a user can attempt
++ to authorize themselves with the grsecurity RBAC system before being
++ denied the ability to attempt authorization again for a specified time.
++ The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++ int "Time to wait after max password tries, in seconds"
++ default 30
++ help
++ This option specifies the time the user must wait after attempting to
++ authorize to the RBAC system with the maximum number of invalid
++ passwords. The higher the number, the harder it will be to brute-force
++ a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++ bool "Proc restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, the permissions of the /proc filesystem
++ will be altered to enhance system security and privacy. You MUST
++ choose either a user only restriction or a user and group restriction.
++ With the user-only restriction, users can see only the processes
++ they themselves run. With the user and group restriction, a group
++ you specify can additionally view all processes and files normally
++ restricted to root. NOTE: If you're running identd or
++ ntpd as a non-root user, you will have to run it as the group you
++ specify here.
++
++config GRKERNSEC_PROC_USER
++ bool "Restrict /proc to user only"
++ depends on GRKERNSEC_PROC
++ help
++ If you say Y here, non-root users will only be able to view their own
++ processes, and will be restricted from viewing network-related
++ information and kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++ bool "Allow special group"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++ help
++ If you say Y here, you will be able to select a group that will be
++ able to view all processes and network-related information. If you've
++ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
++ remain hidden. This option is useful if you want to run identd as
++ a non-root user. The group you select may also be chosen at boot time
++ via "grsec_proc_gid=" on the kernel commandline.
++
++config GRKERNSEC_PROC_GID
++ int "GID for special group"
++ depends on GRKERNSEC_PROC_USERGROUP
++ default 1001
++
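++# Illustrative group setup (the group name "proc-view" is hypothetical;
++# the GID must match the value configured above, or the one passed at
++# boot via the "grsec_proc_gid=" kernel parameter):
++#   groupadd -g 1001 proc-view
++#   usermod -aG proc-view identd
++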
++config GRKERNSEC_PROC_ADD
++ bool "Additional restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++ help
++ If you say Y here, additional restrictions will be placed on
++ /proc that keep normal users from viewing device information and
++ slabinfo information that could be useful for exploits.
++
++config GRKERNSEC_LINK
++ bool "Linking restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, /tmp race exploits will be prevented, since users
++ will no longer be able to follow symlinks owned by other users in
++ world-writable +t directories (e.g. /tmp), unless the owner of the
++ symlink is the owner of the directory. Users will also not be
++ able to hardlink to files they do not own. If the sysctl option is
++ enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_SYMLINKOWN
++ bool "Kernel-enforced SymlinksIfOwnerMatch"
++ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++ help
++ Apache's SymlinksIfOwnerMatch option has an inherent race condition
++ that prevents it from being used as a security feature. As Apache
++ verifies the symlink by performing a stat() against the target of
++ the symlink before it is followed, an attacker can setup a symlink
++ to point to a same-owned file, then replace the symlink with one
++ that targets another user's file just after Apache "validates" the
++ symlink -- a classic TOCTOU race. If you say Y here, a complete,
++ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
++ will be in place for the group you specify. If the sysctl option
++ is enabled, a sysctl option with name "enforce_symlinksifowner" is
++ created.
++
++config GRKERNSEC_SYMLINKOWN_GID
++ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
++ depends on GRKERNSEC_SYMLINKOWN
++ default 1006
++ help
++ Setting this GID determines what group kernel-enforced
++ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
++ is enabled, a sysctl option with name "symlinkown_gid" is created.
++
++config GRKERNSEC_FIFO
++ bool "FIFO restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, users will not be able to write to FIFOs they don't
++ own in world-writable +t directories (e.g. /tmp), unless the owner of
++ the FIFO is also the owner of the directory it's held in. If the sysctl
++ option is enabled, a sysctl option with name "fifo_restrictions" is
++ created.
++
++config GRKERNSEC_SYSFS_RESTRICT
++ bool "Sysfs/debugfs restriction"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
++ depends on SYSFS
++ help
++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++ any filesystem normally mounted under it (e.g. debugfs) will be
++ mostly accessible only by root. These filesystems generally provide access
++ to hardware and debug information that isn't appropriate for unprivileged
++ users of the system. Sysfs and debugfs have also become a large source
++ of new vulnerabilities, ranging from infoleaks to local compromise.
++ There has been very little oversight with an eye toward security involved
++ in adding new exporters of information to these filesystems, so their
++ use is discouraged.
++ For reasons of compatibility, a few directories have been whitelisted
++ for access by non-root users:
++ /sys/fs/selinux
++ /sys/fs/fuse
++ /sys/fs/ecryptfs
++ /sys/devices/system/cpu
++
++config GRKERNSEC_ROFS
++ bool "Runtime read-only mount protection"
++ depends on SYSCTL
++ help
++ If you say Y here, a sysctl option with name "romount_protect" will
++ be created. By setting this option to 1 at runtime, filesystems
++ will be protected in the following ways:
++ * No new writable mounts will be allowed
++ * Existing read-only mounts won't be able to be remounted read/write
++ * Write operations will be denied on all block devices
++ This option acts independently of grsec_lock: once it is set to 1,
++ it cannot be turned off. Therefore, please be mindful of the resulting
++ behavior if this option is enabled in an init script on a read-only
++ filesystem.
++ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
++ and GRKERNSEC_IO should be enabled and module loading disabled via
++ config or at runtime.
++ This feature is mainly intended for secure embedded systems.
++
++
++config GRKERNSEC_DEVICE_SIDECHANNEL
++ bool "Eliminate stat/notify-based device sidechannels"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, timing analyses on block or character
++ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++ will be thwarted for unprivileged users. If a process without
++ CAP_MKNOD stats such a device, the last access and last modify times
++ will match the device's create time. No access or modify events
++ will be triggered through inotify/dnotify/fanotify for such devices.
++ This feature will prevent attacks that may at a minimum
++ allow an attacker to determine the administrator's password length.
++
++config GRKERNSEC_CHROOT
++ bool "Chroot jail restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, you will be able to choose several options that will
++ make breaking out of a chrooted jail much more difficult. If you
++ encounter no software incompatibilities with the following options, it
++ is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++ bool "Deny mounts"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ mount or remount filesystems. If the sysctl option is enabled, a
++ sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++ bool "Deny double-chroots"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chroot
++ again outside the chroot. This is a widely used method of breaking
++ out of a chroot jail and should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name
++ "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++ bool "Deny pivot_root in chroot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to use
++ a function called pivot_root() that was introduced in Linux 2.3.41. It
++ works similarly to chroot in that it changes the root filesystem. This
++ function could be misused in a chrooted process to attempt to break out
++ of the chroot, and therefore should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
++ created.
++
++config GRKERNSEC_CHROOT_CHDIR
++ bool "Enforce chdir(\"/\") on all chroots"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the current working directory of all newly-chrooted
++ applications will be set to the root directory of the chroot.
++ The man page on chroot(2) states:
++ Note that this call does not change the current working
++ directory, so that `.' can be outside the tree rooted at
++ `/'. In particular, the super-user can escape from a
++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++ It is recommended that you say Y here, since it's not known to break
++ any software. If the sysctl option is enabled, a sysctl option with
++ name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++ bool "Deny (f)chmod +s"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chmod
++ or fchmod files to make them have suid or sgid bits. This protects
++ against another published method of breaking a chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
++ created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++ bool "Deny fchdir out of chroot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
++ to a file descriptor of the chrooting process that points to a directory
++ outside the filesystem will be stopped. If the sysctl option
++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++ bool "Deny mknod"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be allowed to
++ mknod. The problem with using mknod inside a chroot is that it
++ would allow an attacker to create a device entry that is the same
++ as one on the physical root of your system, which could range from
++ anything from the console device to a device for your harddrive (which
++ they could then use to wipe the drive or steal data). It is recommended
++ that you say Y here, unless you run into software incompatibilities.
++ If the sysctl option is enabled, a sysctl option with name
++ "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++ bool "Deny shmat() out of chroot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to attach
++ to shared memory segments that were created outside of the chroot jail.
++ It is recommended that you say Y here. If the sysctl option is enabled,
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all processes within a
++ chroot jail will be lowered to stop module insertion, raw i/o,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another, and changing the system time.
++ This is left an option because it can break some apps. Disable this
++ if your chrooted apps are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++config GRKERNSEC_CHROOT_INITRD
++ bool "Exempt initrd tasks from restrictions"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
++ help
++ If you say Y here, tasks started prior to init will be exempted from
++ grsecurity's chroot restrictions. This option is mainly meant to
++ resolve Plymouth's performing privileged operations unnecessarily
++ in a chroot.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec and chdir logging features will only operate
++ on a group you specify. This option is recommended if you only want to
++ watch certain users instead of having a large amount of logs from the
++ entire system. If the sysctl option is enabled, a sysctl option with
++ name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell-servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: This option when enabled will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here. If the sysctl
++ option is enabled, a sysctl option with name "resource_logging" is
++ created. If the RBAC system is enabled, the sysctl value is ignored.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can cause a large amount of logs if certain
++ applications (eg. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_PTRACE
++ bool "Ptrace logging"
++ help
++ If you say Y here, all attempts to attach to a process via ptrace
++ will be logged. If the sysctl option is enabled, a sysctl option
++ with name "audit_ptrace" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, which will as a result inform you when an error in a program
++ occurred, which in some cases could indicate an exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack. The entry is readable by only the owner of the
++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++ the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_RWXMAP_LOG
++ bool 'Denied RWX mmap/mprotect logging'
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
++ help
++ If you say Y here, calls to mmap() and mprotect() with explicit
++ usage of PROT_WRITE and PROT_EXEC together will be logged when
++ denied by the PAX_MPROTECT feature. This feature will also
++ log other problematic scenarios that can occur when PAX_MPROTECT
++ is enabled on a binary, like textrels and PT_GNU_STACK. If the
++ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++ is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view the contents of the kernel's circular log buffer.
++ The kernel's log buffer often contains kernel addresses and other
++ identifying information useful to an attacker in fingerprinting a
++ system for a targeted exploit.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_HARDEN_PTRACE
++ bool "Deter ptrace-based process snooping"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, TTY sniffers and other malicious monitoring
++ programs implemented through ptrace will be defeated. If you
++ have been using the RBAC system, this option has already been
++ enabled for several years for all users, with the ability to make
++ fine-grained exceptions.
++
++ This option only affects the ability of non-root users to ptrace
++ processes that are not a descendent of the ptracing process.
++ This means that strace ./binary and gdb ./binary will still work,
++ but attaching to arbitrary processes will not. If the sysctl
++ option is enabled, a sysctl option with name "harden_ptrace" is
++ created.
++
++config GRKERNSEC_PTRACE_READEXEC
++ bool "Require read access to ptrace sensitive binaries"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, unprivileged users will not be able to ptrace unreadable
++ binaries. This option is useful in environments that
++ remove the read bits (e.g. file mode 4711) from suid binaries to
++ prevent infoleaking of their contents. This option adds
++ consistency to the use of that file mode, as without it the binary
++ could be read out by ptracing it when run without privileges.
++
++ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
++ is created.
++
++config GRKERNSEC_SETXID
++ bool "Enforce consistent multithreaded privileges"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
++ help
++ If you say Y here, a change from a root uid to a non-root uid
++ in a multithreaded application will cause the resulting uids,
++ gids, supplementary groups, and capabilities in that thread
++ to be propagated to the other threads of the process. In most
++ cases this is unnecessary, as glibc will emulate this behavior
++ on behalf of the application. Other libcs do not act in the
++ same way, allowing the other threads of the process to continue
++ running with root privileges. If the sysctl option is enabled,
++ a sysctl option with name "consistent_setxid" is created.
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
++ help
++ If you say Y here, you will be able to choose a gid to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict all non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_INVERT
++ bool "Invert GID option"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, the group you specify in the TPE configuration will
++ decide what group TPE restrictions will be *disabled* for. This
++ option is useful if you want TPE restrictions to be applied to most
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward-compatibility.
++
++config GRKERNSEC_TPE_GID
++ int
++ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
++ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
++
++config GRKERNSEC_TPE_UNTRUSTED_GID
++ int "GID for TPE-untrusted users"
++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
++config GRKERNSEC_TPE_TRUSTED_GID
++ int "GID for TPE-trusted users"
++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
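++# Illustrative TPE group administration (group and user names are
++# hypothetical; the GID matches the GRKERNSEC_TPE_UNTRUSTED_GID default):
++#   groupadd -g 1005 tpe-untrusted
++#   usermod -aG tpe-untrusted someuser
++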
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++ bool "Larger entropy pools"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, the entropy pools used for many features of Linux
++ and grsecurity will be doubled in size. Since several grsecurity
++ features use additional randomness, it is recommended that you say Y
++ here. Saying Y here has a similar effect as modifying
++ /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_BLACKHOLE
++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on NET
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++ This feature supports both IPv4 and IPv6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++ The blackhole feature as-implemented is equivalent to the FreeBSD
++ blackhole feature, as it prevents RST responses to all packets, not
++ just SYNs. Under most application behavior this causes no
++ problems, but applications (like haproxy) may not close certain
++ connections in a way that cleanly terminates them on the remote
++ end, leaving the remote host in LAST_ACK state. Because of this
++ side-effect and to prevent intentional LAST_ACK DoSes, this
++ feature also adds automatic mitigation against such attacks.
++ The mitigation drastically reduces the amount of time a socket
++ can spend in LAST_ACK state. If you're using haproxy and not
++ all servers it connects to have this option enabled, consider
++ disabling this feature on the haproxy host.
++
++ If the sysctl option is enabled, two sysctl options with names
++ "ip_blackhole" and "lastack_retries" will be created.
++ While "ip_blackhole" takes the standard zero/non-zero on/off
++ toggle, "lastack_retries" uses the same kinds of values as
++ "tcp_retries1" and "tcp_retries2". The default value of 4
++ prevents a socket from lasting more than 45 seconds in LAST_ACK
++ state.
++
++config GRKERNSEC_NO_SIMULT_CONNECT
++ bool "Disable TCP Simultaneous Connect"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on NET
++ help
++ If you say Y here, a feature by Willy Tarreau will be enabled that
++ removes a weakness in Linux's strict implementation of TCP that
++ allows two clients to connect to each other without either entering
++ a listening state. The weakness allows an attacker to easily prevent
++ a client from connecting to a known server provided the source port
++ for the connection is guessed correctly.
++
++ As the weakness could be used to prevent an antivirus or IPS from
++ fetching updates, or prevent an SSL gateway from fetching a CRL,
++ it should be eliminated by enabling this option. Though Linux is
++ one of the few operating systems supporting simultaneous connect, the
++ feature has no legitimate use in practice and is rarely supported
++ by firewalls.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ depends on NET
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will enforce up to three restrictions, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, a sysctl option
++ with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating ftp transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, a sysctl
++ option with name "socket_server_gid" is created.
++
++endmenu
++
++menu "Physical Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_DENYUSB
++ bool "Deny new USB connections after toggle"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on SYSCTL && USB_SUPPORT
++ help
++ If you say Y here, a new sysctl option with name "deny_new_usb"
++ will be created. Setting its value to 1 will prevent any new
++ USB devices from being recognized by the OS. Any attempted USB
++ device insertion will be logged. This option is intended to be
++ used against custom USB devices designed to exploit vulnerabilities
++ in various USB device drivers.
++
++ For greatest effectiveness, this sysctl should be set after any
++ relevant init scripts. This option is safe to enable in distros
++ as each user can choose whether or not to toggle the sysctl.
++
++config GRKERNSEC_DENYUSB_FORCE
++ bool "Reject all USB devices not connected at boot"
++ select USB
++ depends on GRKERNSEC_DENYUSB
++ help
++ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
++ that doesn't involve a sysctl entry. This option should only be
++ enabled if you're sure you want to deny all new USB connections
++ at runtime and don't want to modify init scripts. This should not
++ be enabled by distros. It forces the core USB code to be built
++ into the kernel image so that all devices connected at boot time
++ can be recognized and new USB device connections can be prevented
++ prior to init running.
++
++endmenu
++
++menu "Sysctl Support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features enabled in the kernel configuration are disabled at boot
++ if you do not say Y to the "Turn on features by default" option.
++ All options should be set at startup, and the grsec_lock entry should
++ be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
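++
++ As an illustrative example only (which entries exist depends on the
++ features compiled in), an init script could finish with:
++
++ echo 1 > /proc/sys/kernel/grsecurity/tpe
++ echo 1 > /proc/sys/kernel/grsecurity/grsec_lock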
++
++config GRKERNSEC_SYSCTL_DISTRO
++ bool "Extra sysctl support for distro makers (READ HELP)"
++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
++ help
++ If you say Y here, additional sysctl options will be created
++ for features that affect processes running as root. Therefore,
++ it is critical when using this option that the grsec_lock entry be
++ enabled after boot. Only distros that ship prebuilt kernel packages
++ with this option enabled and that can ensure grsec_lock is enabled
++ after boot should use this option.
++ *Failure to set grsec_lock after boot makes all grsec features
++ this option covers useless*
++
++ Currently this option creates the following sysctl entries:
++ "Disable Privileged I/O": "disable_priv_io"
++
++config GRKERNSEC_SYSCTL_ON
++ bool "Turn on features by default"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC_SYSCTL
++ help
++ If you say Y here, instead of having all features enabled in the
++ kernel configuration disabled at boot time, the features will be
++ enabled at boot time. It is recommended you say Y here unless
++ there is some reason you would want all sysctl-tunable features to
++ be disabled by default. As mentioned elsewhere, it is important
++ to enable the grsec_lock entry once you have finished modifying
++ the sysctl entries.
++
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce a minimum number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, choose a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 6
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++
++endmenu
+diff --git a/grsecurity/Makefile b/grsecurity/Makefile
+new file mode 100644
+index 0000000..2f8793f
+--- /dev/null
++++ b/grsecurity/Makefile
+@@ -0,0 +1,43 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton;
++# during 2001-2009 it was completely redesigned by Brad Spengler
++# into an RBAC system
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler - Open Source Security, Inc., and released
++# under the GPL v2 or higher
++
++KBUILD_CFLAGS += -Werror
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++ grsec_mount.o grsec_sig.o grsec_sysctl.o \
++ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
++ grsec_usb.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++ gracl_learn.o grsec_log.o gracl_policy.o
++ifdef CONFIG_COMPAT
++obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
++endif
++
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifdef CONFIG_NET
++obj-y += grsec_sock.o
++obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
++endif
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
++ifdef CONFIG_GRKERNSEC_HIDESYM
++extra-y := grsec_hidesym.o
++$(obj)/grsec_hidesym.o:
++ @-chmod -f 500 /boot
++ @-chmod -f 500 /lib/modules
++ @-chmod -f 500 /lib64/modules
++ @-chmod -f 500 /lib32/modules
++ @-chmod -f 700 .
++ @echo ' grsec: protected kernel image paths'
++endif
+diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
+new file mode 100644
+index 0000000..180140a
+--- /dev/null
++++ b/grsecurity/gracl.c
+@@ -0,0 +1,2825 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/security.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/stop_machine.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++#include <linux/posix-timers.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++#define FOR_EACH_ROLE_START(role) \
++ role = running_polstate.role_list; \
++ while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++ role = role->prev; \
++ }
++
++extern struct path gr_real_root;
++
++static struct gr_policy_state running_polstate;
++struct gr_policy_state *polstate = &running_polstate;
++extern struct gr_alloc_state *current_alloc_state;
++
++extern char *gr_shared_page[4];
++static DEFINE_MUTEX(gr_dev_mutex);
++DEFINE_RWLOCK(gr_inode_lock);
++
++static unsigned int gr_status __read_only = GR_STATUS_INIT;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt);
++#endif
++
++#ifdef CONFIG_NET
++extern struct vfsmount *sock_mnt;
++#endif
++
++extern struct vfsmount *pipe_mnt;
++extern struct vfsmount *shm_mnt;
++#ifdef CONFIG_HUGETLBFS
++extern struct vfsmount *hugetlbfs_vfsmount;
++#endif
++
++DECLARE_BRLOCK(vfsmount_lock);
++
++extern u16 acl_sp_role_value;
++extern struct acl_object_label *fakefs_obj_rw;
++extern struct acl_object_label *fakefs_obj_rwx;
++
++int gr_acl_is_enabled(void)
++{
++ return (gr_status & GR_READY);
++}
++
++void gr_enable_rbac_system(void)
++{
++ pax_open_kernel();
++ gr_status |= GR_READY;
++ pax_close_kernel();
++}
++
++int gr_rbac_disable(void *unused)
++{
++ pax_open_kernel();
++ gr_status &= ~GR_READY;
++ pax_close_kernel();
++
++ return 0;
++}
++
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++ return BTRFS_I(dentry->d_inode)->root->anon_dev;
++ else
++#endif
++ return dentry->d_sb->s_dev;
++}
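++/* Note: btrfs subvolumes all share the superblock's s_dev, so the
++   per-subvolume anon_dev is used above instead to keep (inode, dev)
++   pairs unique across subvolumes, matching the st_dev that stat()
++   reports for files inside a subvolume. */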
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++ return __get_dev(dentry);
++}
++
++static char gr_task_roletype_to_char(struct task_struct *task)
++{
++ switch (task->role->roletype &
++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++ GR_ROLE_SPECIAL)) {
++ case GR_ROLE_DEFAULT:
++ return 'D';
++ case GR_ROLE_USER:
++ return 'U';
++ case GR_ROLE_GROUP:
++ return 'G';
++ case GR_ROLE_SPECIAL:
++ return 'S';
++ }
++
++ return 'X';
++}
++
++char gr_roletype_to_char(void)
++{
++ return gr_task_roletype_to_char(current);
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++ if (current->role->roletype & GR_ROLE_TPE)
++ return 1;
++ else
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
++ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++int
++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
++{
++ if (likely(lena != lenb))
++ return 0;
++
++ return !memcmp(a, b, lena);
++}
++
++static int prepend(char **buffer, int *buflen, const char *str, int namelen)
++{
++ *buflen -= namelen;
++ if (*buflen < 0)
++ return -ENAMETOOLONG;
++ *buffer -= namelen;
++ memcpy(*buffer, str, namelen);
++ return 0;
++}
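++/* Illustrative sketch (not part of the original logic): prepend()
++   fills its buffer from right to left, so callers start at the end of
++   the buffer and walk toward the front.  Assuming an 8-byte buffer: */
++#if 0	/* example only */
++static void prepend_demo(void)
++{
++	char buf[8];
++	char *p = buf + sizeof(buf);
++	int len = sizeof(buf);
++
++	prepend(&p, &len, "\0", 1);	/* NUL terminator goes in first */
++	prepend(&p, &len, "bar", 3);
++	prepend(&p, &len, "/", 1);
++	prepend(&p, &len, "foo", 3);	/* p now points at "foo/bar" */
++	/* a further prepend() would return -ENAMETOOLONG: len is now 0 */
++}
++#endif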
++
++static int prepend_name(char **buffer, int *buflen, struct qstr *name)
++{
++ return prepend(buffer, buflen, name->name, name->len);
++}
++
++static int prepend_path(const struct path *path, struct path *root,
++ char **buffer, int *buflen)
++{
++ struct dentry *dentry = path->dentry;
++ struct vfsmount *vfsmnt = path->mnt;
++ bool slash = false;
++ int error = 0;
++
++ while (dentry != root->dentry || vfsmnt != root->mnt) {
++ struct dentry * parent;
++
++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
++ /* Global root? */
++ if (vfsmnt->mnt_parent == vfsmnt) {
++ goto out;
++ }
++ dentry = vfsmnt->mnt_mountpoint;
++ vfsmnt = vfsmnt->mnt_parent;
++ continue;
++ }
++ parent = dentry->d_parent;
++ prefetch(parent);
++ spin_lock(&dentry->d_lock);
++ error = prepend_name(buffer, buflen, &dentry->d_name);
++ spin_unlock(&dentry->d_lock);
++ if (!error)
++ error = prepend(buffer, buflen, "/", 1);
++ if (error)
++ break;
++
++ slash = true;
++ dentry = parent;
++ }
++
++out:
++ if (!error && !slash)
++ error = prepend(buffer, buflen, "/", 1);
++
++ return error;
++}
++
++/* this must be called with vfsmount_lock and rename_lock held */
++
++static char *__our_d_path(const struct path *path, struct path *root,
++ char *buf, int buflen)
++{
++ char *res = buf + buflen;
++ int error;
++
++ prepend(&res, &buflen, "\0", 1);
++ error = prepend_path(path, root, &res, &buflen);
++ if (error)
++ return ERR_PTR(error);
++
++ return res;
++}
++
++static char *
++gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
++{
++ char *retval;
++
++ retval = __our_d_path(path, root, buf, buflen);
++ if (unlikely(IS_ERR(retval)))
++ retval = strcpy(buf, "<path too long>");
++ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
++ retval[1] = '\0';
++
++ return retval;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ struct path path;
++ char *res;
++
++ path.dentry = (struct dentry *)dentry;
++ path.mnt = (struct vfsmount *)vfsmnt;
++
++ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
++ by the RBAC system */
++ res = gen_full_path(&path, &gr_real_root, buf, buflen);
++
++ return res;
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct path path;
++ struct path root;
++ struct task_struct *reaper = &init_task;
++
++ path.dentry = (struct dentry *)dentry;
++ path.mnt = (struct vfsmount *)vfsmnt;
++
++ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
++ get_fs_root(reaper->fs, &root);
++
++ br_read_lock(vfsmount_lock);
++ write_seqlock(&rename_lock);
++ res = gen_full_path(&path, &root, buf, buflen);
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
++
++ path_put(&root);
++ return res;
++}
++
++char *
++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ char *ret;
++ br_read_lock(vfsmount_lock);
++ write_seqlock(&rename_lock);
++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
++ return ret;
++}
++
++static char *
++gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ char *ret;
++ char *buf;
++ int buflen;
++
++ br_read_lock(vfsmount_lock);
++ write_seqlock(&rename_lock);
++ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
++ buflen = (int)(ret - buf);
++ if (buflen >= 5)
++ prepend(&ret, &buflen, "/proc", 5);
++ else
++ ret = strcpy(buf, "<path too long>");
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
++ return ret;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++ /* masks off the audit flags, then shifts the remaining permission
++ flags into the corresponding audit-flag positions, adding the
++ special case of append auditing if we're requesting write */
++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
++}
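++/* Worked example (the concrete bit values here are assumptions for
++   illustration): if GR_READ were (1 << 0) and GR_AUDIT_READ were
++   (1 << 10), then to_gr_audit(GR_READ) == GR_AUDIT_READ.  Any reqmode
++   containing GR_WRITE additionally picks up GR_AUDIT_APPEND, so that
++   appends via a descriptor opened for write are audited as well. */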
++
++struct acl_role_label *
++__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
++ struct acl_role_label *match;
++ struct role_allowed_ip *ipp;
++ unsigned int x;
++ u32 curr_ip = task->signal->saved_ip;
++
++ match = state->acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == uid)
++ goto found;
++ }
++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++ break;
++ match = match->next;
++ }
++found:
++ if (match == NULL) {
++ try_group:
++ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
++ match = state->acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == gid)
++ goto found2;
++ }
++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++ break;
++ match = match->next;
++ }
++found2:
++ if (match == NULL)
++ match = state->default_role;
++ if (match->allowed_ips == NULL)
++ return match;
++ else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ match = state->default_role;
++ }
++ } else if (match->allowed_ips == NULL) {
++ return match;
++ } else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ goto try_group;
++ }
++
++ return match;
++}
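++/* Lookup order, summarized: a user role (or user domain) matching the
++   uid is tried first; if that role has an allowed_ips list that does
++   not cover the task's saved source IP, we fall back to a group role
++   (or group domain) for the gid, and finally to the default role.  A
++   role without an allowed_ips list matches from any source IP. */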
++
++static struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++struct acl_subject_label *
++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && (match->mode & GR_DELETED))
++ return match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ match = match->next;
++ }
++
++ if (match && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
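++/* Note: the create-time lookup above deliberately prefers a GR_DELETED
++   object over a live one -- creating at a path the policy knows as
++   deleted should match the label recorded for the deleted file rather
++   than a stale live entry.  lookup_name_entry_create() below performs
++   the same two-pass (deleted first, then live) scan for name entries. */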
++
++struct name_entry *
++__lookup_name_entry(const struct gr_policy_state *state, const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % state->name_set.n_size;
++ struct name_entry *match;
++
++ match = state->name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
++ match = match->next;
++
++ return match;
++}
++
++static struct name_entry *
++lookup_name_entry(const char *name)
++{
++ return __lookup_name_entry(&running_polstate, name);
++}
++
++static struct name_entry *
++lookup_name_entry_create(const char *name)
++{
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % running_polstate.name_set.n_size;
++ struct name_entry *match;
++
++ match = running_polstate.name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ !match->deleted))
++ match = match->next;
++
++ if (match && match->deleted)
++ return match;
++
++ match = running_polstate.name_set.n_hash[index];
++
++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
++ match->deleted))
++ match = match->next;
++
++ if (match && !match->deleted)
++ return match;
++ else
++ return NULL;
++}
++
++static struct inodev_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = running_polstate.inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
++ match = match->next;
++
++ return match;
++}
++
++void
++__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
++{
++ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
++ state->inodev_set.i_size);
++ struct inodev_entry **curr;
++
++ entry->prev = NULL;
++
++ curr = &state->inodev_set.i_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = entry;
++
++ entry->next = *curr;
++ *curr = entry;
++
++ return;
++}
++
++static void
++insert_inodev_entry(struct inodev_entry *entry)
++{
++ __insert_inodev_entry(&running_polstate, entry);
++}
++
++void
++insert_acl_obj_label(struct acl_object_label *obj,
++ struct acl_subject_label *subj)
++{
++ unsigned int index =
++ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
++ struct acl_object_label **curr;
++
++ obj->prev = NULL;
++
++ curr = &subj->obj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++void
++insert_acl_subj_label(struct acl_subject_label *obj,
++ struct acl_role_label *role)
++{
++ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
++ struct acl_subject_label **curr;
++
++ obj->prev = NULL;
++
++ curr = &role->subj_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = obj;
++
++ obj->next = *curr;
++ *curr = obj;
++
++ return;
++}
++
++/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
++
++static int
++glob_match(const char *p, const char *n)
++{
++ char c;
++
++ while ((c = *p++) != '\0') {
++ switch (c) {
++ case '?':
++ if (*n == '\0')
++ return 1;
++ else if (*n == '/')
++ return 1;
++ break;
++ case '\\':
++ if (*n != c)
++ return 1;
++ break;
++ case '*':
++ for (c = *p++; c == '?' || c == '*'; c = *p++) {
++ if (*n == '/')
++ return 1;
++ else if (c == '?') {
++ if (*n == '\0')
++ return 1;
++ else
++ ++n;
++ }
++ }
++ if (c == '\0') {
++ return 0;
++ } else {
++ const char *endp;
++
++ if ((endp = strchr(n, '/')) == NULL)
++ endp = n + strlen(n);
++
++ if (c == '[') {
++ for (--p; n < endp; ++n)
++ if (!glob_match(p, n))
++ return 0;
++ } else if (c == '/') {
++ while (*n != '\0' && *n != '/')
++ ++n;
++ if (*n == '/' && !glob_match(p, n + 1))
++ return 0;
++ } else {
++ for (--p; n < endp; ++n)
++ if (*n == c && !glob_match(p, n))
++ return 0;
++ }
++
++ return 1;
++ }
++ case '[':
++ {
++ int not;
++ char cold;
++
++ if (*n == '\0' || *n == '/')
++ return 1;
++
++ not = (*p == '!' || *p == '^');
++ if (not)
++ ++p;
++
++ c = *p++;
++ for (;;) {
++ unsigned char fn = (unsigned char)*n;
++
++ if (c == '\0')
++ return 1;
++ else {
++ if (c == fn)
++ goto matched;
++ cold = c;
++ c = *p++;
++
++ if (c == '-' && *p != ']') {
++ unsigned char cend = *p++;
++
++ if (cend == '\0')
++ return 1;
++
++ if (cold <= fn && fn <= cend)
++ goto matched;
++
++ c = *p++;
++ }
++ }
++
++ if (c == ']')
++ break;
++ }
++ if (!not)
++ return 1;
++ break;
++ matched:
++ while (c != ']') {
++ if (c == '\0')
++ return 1;
++
++ c = *p++;
++ }
++ if (not)
++ return 1;
++ }
++ break;
++ default:
++ if (c != *n)
++ return 1;
++ }
++
++ ++n;
++ }
++
++ if (*n == '\0')
++ return 0;
++
++ if (*n == '/')
++ return 0;
++
++ return 1;
++}
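++/* Usage sketch (illustrative; results traced from the code above,
++   where 0 means match and 1 means no match):
++
++	glob_match("/home/*/.ssh", "/home/alice/.ssh")	returns 0 (match)
++	glob_match("/home/*.conf", "/home/a/b.conf")	returns 1 (no match)
++	glob_match("/tmp/??.log", "/tmp/ab.log")	returns 0 (match)
++
++   '?' never matches '/', and a '*' in the middle of a pattern stops at
++   a path-component boundary, while a pattern ending in '*' matches the
++   remainder of the path, '/' included. */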
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
++{
++ struct acl_object_label *tmp;
++
++ if (*path == NULL)
++ *path = gr_to_filename_nolock(dentry, mnt);
++
++ tmp = globbed;
++
++ while (tmp) {
++ if (!glob_match(tmp->filename, *path))
++ return tmp;
++ tmp = tmp->next;
++ }
++
++ return NULL;
++}
++
++static struct acl_object_label *
++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ const ino_t curr_ino, const dev_t curr_dev,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ struct acl_subject_label *tmpsubj;
++ struct acl_object_label *retval;
++ struct acl_object_label *retval2;
++
++ tmpsubj = (struct acl_subject_label *) subj;
++ read_lock(&gr_inode_lock);
++ do {
++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
++ if (retval) {
++ if (checkglob && retval->globbed) {
++ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
++ if (retval2)
++ retval = retval2;
++ }
++ break;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++ return retval;
++}
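++/* The do/while above walks the parent_subject chain, so an object
++   label defined only on an enclosing (parent) subject still applies
++   when the closest subject defines no match of its own. */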
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ struct dentry *curr_dentry,
++ const struct acl_subject_label *subj, char **path, const int checkglob)
++{
++ int newglob = checkglob;
++ ino_t inode;
++ dev_t device;
++
++ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
++ as we don't want a / * rule to match instead of the / object.
++ don't do this for create lookups that call this function though, since they're looking up
++ on the parent and thus need globbing checks on all paths
++ */
++ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
++ newglob = GR_NO_GLOB;
++
++ spin_lock(&curr_dentry->d_lock);
++ inode = curr_dentry->d_inode->i_ino;
++ device = __get_dev(curr_dentry);
++ spin_unlock(&curr_dentry->d_lock);
++
++ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
++}
++
++static struct acl_object_label *
++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path, const int checkglob)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_object_label *retval;
++ struct dentry *parent;
++
++ br_read_lock(vfsmount_lock);
++ write_seqlock(&rename_lock);
++
++ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
++#ifdef CONFIG_NET
++ mnt == sock_mnt ||
++#endif
++#ifdef CONFIG_HUGETLBFS
++ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
++#endif
++ /* ignore Eric Biederman */
++ IS_PRIVATE(l_dentry->d_inode))) {
++ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
++ goto out;
++ }
++
++ for (;;) {
++ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
++ break;
++
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ parent = dentry->d_parent;
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++ if (retval != NULL)
++ goto out;
++
++ dentry = parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
++
++ /* gr_real_root is pinned so we don't have to hold a reference */
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
++out:
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
++
++ BUG_ON(retval == NULL);
++
++ return retval;
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ char *path = NULL;
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
++}
++
++static __inline__ struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path)
++{
++ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
++}
++
++struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_role_label *role)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_subject_label *retval;
++ struct dentry *parent;
++
++ br_read_lock(vfsmount_lock);
++ write_seqlock(&rename_lock);
++
++ for (;;) {
++ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ spin_unlock(&dentry->d_lock);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ parent = dentry->d_parent;
++ spin_unlock(&dentry->d_lock);
++
++ if (retval != NULL)
++ goto out;
++
++ dentry = parent;
++ }
++
++ spin_lock(&dentry->d_lock);
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
++ __get_dev(dentry), role);
++ read_unlock(&gr_inode_lock);
++ spin_unlock(&dentry->d_lock);
++
++ if (unlikely(retval == NULL)) {
++ /* gr_real_root is pinned, we don't need to hold a reference */
++ read_lock(&gr_inode_lock);
++ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
++ __get_dev(gr_real_root.dentry), role);
++ read_unlock(&gr_inode_lock);
++ }
++out:
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
++
++ BUG_ON(retval == NULL);
++
++ return retval;
++}
++
++void
++assign_special_role(const char *rolename)
++{
++ struct acl_object_label *obj;
++ struct acl_role_label *r;
++ struct acl_role_label *assigned = NULL;
++ struct task_struct *tsk;
++ struct file *filp;
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ assigned = r;
++ break;
++ }
++ FOR_EACH_ROLE_END(r)
++
++ if (!assigned)
++ return;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++
++ tsk = current->real_parent;
++ if (tsk == NULL)
++ goto out_unlock;
++
++ filp = tsk->exec_file;
++ if (filp == NULL)
++ goto out_unlock;
++
++ tsk->is_writable = 0;
++ tsk->inherited = 0;
++
++ tsk->acl_sp_role = 1;
++ tsk->acl_role_id = ++acl_sp_role_value;
++ tsk->role = assigned;
++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
++ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
++#endif
++
++out_unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++}
++
++
++static void
++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
++
++ return;
++}
++
++static void
++gr_log_learn_sysctl(const char *path, const __u32 mode)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
++
++ return;
++}
++
++static void
++gr_log_learn_id_change(const char type, const unsigned int real,
++ const unsigned int effective, const unsigned int fs)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++
++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++ type, real, effective, fs, &task->signal->saved_ip);
++
++ return;
++}
++
++static void
++gr_set_proc_res(struct task_struct *task)
++{
++ struct acl_subject_label *proc;
++ unsigned short i;
++
++ proc = task->acl;
++
++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
++ return;
++
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(proc->resmask & (1U << i)))
++ continue;
++
++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
++
++ if (i == RLIMIT_CPU)
++ update_rlimit_cpu(task, proc->res[i].rlim_cur);
++ }
++
++ return;
++}
++
++/* both of the below must be called with
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++*/
++
++struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
++{
++ char *tmpname;
++ struct acl_subject_label *tmpsubj;
++ struct file *filp;
++ struct name_entry *nmatch;
++
++ filp = task->exec_file;
++ if (filp == NULL)
++ return NULL;
++
++ /* the following is to apply the correct subject
++ on binaries running when the RBAC system
++ is enabled, when the binaries have been
++ replaced or deleted since their execution
++ -----
++ when the RBAC system starts, the inode/dev
++ from exec_file will be one the RBAC system
++ is unaware of. It only knows the inode/dev
++ of the present file on disk, or the absence
++ of it.
++ */
++
++ if (filename)
++ nmatch = __lookup_name_entry(state, filename);
++ else {
++ preempt_disable();
++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
++
++ nmatch = __lookup_name_entry(state, tmpname);
++ preempt_enable();
++ }
++ tmpsubj = NULL;
++ if (nmatch) {
++ if (nmatch->deleted)
++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
++ else
++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
++ }
++ /* this also works for the reload case -- if we don't match a potentially inherited subject
++ then we fall back to a normal lookup based on the binary's ino/dev
++ */
++ if (tmpsubj == NULL)
++ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
++
++ return tmpsubj;
++}
++
++static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
++{
++ return __gr_get_subject_for_task(&running_polstate, task, filename);
++}
++
++void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
++{
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ filp = task->exec_file;
++
++ task->acl = subj;
++ task->is_writable = 0;
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
++#endif
++}
++
++static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
++{
++ __gr_apply_subject_to_task(&running_polstate, task, subj);
++}
++
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++ const struct vfsmount * mnt)
++{
++ __u32 retval = mode;
++ struct acl_subject_label *curracl;
++ struct acl_object_label *currobj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ curracl = current->acl;
++
++ currobj = chk_obj_label(dentry, mnt, curracl);
++ retval = currobj->mode & mode;
++
++ /* if we're opening a specified transfer file for writing
++ (e.g. /dev/initctl), then transfer our role to init
++ */
++ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
++ current->role->roletype & GR_ROLE_PERSIST)) {
++ struct task_struct *task = init_pid_ns.child_reaper;
++
++ if (task->role != current->role) {
++ struct acl_subject_label *subj;
++
++ task->acl_sp_role = 0;
++ task->acl_role_id = current->acl_role_id;
++ task->role = current->role;
++ rcu_read_lock();
++ read_lock(&grsec_exec_file_lock);
++ subj = gr_get_subject_for_task(task, NULL);
++ gr_apply_subject_to_task(task, subj);
++ read_unlock(&grsec_exec_file_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
++ }
++ }
++
++ if (unlikely
++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ retval = new_mode;
++
++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
++ new_mode |= GR_INHERIT;
++
++ if (!(mode & GR_NOLEARN))
++ gr_log_learn(dentry, mnt, new_mode);
++ }
++
++ return retval;
++}
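++/* Behavioral note: in learn mode (GR_LEARN/GR_INHERITLEARN) a request
++   the policy would deny is instead granted, stripped of its audit and
++   suppression bits, and logged via gr_log_learn() so the access can
++   later be folded into the policy; internal checks pass GR_NOLEARN to
++   skip the learn log. */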
++
++struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt)
++{
++ struct name_entry *match;
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *curracl;
++ char *path;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return NULL;
++
++ preempt_disable();
++ path = gr_to_filename_rbac(new_dentry, mnt);
++ match = lookup_name_entry_create(path);
++
++ curracl = current->acl;
++
++ if (match) {
++ read_lock(&gr_inode_lock);
++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++ read_unlock(&gr_inode_lock);
++
++ if (matchpo) {
++ preempt_enable();
++ return matchpo;
++ }
++ }
++
++ // lookup parent
++
++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++
++ preempt_enable();
++ return matchpo;
++}
++
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++ const struct vfsmount * mnt, const __u32 mode)
++{
++ struct acl_object_label *matchpo;
++ __u32 retval;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ matchpo = gr_get_create_object(new_dentry, parent, mnt);
++
++ retval = matchpo->mode & mode;
++
++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(new_dentry, mnt, new_mode);
++ return new_mode;
++ }
++
++ return retval;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++ struct acl_object_label *obj;
++ __u32 oldmode, newmode;
++ __u32 needmode;
++ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
++ GR_DELETE | GR_INHERIT;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (GR_CREATE | GR_LINK);
++
++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++ oldmode = obj->mode;
++
++ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
++ newmode = obj->mode;
++
++ needmode = newmode & checkmodes;
++
++ // old name for hardlink must have at least the permissions of the new name
++ if ((oldmode & needmode) != needmode)
++ goto bad;
++
++ // if old name had restrictions/auditing, make sure the new name does as well
++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
++
++ // don't allow hardlinking of suid/sgid/fcapped files without permission
++ if (is_privileged_binary(old_dentry))
++ needmode |= GR_SETID;
++
++ if ((newmode & needmode) != needmode)
++ goto bad;
++
++ // enforce minimum permissions
++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
++ return newmode;
++bad:
++ needmode = oldmode;
++ if (is_privileged_binary(old_dentry))
++ needmode |= GR_SETID;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
++ return (GR_CREATE | GR_LINK);
++ } else if (newmode & GR_SUPPRESS)
++ return GR_SUPPRESS;
++ else
++ return 0;
++}
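++/* Example (illustrative): hardlinking a setuid binary to a new name
++   requires the subject to hold GR_SETID on the new path in addition to
++   GR_CREATE | GR_LINK, and the old name must already grant every
++   checked permission the new name would confer -- otherwise the
++   request falls through to the learn/suppress/deny handling above. */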
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY) || !task))
++ return 0;
++
++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++ task->acl != current->acl)
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++ struct task_struct *p;
++ int ret = 0;
++
++ if (unlikely(!(gr_status & GR_READY) || !pid))
++ return ret;
++
++ read_lock(&tasklist_lock);
++ do_each_pid_task(pid, type, p) {
++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
++ p->acl != current->acl) {
++ ret = 1;
++ goto out;
++ }
++ } while_each_pid_task(pid, type, p);
++out:
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ struct task_struct *p = current;
++
++ tsk->inherited = p->inherited;
++ tsk->acl_sp_role = 0;
++ tsk->acl_role_id = p->acl_role_id;
++ tsk->acl = p->acl;
++ tsk->role = p->role;
++ tsk->signal->used_accept = 0;
++ tsk->signal->curr_ip = p->signal->curr_ip;
++ tsk->signal->saved_ip = p->signal->saved_ip;
++ if (p->exec_file)
++ get_file(p->exec_file);
++ tsk->exec_file = p->exec_file;
++ tsk->is_writable = p->is_writable;
++ if (unlikely(p->signal->used_accept)) {
++ p->signal->curr_ip = 0;
++ p->signal->saved_ip = 0;
++ }
++
++ return;
++}
++
++extern int gr_process_kernel_setuid_ban(struct user_struct *user);
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ uid_t *uidlist;
++ int curuid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
++ struct user_struct *user;
++
++ if (real == -1)
++ goto skipit;
++
++ user = find_user(real);
++ if (user == NULL)
++ goto skipit;
++
++ if (gr_process_kernel_setuid_ban(user)) {
++ /* for find_user */
++ free_uid(user);
++ return 1;
++ }
++
++ /* for find_user */
++ free_uid(user);
++
++skipit:
++#endif
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('u', real, effective, fs);
++
++ num = current->acl->user_trans_num;
++ uidlist = current->acl->user_transitions;
++
++ if (uidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ realok = 1;
++ if (effective == curuid)
++ effectiveok = 1;
++ if (fs == curuid)
++ fsok = 1;
++ }
++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ break;
++ if (effective == curuid)
++ break;
++ if (fs == curuid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
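++/* Example (illustrative): with user_trans_type == GR_ID_ALLOW and a
++   user_transitions list of {33, 100}, a transition to real/effective/fs
++   uids of (33, 33, 100) passes, while (33, 33, 0) is denied and logged,
++   since uid 0 is not on the allow list.  An id of -1 ("leave
++   unchanged") always passes. */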
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ gid_t *gidlist;
++ int curgid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ gr_log_learn_id_change('g', real, effective, fs);
++
++ num = current->acl->group_trans_num;
++ gidlist = current->acl->group_transitions;
++
++ if (gidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ realok = 1;
++ if (effective == curgid)
++ effectiveok = 1;
++ if (fs == curgid)
++ fsok = 1;
++ }
++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ break;
++ if (effective == curgid)
++ break;
++ if (fs == curgid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++ return 1;
++ }
++}
++
++extern int gr_acl_is_capable(const int cap);
++
++void
++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++{
++ struct acl_role_label *role = task->role;
++ struct acl_subject_label *subj = NULL;
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ filp = task->exec_file;
++
++ /* kernel process, we'll give them the kernel role */
++ if (unlikely(!filp)) {
++ task->role = running_polstate.kernel_role;
++ task->acl = running_polstate.kernel_role->root_label;
++ return;
++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
++ /* save the current ip at time of role lookup so that the proper
++ IP will be learned for role_allowed_ip */
++ task->signal->saved_ip = task->signal->curr_ip;
++ role = lookup_acl_role_label(task, uid, gid);
++ }
++
++ /* don't change the role if we're not a privileged process */
++ if (role && task->role != role &&
++ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
++ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
++ return;
++
++ /* perform subject lookup in possibly new role
++ we can use this result below in the case where role == task->role
++ */
++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
++
++ /* if we changed uid/gid, but the result is the same role
++ and we are using inheritance, don't lose the inherited subject:
++ if the current subject is other than what a normal lookup
++ would result in, we arrived via inheritance, so don't
++ lose that subject
++ */
++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
++ (subj == task->acl)))
++ task->acl = subj;
++
++ /* leave task->inherited unaffected */
++
++ task->role = role;
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++
++ gr_set_proc_res(task);
++
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_flags)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *newacl;
++ struct acl_object_label *obj;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ newacl = chk_subj_label(dentry, mnt, task->role);
++
++ /* special handling for the case where we did an strace -f -p <pid> from an
++ admin role, and the traced pid then did an exec
++ */
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
++ (task->parent->acl->mode & GR_POVERRIDE))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ goto skip_check;
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
++ !(task->role->roletype & GR_ROLE_GOD) &&
++ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
++ if (unsafe_flags & LSM_UNSAFE_SHARE)
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
++ else
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
++ return -EACCES;
++ }
++
++skip_check:
++
++ obj = chk_obj_label(dentry, mnt, task->acl);
++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if (!(task->acl->mode & GR_INHERITLEARN) &&
++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
++ if (obj->nested)
++ task->acl = obj->nested;
++ else
++ task->acl = newacl;
++ task->inherited = 0;
++ } else {
++ task->inherited = 1;
++ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
++ }
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(dentry, mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res(task);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ return 0;
++}
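++
++/* Sketch of the exec-time flow above: the subject for the exec'd
++   binary is looked up in the current role; an exec under ptrace is
++   refused (GR_PTRACE_EXEC_ACL_MSG, or GR_UNSAFESHARE_EXEC_ACL_MSG for
++   unsafe shares) unless the tracer runs with an admin (GR_ROLE_GOD)
++   or policy-override (GR_POVERRIDE) subject, the binary is marked
++   GR_PTRACERD, or learning is active.  When the object carries
++   GR_INHERIT, the old subject is kept and task->inherited is set
++   instead of switching to the new subject. */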
++
++/* always called with valid inodev ptr */
++static void
++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
++{
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *matchps;
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++
++ FOR_EACH_ROLE_START(role)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++ /* nested subjects aren't in the role's subj_hash table */
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
++ matchps->mode |= GR_DELETED;
++ FOR_EACH_ROLE_END(role)
++
++ inodev->nentry->deleted = 1;
++
++ return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ struct inodev_entry *inodev;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ write_lock(&gr_inode_lock);
++ inodev = lookup_inodev_entry(ino, dev);
++ if (inodev != NULL)
++ do_handle_delete(inodev, ino, dev);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
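++
++/* When a policy-covered inode is unlinked, every object and subject
++   label still referencing its (inode, device) pair is tagged
++   GR_DELETED rather than freed; the update_acl_*_label() helpers
++   below clear that tag and rehash the label when a new file is later
++   created at the same pathname, so labels follow a path across
++   delete/create cycles. */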
++
++static void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_subject_label *subj)
++{
++ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
++ struct acl_object_label *match;
++
++ match = subj->obj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ subj->obj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_obj_label(match, subj);
++ }
++
++ return;
++}
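++
++/* The same relocate-in-place pattern repeats for subject labels and
++   inodev entries below: unlink the GR_DELETED node from its hash
++   chain (fixing prev/next on both sides), rewrite its (inode, device)
++   key, clear GR_DELETED, and insert it into the bucket the new key
++   hashes to. */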
++
++static void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_role_label *role)
++{
++ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
++ struct acl_subject_label *match;
++
++ match = role->subj_hash[index];
++
++ while (match && (match->inode != oldinode ||
++ match->device != olddevice ||
++ !(match->mode & GR_DELETED)))
++ match = match->next;
++
++ if (match && (match->inode == oldinode)
++ && (match->device == olddevice)
++ && (match->mode & GR_DELETED)) {
++ if (match->prev == NULL) {
++ role->subj_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->inode = newinode;
++ match->device = newdevice;
++ match->mode &= ~GR_DELETED;
++
++ insert_acl_subj_label(match, role);
++ }
++
++ return;
++}
++
++static void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice)
++{
++ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
++ struct inodev_entry *match;
++
++ match = running_polstate.inodev_set.i_hash[index];
++
++ while (match && (match->nentry->inode != oldinode ||
++ match->nentry->device != olddevice || !match->nentry->deleted))
++ match = match->next;
++
++ if (match && (match->nentry->inode == oldinode)
++ && (match->nentry->device == olddevice) &&
++ match->nentry->deleted) {
++ if (match->prev == NULL) {
++ running_polstate.inodev_set.i_hash[index] = match->next;
++ if (match->next != NULL)
++ match->next->prev = NULL;
++ } else {
++ match->prev->next = match->next;
++ if (match->next != NULL)
++ match->next->prev = match->prev;
++ }
++ match->prev = NULL;
++ match->next = NULL;
++ match->nentry->inode = newinode;
++ match->nentry->device = newdevice;
++ match->nentry->deleted = 0;
++
++ insert_inodev_entry(match);
++ }
++
++ return;
++}
++
++static void
++__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
++{
++ struct acl_subject_label *subj;
++ struct acl_role_label *role;
++ unsigned int x;
++
++ FOR_EACH_ROLE_START(role)
++ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
++
++ FOR_EACH_NESTED_SUBJECT_START(role, subj)
++ if ((subj->inode == ino) && (subj->device == dev)) {
++ subj->inode = ino;
++ subj->device = dev;
++ }
++ /* nested subjects aren't in the role's subj_hash table */
++ update_acl_obj_label(matchn->inode, matchn->device,
++ ino, dev, subj);
++ FOR_EACH_NESTED_SUBJECT_END(subj)
++ FOR_EACH_SUBJECT_START(role, subj, x)
++ update_acl_obj_label(matchn->inode, matchn->device,
++ ino, dev, subj);
++ FOR_EACH_SUBJECT_END(subj,x)
++ FOR_EACH_ROLE_END(role)
++
++ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
++
++ return;
++}
++
++static void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++ const struct vfsmount *mnt)
++{
++ ino_t ino = dentry->d_inode->i_ino;
++ dev_t dev = __get_dev(dentry);
++
++ __do_handle_create(matchn, ino, dev);
++
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ do_handle_create(matchn, dentry, mnt);
++ write_unlock(&gr_inode_lock);
++ }
++ preempt_enable();
++
++ return;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
++ write_unlock(&gr_inode_lock);
++ }
++ preempt_enable();
++
++ return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ struct name_entry *matchn;
++ struct inodev_entry *inodev;
++ struct inode *inode = new_dentry->d_inode;
++ ino_t old_ino = old_dentry->d_inode->i_ino;
++ dev_t old_dev = __get_dev(old_dentry);
++
++	/* vfs_rename swaps the name and parent link for old_dentry and
++	   new_dentry: at this point, old_dentry has the new name, parent
++	   link, and inode for the renamed file, while if a file is being
++	   replaced by the rename, new_dentry holds the inode and name of
++	   the replaced file
++	*/
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
++
++ /* we wouldn't have to check d_inode if it weren't for
++ NFS silly-renaming
++ */
++
++ write_lock(&gr_inode_lock);
++ if (unlikely(replace && inode)) {
++ ino_t new_ino = inode->i_ino;
++ dev_t new_dev = __get_dev(new_dentry);
++
++ inodev = lookup_inodev_entry(new_ino, new_dev);
++ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
++ do_handle_delete(inodev, new_ino, new_dev);
++ }
++
++ inodev = lookup_inodev_entry(old_ino, old_dev);
++ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
++ do_handle_delete(inodev, old_ino, old_dev);
++
++ if (unlikely((unsigned long)matchn))
++ do_handle_create(matchn, old_dentry, mnt);
++
++ write_unlock(&gr_inode_lock);
++ preempt_enable();
++
++ return;
++}
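++
++/* Illustrative rename sequence (paths hypothetical): renaming /tmp/a
++   over an existing /tmp/b first marks /tmp/b's labels GR_DELETED,
++   since its inode is being replaced, then marks the renamed inode's
++   labels deleted when no other hard link keeps them current, and
++   finally rebinds any name_entry for the destination path via
++   do_handle_create() -- old_dentry already carries the new name at
++   this point, per the vfs_rename comment above. */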
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ struct acl_subject_label *acl;
++ const struct cred *cred;
++
++ if (unlikely((gr_status & GR_READY) &&
++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
++ goto skip_reslog;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ gr_log_resource(task, res, wanted, gt);
++#endif
++ skip_reslog:
++
++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
++ return;
++
++ acl = task->acl;
++
++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
++ !(acl->resmask & (1U << (unsigned short) res))))
++ return;
++
++ if (wanted >= acl->res[res].rlim_cur) {
++ unsigned long res_add;
++
++ res_add = wanted;
++ switch (res) {
++ case RLIMIT_CPU:
++ res_add += GR_RLIM_CPU_BUMP;
++ break;
++ case RLIMIT_FSIZE:
++ res_add += GR_RLIM_FSIZE_BUMP;
++ break;
++ case RLIMIT_DATA:
++ res_add += GR_RLIM_DATA_BUMP;
++ break;
++ case RLIMIT_STACK:
++ res_add += GR_RLIM_STACK_BUMP;
++ break;
++ case RLIMIT_CORE:
++ res_add += GR_RLIM_CORE_BUMP;
++ break;
++ case RLIMIT_RSS:
++ res_add += GR_RLIM_RSS_BUMP;
++ break;
++ case RLIMIT_NPROC:
++ res_add += GR_RLIM_NPROC_BUMP;
++ break;
++ case RLIMIT_NOFILE:
++ res_add += GR_RLIM_NOFILE_BUMP;
++ break;
++ case RLIMIT_MEMLOCK:
++ res_add += GR_RLIM_MEMLOCK_BUMP;
++ break;
++ case RLIMIT_AS:
++ res_add += GR_RLIM_AS_BUMP;
++ break;
++ case RLIMIT_LOCKS:
++ res_add += GR_RLIM_LOCKS_BUMP;
++ break;
++ case RLIMIT_SIGPENDING:
++ res_add += GR_RLIM_SIGPENDING_BUMP;
++ break;
++ case RLIMIT_MSGQUEUE:
++ res_add += GR_RLIM_MSGQUEUE_BUMP;
++ break;
++ case RLIMIT_NICE:
++ res_add += GR_RLIM_NICE_BUMP;
++ break;
++ case RLIMIT_RTPRIO:
++ res_add += GR_RLIM_RTPRIO_BUMP;
++ break;
++ case RLIMIT_RTTIME:
++ res_add += GR_RLIM_RTTIME_BUMP;
++ break;
++ }
++
++ acl->res[res].rlim_cur = res_add;
++
++ if (wanted > acl->res[res].rlim_max)
++ acl->res[res].rlim_max = res_add;
++
++ /* only log the subject filename, since resource logging is supported for
++ single-subject learning only */
++ rcu_read_lock();
++ cred = __task_cred(task);
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid, cred->gid, acl->filename,
++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
++ "", (unsigned long) res, &task->signal->saved_ip);
++ rcu_read_unlock();
++ }
++
++ return;
++}
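++
++/* Example of the learning bump above: with RLIMIT_NOFILE learning
++   active and a subject limit of 1024, a request for 1500 descriptors
++   sets rlim_cur to 1500 + GR_RLIM_NOFILE_BUMP (and raises rlim_max
++   the same way if exceeded), then emits a GR_LEARN_AUDIT_MSG record
++   so the userspace learning tools can fold the observed limit back
++   into the policy. */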
++
++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *proc;
++ unsigned long flags;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ flags = pax_get_flags(task);
++
++ proc = task->acl;
++
++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
++ flags &= ~MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
++ flags &= ~MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
++ flags &= ~MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
++ flags &= ~MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
++ flags &= ~MF_PAX_MPROTECT;
++
++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
++ flags |= MF_PAX_PAGEEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
++ flags |= MF_PAX_SEGMEXEC;
++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
++ flags |= MF_PAX_RANDMMAP;
++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
++ flags |= MF_PAX_EMUTRAMP;
++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
++ flags |= MF_PAX_MPROTECT;
++
++ pax_set_flags(task, flags);
++
++ return;
++}
++#endif
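++
++/* The subject's GR_PAX_{DISABLE,ENABLE}_* bits let RBAC policy force
++   individual PaX features off or on per binary at exec time,
++   overriding whatever flags were derived from the ELF object itself.
++   The disable bits are applied before the enable bits, so a subject
++   that sets both for the same feature ends up with it enabled. */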
++
++#ifdef CONFIG_SYSCTL
++/* Eric Biederman likes breaking userland ABI and every inode-based security
++ system to save 35kb of memory */
++
++/* we modify the passed in filename, but adjust it back before returning */
++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
++{
++ struct name_entry *nmatch;
++ char *p, *lastp = NULL;
++ struct acl_object_label *obj = NULL, *tmp;
++ struct acl_subject_label *tmpsubj;
++ char c = '\0';
++
++ read_lock(&gr_inode_lock);
++
++ p = name + len - 1;
++ do {
++ nmatch = lookup_name_entry(name);
++ if (lastp != NULL)
++ *lastp = c;
++
++ if (nmatch == NULL)
++ goto next_component;
++ tmpsubj = current->acl;
++ do {
++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
++ if (obj != NULL) {
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, name)) {
++ obj = tmp;
++ goto found_obj;
++ }
++ tmp = tmp->next;
++ }
++ goto found_obj;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++next_component:
++ /* end case */
++ if (p == name)
++ break;
++
++ while (*p != '/')
++ p--;
++ if (p == name)
++ lastp = p + 1;
++ else {
++ lastp = p;
++ p--;
++ }
++ c = *lastp;
++ *lastp = '\0';
++ } while (1);
++found_obj:
++ read_unlock(&gr_inode_lock);
++ /* obj returned will always be non-null */
++ return obj;
++}
++
++/* returns 0 when allowing, non-zero on error
++ op of 0 is used for readdir, so we don't log the names of hidden files
++*/
++__u32
++gr_handle_sysctl(const struct ctl_table *table, const int op)
++{
++ struct ctl_table *tmp;
++ const char *proc_sys = "/proc/sys";
++ char *path;
++ struct acl_object_label *obj;
++ unsigned short len = 0, pos = 0, depth = 0, i;
++ __u32 err = 0;
++ __u32 mode = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++	/* for now, ignore operations on sysctl directories (entries with
++	   children) unless this is a readdir */
++ if (table->child != NULL && op != 0)
++ return 0;
++
++ mode |= GR_FIND;
++ /* it's only a read if it's an entry, read on dirs is for readdir */
++ if (op & MAY_READ)
++ mode |= GR_READ;
++ if (op & MAY_WRITE)
++ mode |= GR_WRITE;
++
++ preempt_disable();
++
++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++
++ /* it's only a read/write if it's an actual entry, not a dir
++ (which are opened for readdir)
++ */
++
++ /* convert the requested sysctl entry into a pathname */
++
++ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ len += strlen(tmp->procname);
++ len++;
++ depth++;
++ }
++
++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
++ /* deny */
++ goto out;
++ }
++
++ memset(path, 0, PAGE_SIZE);
++
++ memcpy(path, proc_sys, strlen(proc_sys));
++
++ pos += strlen(proc_sys);
++
++ for (; depth > 0; depth--) {
++ path[pos] = '/';
++ pos++;
++ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
++ if (depth == i) {
++ memcpy(path + pos, tmp->procname,
++ strlen(tmp->procname));
++ pos += strlen(tmp->procname);
++ }
++ i++;
++ }
++ }
++
++ obj = gr_lookup_by_name(path, pos);
++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
++
++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
++ ((err & mode) != mode))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ err = 0;
++ gr_log_learn_sysctl(path, new_mode);
++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
++ err = -ENOENT;
++ } else if (!(err & GR_FIND)) {
++ err = -ENOENT;
++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = -EACCES;
++ } else if ((err & mode) != mode) {
++ err = -EACCES;
++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "");
++ err = 0;
++ } else
++ err = 0;
++
++ out:
++ preempt_enable();
++
++ return err;
++}
++#endif
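++
++/* Path reconstruction example for the sysctl handler above: for the
++   ctl_table chain representing net.ipv4.ip_forward, the parent walk
++   counts depth = 3 components and emits "/proc/sys" + "/net" +
++   "/ipv4" + "/ip_forward"; gr_lookup_by_name() then matches that
++   path, longest prefix first, against the subject's objects, falling
++   back through parent subjects and attached glob entries. */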
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ filp = task->exec_file;
++
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->real_parent;
++ }
++
++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 1;
++ }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY)) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 0;
++ }
++#endif
++
++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++
++ if (retmode & GR_NOPTRACE)
++ return 1;
++
++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl || (current->acl != current->role->root_label
++ && current->pid != task->pid)))
++ return 1;
++
++ return 0;
++}
++
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ if (!(current->role->roletype & GR_ROLE_GOD))
++ return;
++
++ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
++ p->role->rolename, gr_task_roletype_to_char(p),
++ p->acl->filename);
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++#endif
++ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->real_parent;
++ }
++
++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
++ read_unlock(&tasklist_lock);
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++ read_unlock(&tasklist_lock);
++ }
++
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ if (!(gr_status & GR_READY))
++ return 0;
++#endif
++
++ read_lock(&grsec_exec_file_lock);
++ if (unlikely(!task->exec_file)) {
++ read_unlock(&grsec_exec_file_lock);
++ return 0;
++ }
++
++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
++ read_unlock(&grsec_exec_file_lock);
++
++ if (retmode & GR_NOPTRACE) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ if (retmode & GR_PTRACERD) {
++ switch (request) {
++ case PTRACE_SEIZE:
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
++ case PTRACE_SETREGS:
++ case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++ case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++ case PTRACE_SETVRREGS:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++ } else if (!(current->acl->mode & GR_POVERRIDE) &&
++ !(current->role->roletype & GR_ROLE_GOD) &&
++ (current->acl != task->acl)) {
++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
++ return 1;
++ }
++
++ return 0;
++}
++
++static int is_writable_mmap(const struct file *filp)
++{
++ struct task_struct *task = current;
++ struct acl_object_label *obj, *obj2;
++
++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
++ task->role->root_label);
++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ if (is_writable_mmap(file))
++ return 0;
++
++ mode =
++ gr_search_file(file->f_path.dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_path.mnt);
++
++ if (!gr_tpe_allow(file))
++ return 0;
++
++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
++ return 1;
++ }
++
++ return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ unsigned long runtime;
++ unsigned long cputime;
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ struct timespec timeval;
++
++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++ !(task->acl->mode & GR_PROCACCT)))
++ return;
++
++ do_posix_clock_monotonic_gettime(&timeval);
++ runtime = timeval.tv_sec - task->start_time.tv_sec;
++ wday = runtime / (3600 * 24);
++ runtime -= wday * (3600 * 24);
++ whr = runtime / 3600;
++ runtime -= whr * 3600;
++ wmin = runtime / 60;
++ runtime -= wmin * 60;
++ wsec = runtime;
++
++ cputime = (task->utime + task->stime) / HZ;
++ cday = cputime / (3600 * 24);
++ cputime -= cday * (3600 * 24);
++ chr = cputime / 3600;
++ cputime -= chr * 3600;
++ cmin = cputime / 60;
++ cputime -= cmin * 60;
++ csec = cputime;
++
++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
++
++ return;
++}
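++
++/* Time decomposition example: an elapsed runtime of 90061 seconds
++   splits as 90061 / 86400 = 1 day, remainder 3661 -> 1 hour, 1
++   minute, 1 second, reported as wday/whr/wmin/wsec; CPU time
++   (utime + stime, converted from ticks via HZ) is decomposed the same
++   way into cday/chr/cmin/csec for the GR_ACL_PROCACCT_MSG record. */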
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ struct task_struct *task;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ const struct cred *cred;
++#endif
++ int ret = 0;
++
++ /* restrict taskstats viewing to un-chrooted root users
++ who have the 'view' subject flag if the RBAC system is enabled
++ */
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ task = find_task_by_vpid(pid);
++ if (task) {
++#ifdef CONFIG_GRKERNSEC_CHROOT
++ if (proc_is_chrooted(task))
++ ret = -EACCES;
++#endif
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ cred = __task_cred(task);
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ if (cred->uid != 0)
++ ret = -EACCES;
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (cred->uid != 0 && !groups_search(cred->group_info, grsec_proc_gid))
++ ret = -EACCES;
++#endif
++#endif
++ if (gr_status & GR_READY) {
++ if (!(task->acl->mode & GR_VIEW))
++ ret = -EACCES;
++ }
++ } else
++ ret = -ENOENT;
++
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return ret;
++}
++#endif
++
++/* AUXV entries are filled via a descendant of search_binary_handler
++ after we've already applied the subject for the target
++*/
++int gr_acl_enable_at_secure(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (current->acl->mode & GR_ATSECURE)
++ return 1;
++
++ return 0;
++}
++
++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
++{
++ struct task_struct *task = current;
++ struct dentry *dentry = file->f_path.dentry;
++ struct vfsmount *mnt = file->f_path.mnt;
++ struct acl_object_label *obj, *tmp;
++ struct acl_subject_label *subj;
++ unsigned int bufsize;
++ int is_not_root;
++ char *path;
++ dev_t dev = __get_dev(dentry);
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 1;
++
++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
++ return 1;
++
++ /* ignore Eric Biederman */
++ if (IS_PRIVATE(dentry->d_inode))
++ return 1;
++
++ subj = task->acl;
++ read_lock(&gr_inode_lock);
++ do {
++ obj = lookup_acl_obj_label(ino, dev, subj);
++ if (obj != NULL) {
++ read_unlock(&gr_inode_lock);
++ return (obj->mode & GR_FIND) ? 1 : 0;
++ }
++ } while ((subj = subj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++	/* this is purely an optimization, since we're looking up an object
++	   for the directory we're doing a readdir on: if any globbed object
++	   could match the entry being filled into the directory, the object
++	   found here will be an anchor point with the globbed objects
++	   attached
++	*/
++ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
++ if (obj->globbed == NULL)
++ return (obj->mode & GR_FIND) ? 1 : 0;
++
++ is_not_root = ((obj->filename[0] == '/') &&
++ (obj->filename[1] == '\0')) ? 0 : 1;
++ bufsize = PAGE_SIZE - namelen - is_not_root;
++
++ /* check bufsize > PAGE_SIZE || bufsize == 0 */
++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
++ return 1;
++
++ preempt_disable();
++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ bufsize);
++
++ bufsize = strlen(path);
++
++ /* if base is "/", don't append an additional slash */
++ if (is_not_root)
++ *(path + bufsize) = '/';
++ memcpy(path + bufsize + is_not_root, name, namelen);
++ *(path + bufsize + namelen + is_not_root) = '\0';
++
++ tmp = obj->globbed;
++ while (tmp) {
++ if (!glob_match(tmp->filename, path)) {
++ preempt_enable();
++ return (tmp->mode & GR_FIND) ? 1 : 0;
++ }
++ tmp = tmp->next;
++ }
++ preempt_enable();
++ return (obj->mode & GR_FIND) ? 1 : 0;
++}
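++
++/* Glob example for the readdir filter above: given a hypothetical
++   object pattern "/home/*/.ssh" attached to a /home anchor object, a
++   readdir of /home/jdoe builds the candidate path "/home/jdoe/.ssh"
++   for the ".ssh" entry and tests it against each attached pattern; a
++   matching pattern's GR_FIND bit decides visibility, and only on a
++   complete miss does the anchor object's own mode apply. */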
++
++void gr_put_exec_file(struct task_struct *task)
++{
++ struct file *filp;
++
++ write_lock(&grsec_exec_file_lock);
++ filp = task->exec_file;
++ task->exec_file = NULL;
++ write_unlock(&grsec_exec_file_lock);
++
++ if (filp)
++ fput(filp);
++
++ return;
++}
++
++
++#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
++EXPORT_SYMBOL(gr_acl_is_enabled);
++#endif
++EXPORT_SYMBOL(gr_learn_resource);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
++
+diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
+new file mode 100644
+index 0000000..18ffbbd
+--- /dev/null
++++ b/grsecurity/gracl_alloc.c
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
++struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
++
++static __inline__ int
++alloc_pop(void)
++{
++ if (current_alloc_state->alloc_stack_next == 1)
++ return 0;
++
++ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
++
++ current_alloc_state->alloc_stack_next--;
++
++ return 1;
++}
++
++static __inline__ int
++alloc_push(void *buf)
++{
++ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
++ return 1;
++
++ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
++
++ current_alloc_state->alloc_stack_next++;
++
++ return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++ void *ret = NULL;
++
++ if (!len || len > PAGE_SIZE)
++ goto out;
++
++ ret = kmalloc(len, GFP_KERNEL);
++
++ if (ret) {
++ if (alloc_push(ret)) {
++ kfree(ret);
++ ret = NULL;
++ }
++ }
++
++out:
++ return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++ if (!len || (num > (PAGE_SIZE / len)))
++ return NULL;
++
++ return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++ if (!current_alloc_state->alloc_stack)
++ return;
++
++ while (alloc_pop()) ;
++
++ if (current_alloc_state->alloc_stack) {
++ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++ kfree(current_alloc_state->alloc_stack);
++ else
++ vfree(current_alloc_state->alloc_stack);
++ }
++
++ current_alloc_state->alloc_stack = NULL;
++ current_alloc_state->alloc_stack_size = 1;
++ current_alloc_state->alloc_stack_next = 1;
++
++ return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++ if ((size * sizeof (void *)) <= PAGE_SIZE)
++ current_alloc_state->alloc_stack =
++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++ else
++ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++ current_alloc_state->alloc_stack_size = size;
++ current_alloc_state->alloc_stack_next = 1;
++
++ if (!current_alloc_state->alloc_stack)
++ return 0;
++ else
++ return 1;
++}
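++
++/* The allocator above is a simple pointer-stack arena: acl_alloc()
++   caps each request at PAGE_SIZE and pushes the kmalloc'd buffer onto
++   alloc_stack, so acl_free_all() can release every allocation from a
++   policy load in one sweep (e.g. when a load fails partway or the
++   policy is replaced).  acl_alloc_num() rejects any num * len product
++   that would exceed a page, which also forecloses multiplication
++   overflow. */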
+diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
+new file mode 100644
+index 0000000..955ddfb
+--- /dev/null
++++ b/grsecurity/gracl_cap.c
+@@ -0,0 +1,101 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int
++gr_acl_is_capable(const int cap)
++{
++ struct task_struct *task = current;
++ const struct cred *cred = current_cred();
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++ kernel_cap_t cap_audit = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = task->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++ cap_audit = curracl->cap_invert_audit;
++
++ while ((curracl = curracl->parent_subject)) {
++		/* if the cap isn't specified in the current computed mask but
++		   is specified in the current level subject, add that subject's
++		   setting to the computed mask; if that subject also lowers the
++		   cap, add it to the set of dropped capabilities, and likewise
++		   propagate its audit-invert bit
++		*/
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ if (cap_raised(curracl->cap_invert_audit, cap))
++ cap_raise(cap_audit, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap)) {
++ if (cap_raised(cap_audit, cap))
++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
++ return 1;
++ }
++
++ curracl = task->acl;
++
++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
++ && cap_raised(cred->cap_effective, cap)) {
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, cred->uid,
++ cred->gid, task->exec_file ?
++ gr_to_filename(task->exec_file->f_path.dentry,
++ task->exec_file->f_path.mnt) : curracl->filename,
++ curracl->filename, 0UL,
++ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
++ return 1;
++ }
++
++ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
++ return 0;
++}
++
++int
++gr_acl_is_capable_nolog(const int cap)
++{
++ struct acl_subject_label *curracl;
++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = current->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++		/* if the cap isn't specified in the current computed mask but
++		   is specified in the current level subject, add that subject's
++		   setting to the computed mask; if that subject also lowers the
++		   cap, add it to the set of dropped capabilities
++		*/
++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
++ cap_raise(cap_mask, cap);
++ if (cap_raised(curracl->cap_lower, cap))
++ cap_raise(cap_drop, cap);
++ }
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ return 0;
++}
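++
++/* Capability walk example: suppose a nested subject neither masks nor
++   lowers CAP_NET_ADMIN, while its parent both masks and lowers it.
++   The child leaves the bit clear in the computed mask, so the
++   parent's cap_mask raises it and the parent's cap_lower adds it to
++   cap_drop: the capability is denied even though the child subject
++   said nothing about it.  Once a bit enters the computed mask, deeper
++   ancestors can no longer override it. */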
++
+diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
+new file mode 100644
+index 0000000..ca25605
+--- /dev/null
++++ b/grsecurity/gracl_compat.c
+@@ -0,0 +1,270 @@
++#include <linux/kernel.h>
++#include <linux/gracl.h>
++#include <linux/compat.h>
++#include <linux/gracl_compat.h>
++
++#include <asm/uaccess.h>
++
++int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
++{
++ struct gr_arg_wrapper_compat uwrapcompat;
++
++ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
++ return -EFAULT;
++
++ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
++ (uwrapcompat.version != 0x2901)) ||
++ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
++ return -EINVAL;
++
++ uwrap->arg = compat_ptr(uwrapcompat.arg);
++ uwrap->version = uwrapcompat.version;
++ uwrap->size = sizeof(struct gr_arg);
++
++ return 0;
++}
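++
++/* The helpers in this file let a 32-bit policy loader drive a 64-bit
++   kernel: each *_compat struct mirrors the 32-bit userland layout and
++   is copied in whole, then widened field by field, with compat_ptr()
++   converting 32-bit user pointers to native ones and
++   COMPAT_RLIM_INFINITY mapped to the kernel's RLIM_INFINITY below. */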
++
++int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ struct gr_arg_compat argcompat;
++
++ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
++ return -EFAULT;
++
++ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
++ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
++ arg->role_db.num_roles = argcompat.role_db.num_roles;
++ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
++ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
++ arg->role_db.num_objects = argcompat.role_db.num_objects;
++
++ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
++ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
++ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
++ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
++ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
++ arg->segv_device = argcompat.segv_device;
++ arg->segv_inode = argcompat.segv_inode;
++ arg->segv_uid = argcompat.segv_uid;
++ arg->num_sprole_pws = argcompat.num_sprole_pws;
++ arg->mode = argcompat.mode;
++
++ return 0;
++}
++
++int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ struct acl_object_label_compat objcompat;
++
++ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
++ return -EFAULT;
++
++ obj->filename = compat_ptr(objcompat.filename);
++ obj->inode = objcompat.inode;
++ obj->device = objcompat.device;
++ obj->mode = objcompat.mode;
++
++ obj->nested = compat_ptr(objcompat.nested);
++ obj->globbed = compat_ptr(objcompat.globbed);
++
++ obj->prev = compat_ptr(objcompat.prev);
++ obj->next = compat_ptr(objcompat.next);
++
++ return 0;
++}
++
++int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ unsigned int i;
++ struct acl_subject_label_compat subjcompat;
++
++ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
++ return -EFAULT;
++
++ subj->filename = compat_ptr(subjcompat.filename);
++ subj->inode = subjcompat.inode;
++ subj->device = subjcompat.device;
++ subj->mode = subjcompat.mode;
++ subj->cap_mask = subjcompat.cap_mask;
++ subj->cap_lower = subjcompat.cap_lower;
++ subj->cap_invert_audit = subjcompat.cap_invert_audit;
++
++ for (i = 0; i < GR_NLIMITS; i++) {
++ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_cur = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
++ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
++ subj->res[i].rlim_max = RLIM_INFINITY;
++ else
++ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
++ }
++ subj->resmask = subjcompat.resmask;
++
++ subj->user_trans_type = subjcompat.user_trans_type;
++ subj->group_trans_type = subjcompat.group_trans_type;
++ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
++ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
++ subj->user_trans_num = subjcompat.user_trans_num;
++ subj->group_trans_num = subjcompat.group_trans_num;
++
++ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
++ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
++ subj->ip_type = subjcompat.ip_type;
++ subj->ips = compat_ptr(subjcompat.ips);
++ subj->ip_num = subjcompat.ip_num;
++ subj->inaddr_any_override = subjcompat.inaddr_any_override;
++
++ subj->crashes = subjcompat.crashes;
++ subj->expires = subjcompat.expires;
++
++ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
++ subj->hash = compat_ptr(subjcompat.hash);
++ subj->prev = compat_ptr(subjcompat.prev);
++ subj->next = compat_ptr(subjcompat.next);
++
++ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
++ subj->obj_hash_size = subjcompat.obj_hash_size;
++ subj->pax_flags = subjcompat.pax_flags;
++
++ return 0;
++}
++
++int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ struct acl_role_label_compat rolecompat;
++
++ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
++ return -EFAULT;
++
++ role->rolename = compat_ptr(rolecompat.rolename);
++ role->uidgid = rolecompat.uidgid;
++ role->roletype = rolecompat.roletype;
++
++ role->auth_attempts = rolecompat.auth_attempts;
++ role->expires = rolecompat.expires;
++
++ role->root_label = compat_ptr(rolecompat.root_label);
++ role->hash = compat_ptr(rolecompat.hash);
++
++ role->prev = compat_ptr(rolecompat.prev);
++ role->next = compat_ptr(rolecompat.next);
++
++ role->transitions = compat_ptr(rolecompat.transitions);
++ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
++ role->domain_children = compat_ptr(rolecompat.domain_children);
++ role->domain_child_num = rolecompat.domain_child_num;
++
++ role->umask = rolecompat.umask;
++
++ role->subj_hash = compat_ptr(rolecompat.subj_hash);
++ role->subj_hash_size = rolecompat.subj_hash_size;
++
++ return 0;
++}
++
++int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ struct role_allowed_ip_compat roleip_compat;
++
++ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
++ return -EFAULT;
++
++ roleip->addr = roleip_compat.addr;
++ roleip->netmask = roleip_compat.netmask;
++
++ roleip->prev = compat_ptr(roleip_compat.prev);
++ roleip->next = compat_ptr(roleip_compat.next);
++
++ return 0;
++}
++
++int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
++{
++ struct role_transition_compat trans_compat;
++
++ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
++ return -EFAULT;
++
++ trans->rolename = compat_ptr(trans_compat.rolename);
++
++ trans->prev = compat_ptr(trans_compat.prev);
++ trans->next = compat_ptr(trans_compat.next);
++
++ return 0;
++}
++
++int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ struct gr_hash_struct_compat hash_compat;
++
++ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
++ return -EFAULT;
++
++ hash->table = compat_ptr(hash_compat.table);
++ hash->nametable = compat_ptr(hash_compat.nametable);
++ hash->first = compat_ptr(hash_compat.first);
++
++ hash->table_size = hash_compat.table_size;
++ hash->used_size = hash_compat.used_size;
++
++ hash->type = hash_compat.type;
++
++ return 0;
++}
++
++int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
++{
++ compat_uptr_t ptrcompat;
++
++ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
++ return -EFAULT;
++
++ *(void **)ptr = compat_ptr(ptrcompat);
++
++ return 0;
++}
++
++int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ struct acl_ip_label_compat ip_compat;
++
++ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
++ return -EFAULT;
++
++ ip->iface = compat_ptr(ip_compat.iface);
++ ip->addr = ip_compat.addr;
++ ip->netmask = ip_compat.netmask;
++ ip->low = ip_compat.low;
++ ip->high = ip_compat.high;
++ ip->mode = ip_compat.mode;
++ ip->type = ip_compat.type;
++
++ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
++
++ ip->prev = compat_ptr(ip_compat.prev);
++ ip->next = compat_ptr(ip_compat.next);
++
++ return 0;
++}
++
++int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ struct sprole_pw_compat pw_compat;
++
++ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
++ return -EFAULT;
++
++ pw->rolename = compat_ptr(pw_compat.rolename);
++ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
++ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
++
++ return 0;
++}
++
++size_t get_gr_arg_wrapper_size_compat(void)
++{
++ return sizeof(struct gr_arg_wrapper_compat);
++}
++
+diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
+new file mode 100644
+index 0000000..0805fd9
+--- /dev/null
++++ b/grsecurity/gracl_fs.c
+@@ -0,0 +1,437 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++umode_t
++gr_acl_umask(void)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ return current->role->umask;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return GR_FIND;
++
++ mode =
++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return mode;
++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
++ return 0;
++ } else if (unlikely(!(mode & GR_FIND)))
++ return 0;
++
++ return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ int acc_mode)
++{
++ __u32 reqmode = GR_FIND;
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return reqmode;
++
++ if (acc_mode & MAY_APPEND)
++ reqmode |= GR_APPEND;
++ else if (acc_mode & MAY_WRITE)
++ reqmode |= GR_WRITE;
++ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++ const int imode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ if (acc_mode & MAY_APPEND)
++ reqmode |= GR_APPEND;
++ // if a directory was required or the directory already exists, then
++ // don't count this open as a read
++ if ((acc_mode & MAY_READ) &&
++ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
++ reqmode |= GR_READ;
++ if ((open_flags & O_CREAT) &&
++ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++ reqmode |= GR_SETID;
++
++ mode =
++ gr_check_create(dentry, p_dentry, p_mnt,
++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 mode, reqmode = GR_FIND;
++
++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_EXEC;
++ if (fmode & S_IWOTH)
++ reqmode |= GR_WRITE;
++ if (fmode & S_IROTH)
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "");
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
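++
++/* generic_fs_handler() is the common allow/deny template for the
++   single-permission hooks below: request reqmode plus its audit and
++   suppress bits, log on full success when audit bits are set, log on
++   denial unless GR_SUPPRESS applies, and return reqmode or 0 for the
++   caller to treat as allow or deny. */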
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ umode_t *modeptr)
++{
++ umode_t mode;
++
++ *modeptr &= ~gr_acl_umask();
++ mode = *modeptr;
++
++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
++ return 1;
++
++ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_CHMOD_ACL_MSG);
++ } else {
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++ GR_UNIXCONNECT_ACL_MSG);
++}
++
++/* hardlinks require at minimum create and link permission;
++   any additional privilege required is based on the
++   privilege of the file being linked to
++*/
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ __u32 mode;
++ __u32 needmode = GR_CREATE | GR_LINK;
++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
++
++ mode =
++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++ old_mnt);
++
++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_CREATE | GR_AUDIT_CREATE |
++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
++{
++ __u32 mode;
++
++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
++ return mode;
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
++ return 0;
++ } else if (unlikely((mode & (reqmode)) != (reqmode)))
++ return 0;
++
++ return (reqmode);
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
++ reqmode |= GR_SETID;
++
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt)
++{
++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname)
++{
++ __u32 comp1, comp2;
++ int error = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (!new_dentry->d_inode) {
++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, old_mnt);
++ } else {
++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++ GR_CREATE | GR_DELETE |
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, parent_mnt);
++ comp2 =
++ gr_search_file(old_dentry,
++ GR_READ | GR_WRITE | GR_AUDIT_READ |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++ }
++
++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++ && !(comp2 & GR_SUPPRESS)) {
++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
++ error = -EACCES;
++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++ error = -EACCES;
++
++ return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ u16 id;
++ char *rolename;
++
++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
++ !(current->role->roletype & GR_ROLE_PERSIST))) {
++ id = current->acl_role_id;
++ rolename = current->role->rolename;
++ gr_set_acls(1);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
++ }
++
++ gr_put_exec_file(current);
++ return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (task != current && task->acl->mode & GR_PROTPROCFD)
++ return -EACCES;
++
++ return 0;
++}
+diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
+new file mode 100644
+index 0000000..35f8064
+--- /dev/null
++++ b/grsecurity/gracl_ip.c
+@@ -0,0 +1,386 @@
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND 0x01
++#define GR_CONNECT 0x02
++#define GR_INVERT 0x04
++#define GR_BINDOVERRIDE 0x08
++#define GR_CONNECTOVERRIDE 0x10
++#define GR_SOCK_FAMILY 0x20
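++
++/* mode bits for struct acl_ip_label and for the full_mode argument of
++ gr_search_connectbind() below: GR_BIND/GR_CONNECT select which rules
++ a check is matched against, GR_INVERT turns a matching rule into an
++ explicit deny (check_ip_policy() returns 2), GR_BINDOVERRIDE enables
++ the INADDR_ANY rewrite in gr_search_connectbind(), and GR_SOCK_FAMILY
++ tags the socket-family learn log entries */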
++
++static const char * gr_protocols[IPPROTO_MAX] = {
++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++ };
++
++static const char * gr_socktypes[SOCK_MAX] = {
++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
++ "unknown:7", "unknown:8", "unknown:9", "packet"
++ };
++
++static const char * gr_sockfamilies[AF_MAX+1] = {
++ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
++ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
++ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
++ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
++ };
++
++const char *
++gr_proto_to_name(unsigned char proto)
++{
++ return gr_protocols[proto];
++}
++
++const char *
++gr_socktype_to_name(unsigned char type)
++{
++ return gr_socktypes[type];
++}
++
++const char *
++gr_sockfamily_to_name(unsigned char family)
++{
++ return gr_sockfamilies[family];
++}
++
++extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++ struct acl_subject_label *curr;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled()))
++ goto exit;
++
++ if ((domain < 0) || (type < 0) || (protocol < 0) ||
++ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
++ goto exit; // let the kernel handle it
++
++ curr = current->acl;
++
++ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
++ /* the family is allowed; if this is PF_INET, allow it only if
++ the extra sock type/protocol checks pass */
++ if (domain == PF_INET)
++ goto inet_check;
++ goto exit;
++ } else {
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
++ &current->signal->saved_ip);
++ goto exit;
++ }
++ goto exit_fail;
++ }
++
++inet_check:
++ /* the rest of this checking is for IPv4 only */
++ if (!curr->ips)
++ goto exit;
++
++ if ((curr->ip_type & (1U << type)) &&
++ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
++ goto exit;
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ /* we don't place ACLs on raw sockets, and sometimes
++ dgram/ip sockets are opened for ioctl and not
++ bind/connect, so we'll fake a connect or bind learn log */
++ if (type == SOCK_RAW || type == SOCK_PACKET) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, 0, type,
++ protocol, GR_CONNECT, &current->signal->saved_ip);
++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &fakeip, 0, type,
++ protocol, GR_BIND, &current->signal->saved_ip);
++ }
++ /* we'll log when they use connect or bind */
++ goto exit;
++ }
++
++exit_fail:
++ if (domain == PF_INET)
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
++ gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ else if (rcu_access_pointer(net_families[domain]) != NULL)
++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
++ gr_socktype_to_name(type), protocol);
++
++ return 0;
++exit:
++ return 1;
++}
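++
++/* summary of the flow above: families other than PF_INET are allowed
++ or denied purely on the subject's sock_families bitmap; PF_INET must
++ additionally pass the ip_type/ip_proto bitmaps whenever the subject
++ carries IP ACLs; learn mode logs and allows everything; the return
++ value is 1 to permit the socket() call and 0 to deny it */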
++
++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
++{
++ if ((ip->mode & mode) &&
++ (ip_port >= ip->low) &&
++ (ip_port <= ip->high) &&
++ ((ntohl(ip_addr) & our_netmask) ==
++ (ntohl(our_addr) & our_netmask))
++ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
++ && (ip->type & (1U << type))) {
++ if (ip->mode & GR_INVERT)
++ return 2; // specifically denied
++ else
++ return 1; // allowed
++ }
++
++ return 0; // not specifically allowed, may continue parsing
++}
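++
++/* worked example (assuming a policy rule roughly equivalent to
++ "bind 192.168.1.0/24:1024-65535 stream tcp" compiled into *ip):
++ a TCP stream bind to 192.168.1.42:8080 passes every test above and
++ returns 1, or 2 if the rule also carries GR_INVERT; a bind to
++ 10.0.0.1:8080 fails the netmask comparison and returns 0, telling
++ the caller to keep scanning the subject's remaining rules */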
++
++static int
++gr_search_connectbind(const int full_mode, struct sock *sk,
++ struct sockaddr_in *addr, const int type)
++{
++ char iface[IFNAMSIZ] = {0};
++ struct acl_subject_label *curr;
++ struct acl_ip_label *ip;
++ struct inet_sock *isk;
++ struct net_device *dev;
++ struct in_device *idev;
++ unsigned long i;
++ int ret;
++ int mode = full_mode & (GR_BIND | GR_CONNECT);
++ __u32 ip_addr = 0;
++ __u32 our_addr;
++ __u32 our_netmask;
++ char *p;
++ __u16 ip_port = 0;
++ const struct cred *cred = current_cred();
++
++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++ return 0;
++
++ curr = current->acl;
++ isk = inet_sk(sk);
++
++ /* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
++ addr->sin_addr.s_addr = curr->inaddr_any_override;
++ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
++ struct sockaddr_in saddr;
++ int err;
++
++ saddr.sin_family = AF_INET;
++ saddr.sin_addr.s_addr = curr->inaddr_any_override;
++ saddr.sin_port = isk->inet_sport;
++
++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++
++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
++ if (err)
++ return err;
++ }
++
++ if (!curr->ips)
++ return 0;
++
++ ip_addr = addr->sin_addr.s_addr;
++ ip_port = ntohs(addr->sin_port);
++
++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, cred->uid,
++ cred->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_path.dentry,
++ current->exec_file->f_path.mnt) :
++ curr->filename, curr->filename,
++ &ip_addr, ip_port, type,
++ sk->sk_protocol, mode, &current->signal->saved_ip);
++ return 0;
++ }
++
++ for (i = 0; i < curr->ip_num; i++) {
++ ip = *(curr->ips + i);
++ if (ip->iface != NULL) {
++ strncpy(iface, ip->iface, IFNAMSIZ - 1);
++ p = strchr(iface, ':');
++ if (p != NULL)
++ *p = '\0';
++ dev = dev_get_by_name(sock_net(sk), iface);
++ if (dev == NULL)
++ continue;
++ idev = in_dev_get(dev);
++ if (idev == NULL) {
++ dev_put(dev);
++ continue;
++ }
++ rcu_read_lock();
++ for_ifa(idev) {
++ if (!strcmp(ip->iface, ifa->ifa_label)) {
++ our_addr = ifa->ifa_address;
++ our_netmask = 0xffffffff;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ return 0;
++ } else if (ret == 2) {
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ goto denied;
++ }
++ }
++ } endfor_ifa(idev);
++ rcu_read_unlock();
++ in_dev_put(idev);
++ dev_put(dev);
++ } else {
++ our_addr = ip->addr;
++ our_netmask = ip->netmask;
++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
++ if (ret == 1)
++ return 0;
++ else if (ret == 2)
++ goto denied;
++ }
++ }
++
++denied:
++ if (mode == GR_BIND)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++ else if (mode == GR_CONNECT)
++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
++
++ return -EACCES;
++}
++
++int
++gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
++{
++ /* always allow disconnection of dgram sockets with connect */
++ if (addr->sin_family == AF_UNSPEC)
++ return 0;
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
++}
++
++int gr_search_listen(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++ addr.sin_port = inet_sk(sk)->inet_sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
++
++int gr_search_accept(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct sockaddr_in addr;
++
++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
++ addr.sin_port = inet_sk(sk)->inet_sport;
++
++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
++}
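++
++/* listen() and accept() are checked as bind-style lookups against the
++ source address and port already recorded on the socket, so a socket
++ that was bound under the current policy will normally pass these
++ checks as well */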
++
++int
++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
++{
++ if (addr)
++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++ else {
++ struct sockaddr_in sin;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ sin.sin_addr.s_addr = inet->inet_daddr;
++ sin.sin_port = inet->inet_dport;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++ }
++}
++
++int
++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
++{
++ struct sockaddr_in sin;
++
++ if (unlikely(skb->len < sizeof (struct udphdr)))
++ return 0; // skip this packet
++
++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
++ sin.sin_port = udp_hdr(skb)->source;
++
++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
++}
+diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
+new file mode 100644
+index 0000000..25f54ef
+--- /dev/null
++++ b/grsecurity/gracl_learn.c
+@@ -0,0 +1,207 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static DEFINE_SPINLOCK(gr_learn_lock);
++static DEFINE_MUTEX(gr_learn_user_mutex);
++
++/* we need to maintain two buffers: the reading context of grlearn
++ takes a mutex around the (sleeping) copy to userspace, while the
++ other kernel contexts take only a spinlock when copying into the
++ buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval = 0;
++
++ add_wait_queue(&learn_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++ do {
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ if (learn_buffer_len)
++ break;
++ spin_unlock(&gr_learn_lock);
++ mutex_unlock(&gr_learn_user_mutex);
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ }
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++
++ schedule();
++ } while (1);
++
++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++ learn_buffer_user_len = learn_buffer_len;
++ retval = learn_buffer_len;
++ learn_buffer_len = 0;
++
++ spin_unlock(&gr_learn_lock);
++
++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++ retval = -EFAULT;
++
++ mutex_unlock(&gr_learn_user_mutex);
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&learn_wait, &wait);
++ return retval;
++}
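++
++/* note that the loop above exits with both gr_learn_user_mutex and
++ gr_learn_lock held: the shared buffer is snapshotted into
++ learn_buffer_user and reset under the spinlock, which is dropped
++ before the (sleeping) copy_to_user() that only the mutex serializes */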
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++ poll_wait(file, &learn_wait, wait);
++
++ if (learn_buffer_len)
++ return (POLLIN | POLLRDNORM);
++
++ return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++ char *tmp;
++
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ if (tmp)
++ vfree(tmp);
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ mutex_unlock(&gr_learn_user_mutex);
++
++ return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++ va_list args;
++ unsigned int len;
++
++ if (!gr_learn_attached)
++ return;
++
++ spin_lock(&gr_learn_lock);
++
++ /* leave a gap at the end so we know when it's "full" but don't have to
++ compute the exact length of the string we're trying to append
++ */
++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++ return;
++ }
++ if (learn_buffer == NULL) {
++ spin_unlock(&gr_learn_lock);
++ return;
++ }
++
++ va_start(args, fmt);
++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++ va_end(args);
++
++ learn_buffer_len += len + 1;
++
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++
++ return;
++}
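++
++/* entries are appended NUL-separated (hence len + 1), and the writer
++ keeps a 16KB slack below LEARN_BUFFER_SIZE, so any entry shorter than
++ that slack is never truncated by the bounded vsnprintf(); the reader
++ is woken both when data arrives and when the buffer is deemed full */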
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ && gr_learn_attached)
++ return -EBUSY;
++ if (file->f_mode & FMODE_READ) {
++ int retval = 0;
++ mutex_lock(&gr_learn_user_mutex);
++ if (learn_buffer == NULL)
++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer_user == NULL)
++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ if (learn_buffer_user == NULL) {
++ retval = -ENOMEM;
++ goto out_error;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 1;
++out_error:
++ mutex_unlock(&gr_learn_user_mutex);
++ return retval;
++ }
++ return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ) {
++ char *tmp = NULL;
++ mutex_lock(&gr_learn_user_mutex);
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ if (tmp)
++ vfree(tmp);
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 0;
++ mutex_unlock(&gr_learn_user_mutex);
++ }
++
++ return 0;
++}
++
++const struct file_operations grsec_fops = {
++ .read = read_learn,
++ .write = write_grsec_handler,
++ .open = open_learn,
++ .release = close_learn,
++ .poll = poll_learn,
++};
+diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
+new file mode 100644
+index 0000000..b4a4084
+--- /dev/null
++++ b/grsecurity/gracl_policy.c
+@@ -0,0 +1,1781 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/lglock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/security.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/stop_machine.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++#include <linux/hugetlb.h>
++#include <linux/posix-timers.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++extern struct gr_policy_state *polstate;
++
++#define FOR_EACH_ROLE_START(role) \
++ role = polstate->role_list; \
++ while (role) {
++
++#define FOR_EACH_ROLE_END(role) \
++ role = role->prev; \
++ }
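++
++/* polstate->role_list is a stack linked through ->prev (see
++ insert_acl_role_label() below, which pushes new roles at the head),
++ so this iterates roles in reverse order of insertion; ->next is
++ reserved for the uid/gid hash chains */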
++
++struct path gr_real_root;
++
++extern struct gr_alloc_state *current_alloc_state;
++
++u16 acl_sp_role_value;
++
++static DEFINE_MUTEX(gr_dev_mutex);
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++static struct gr_arg gr_usermode;
++static unsigned char gr_system_salt[GR_SALT_LEN];
++static unsigned char gr_system_sum[GR_SHA_LEN];
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++struct acl_object_label *fakefs_obj_rw;
++struct acl_object_label *fakefs_obj_rwx;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
++extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
++extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
++extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
++extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
++extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
++extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
++extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
++extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
++extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
++extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
++extern void assign_special_role(const char *rolename);
++extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
++extern int gr_rbac_disable(void *unused);
++extern void gr_enable_rbac_system(void);
++
++static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
++{
++ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
++{
++ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
++{
++ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
++{
++ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
++{
++ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
++{
++ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
++{
++ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
++{
++ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
++ return -EFAULT;
++
++ return 0;
++}
++
++int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
++{
++ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
++{
++ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
++ return -EFAULT;
++
++ if (((uwrap->version != GRSECURITY_VERSION) &&
++ (uwrap->version != 0x2901)) ||
++ (uwrap->size != sizeof(struct gr_arg)))
++ return -EINVAL;
++
++ return 0;
++}
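++
++/* the wrapper must carry the running GRSECURITY_VERSION; 0x2901 is
++ also accepted, presumably so a 2.9.1-era gradm keeps working, since
++ the size of struct gr_arg is checked independently anyway */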
++
++static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
++{
++ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
++ return -EFAULT;
++
++ return 0;
++}
++
++static size_t get_gr_arg_wrapper_size_normal(void)
++{
++ return sizeof(struct gr_arg_wrapper);
++}
++
++#ifdef CONFIG_COMPAT
++extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
++extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
++extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
++extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
++extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
++extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
++extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
++extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
++extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
++extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
++extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
++extern size_t get_gr_arg_wrapper_size_compat(void);
++
++int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
++int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
++int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
++int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
++int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
++int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
++int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
++int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
++int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
++int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
++int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
++size_t (* get_gr_arg_wrapper_size)(void) __read_only;
++
++#else
++#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
++#define copy_gr_arg copy_gr_arg_normal
++#define copy_gr_hash_struct copy_gr_hash_struct_normal
++#define copy_acl_object_label copy_acl_object_label_normal
++#define copy_acl_subject_label copy_acl_subject_label_normal
++#define copy_acl_role_label copy_acl_role_label_normal
++#define copy_acl_ip_label copy_acl_ip_label_normal
++#define copy_pointer_from_array copy_pointer_from_array_normal
++#define copy_sprole_pw copy_sprole_pw_normal
++#define copy_role_transition copy_role_transition_normal
++#define copy_role_allowed_ip copy_role_allowed_ip_normal
++#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
++#endif
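++
++/* with CONFIG_COMPAT the copy helpers above are function pointers kept
++ in __read_only memory: write_grsec_handler() briefly reopens the
++ kernel image (pax_open_kernel) and points them at the _compat
++ variants when is_compat_task() is true; without CONFIG_COMPAT they
++ collapse to the _normal functions at compile time via the #defines */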
++
++static struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
++ struct subject_map *match;
++
++ match = polstate->subj_map_set.s_hash[index];
++
++ while (match && match->user != userp)
++ match = match->next;
++
++ if (match != NULL)
++ return match->kernel;
++ else
++ return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
++ struct subject_map **curr;
++
++ subjmap->prev = NULL;
++
++ curr = &polstate->subj_map_set.s_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = subjmap;
++
++ subjmap->next = *curr;
++ *curr = subjmap;
++
++ return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++ unsigned int index =
++ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
++ struct acl_role_label **curr;
++ struct acl_role_label *tmp, *tmp2;
++
++ curr = &polstate->acl_role_set.r_hash[index];
++
++ /* simple case, slot is empty, just set it to our role */
++ if (*curr == NULL) {
++ *curr = role;
++ } else {
++ /* example:
++ 1 -> 2 -> 3 (adding 2 -> 3 to here)
++ 2 -> 3
++ */
++ /* first check to see if we can already be reached via this slot */
++ tmp = *curr;
++ while (tmp && tmp != role)
++ tmp = tmp->next;
++ if (tmp == role) {
++ /* we don't need to add ourselves to this slot's chain */
++ return;
++ }
++ /* we need to add ourselves to this chain, two cases */
++ if (role->next == NULL) {
++ /* simple case, append the current chain to our role */
++ role->next = *curr;
++ *curr = role;
++ } else {
++ /* 1 -> 2 -> 3 -> 4
++ 2 -> 3 -> 4
++ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
++ */
++ /* trickier case: walk our role's chain until we find
++ the role for the start of the current slot's chain */
++ tmp = role;
++ tmp2 = *curr;
++ while (tmp->next && tmp->next != tmp2)
++ tmp = tmp->next;
++ if (tmp->next == tmp2) {
++ /* from example above, we found 3, so just
++ replace this slot's chain with ours */
++ *curr = role;
++ } else {
++ /* we didn't find a subset of our role's chain
++ in the current slot's chain, so append their
++ chain to ours, and set us as the first role in
++ the slot's chain
++
++ we could fold this case with the case above,
++ but making it explicit for clarity
++ */
++ tmp->next = tmp2;
++ *curr = role;
++ }
++ }
++ }
++
++ return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++ int i;
++
++ if (polstate->role_list == NULL) {
++ polstate->role_list = role;
++ role->prev = NULL;
++ } else {
++ role->prev = polstate->role_list;
++ polstate->role_list = role;
++ }
++
++ /* used for hash chains */
++ role->next = NULL;
++
++ if (role->roletype & GR_ROLE_DOMAIN) {
++ for (i = 0; i < role->domain_child_num; i++)
++ __insert_acl_role_label(role, role->domain_children[i]);
++ } else
++ __insert_acl_role_label(role, role->uidgid);
++}
++
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
++{
++ struct name_entry **curr, *nentry;
++ struct inodev_entry *ientry;
++ unsigned int len = strlen(name);
++ unsigned int key = full_name_hash(name, len);
++ unsigned int index = key % polstate->name_set.n_size;
++
++ curr = &polstate->name_set.n_hash[index];
++
++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
++ curr = &((*curr)->next);
++
++ if (*curr != NULL)
++ return 1;
++
++ nentry = acl_alloc(sizeof (struct name_entry));
++ if (nentry == NULL)
++ return 0;
++ ientry = acl_alloc(sizeof (struct inodev_entry));
++ if (ientry == NULL)
++ return 0;
++ ientry->nentry = nentry;
++
++ nentry->key = key;
++ nentry->name = name;
++ nentry->inode = inode;
++ nentry->device = device;
++ nentry->len = len;
++ nentry->deleted = deleted;
++
++ nentry->prev = NULL;
++ curr = &polstate->name_set.n_hash[index];
++ if (*curr != NULL)
++ (*curr)->prev = nentry;
++ nentry->next = *curr;
++ *curr = nentry;
++
++ /* insert us into the table searchable by inode/dev */
++ __insert_inodev_entry(polstate, ientry);
++
++ return 1;
++}
++
++/* we allocate chained hash tables, so the optimal size is where the load factor lambda ~ 1 */
++
++static void *
++create_table(__u32 * len, int elementsize)
++{
++ unsigned int table_sizes[] = {
++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++ 4194301, 8388593, 16777213, 33554393, 67108859
++ };
++ void *newtable = NULL;
++ unsigned int pwr = 0;
++
++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++ table_sizes[pwr] <= *len)
++ pwr++;
++
++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
++ return newtable;
++
++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
++ newtable =
++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
++ else
++ newtable = vmalloc(table_sizes[pwr] * elementsize);
++
++ *len = table_sizes[pwr];
++
++ return newtable;
++}
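++
++/* e.g. a request of *len == 100 scans past 7, 13, 31 and 61, settles
++ on 127 (the first prime above the request), allocates 127 slots with
++ kmalloc() since that still fits within a page, and rewrites *len to
++ 127; a request at or beyond the largest prime in the table fails and
++ returns NULL */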
++
++static int
++init_variables(const struct gr_arg *arg, bool reload)
++{
++ struct task_struct *reaper = init_pid_ns.child_reaper;
++ unsigned int stacksize;
++
++ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
++ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++ polstate->name_set.n_size = arg->role_db.num_objects;
++ polstate->inodev_set.i_size = arg->role_db.num_objects;
++
++ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
++ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
++ return 1;
++
++ if (!reload) {
++ if (!gr_init_uidset())
++ return 1;
++ }
++
++ /* set up the stack that holds allocation info */
++
++ stacksize = arg->role_db.num_pointers + 5;
++
++ if (!acl_alloc_stack_init(stacksize))
++ return 1;
++
++ if (!reload) {
++ /* grab reference for the real root dentry and vfsmount */
++ get_fs_root(reaper->fs, &gr_real_root);
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
++#endif
++
++ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
++ if (fakefs_obj_rw == NULL)
++ return 1;
++ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
++
++ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
++ if (fakefs_obj_rwx == NULL)
++ return 1;
++ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
++ }
++
++ polstate->subj_map_set.s_hash =
++ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
++ polstate->acl_role_set.r_hash =
++ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
++ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
++ polstate->inodev_set.i_hash =
++ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
++
++ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
++ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
++ return 1;
++
++ memset(polstate->subj_map_set.s_hash, 0,
++ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
++ memset(polstate->acl_role_set.r_hash, 0,
++ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
++ memset(polstate->name_set.n_hash, 0,
++ sizeof (struct name_entry *) * polstate->name_set.n_size);
++ memset(polstate->inodev_set.i_hash, 0,
++ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
++
++ return 0;
++}
++
++/* free information not needed after startup;
++ currently this is only the user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++ __u32 i;
++
++ if (polstate->subj_map_set.s_hash) {
++ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
++ if (polstate->subj_map_set.s_hash[i]) {
++ kfree(polstate->subj_map_set.s_hash[i]);
++ polstate->subj_map_set.s_hash[i] = NULL;
++ }
++ }
++
++ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
++ PAGE_SIZE)
++ kfree(polstate->subj_map_set.s_hash);
++ else
++ vfree(polstate->subj_map_set.s_hash);
++ }
++
++ return;
++}
++
++static void
++free_variables(bool reload)
++{
++ struct acl_subject_label *s;
++ struct acl_role_label *r;
++ struct task_struct *task, *task2;
++ unsigned int x;
++
++ if (!reload) {
++ gr_clear_learn_entries();
++
++ read_lock(&tasklist_lock);
++ do_each_thread(task2, task) {
++ task->acl_sp_role = 0;
++ task->acl_role_id = 0;
++ task->inherited = 0;
++ task->acl = NULL;
++ task->role = NULL;
++ } while_each_thread(task2, task);
++ read_unlock(&tasklist_lock);
++
++ kfree(fakefs_obj_rw);
++ fakefs_obj_rw = NULL;
++ kfree(fakefs_obj_rwx);
++ fakefs_obj_rwx = NULL;
++
++ /* release the reference to the real root dentry and vfsmount */
++ path_put(&gr_real_root);
++ memset(&gr_real_root, 0, sizeof(gr_real_root));
++ }
++
++ /* free all object hash tables */
++
++ FOR_EACH_ROLE_START(r)
++ if (r->subj_hash == NULL)
++ goto next_role;
++ FOR_EACH_SUBJECT_START(r, s, x)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_SUBJECT_END(s, x)
++ FOR_EACH_NESTED_SUBJECT_START(r, s)
++ if (s->obj_hash == NULL)
++ break;
++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ FOR_EACH_NESTED_SUBJECT_END(s)
++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++ kfree(r->subj_hash);
++ else
++ vfree(r->subj_hash);
++ r->subj_hash = NULL;
++next_role:
++ FOR_EACH_ROLE_END(r)
++
++ acl_free_all();
++
++ if (polstate->acl_role_set.r_hash) {
++ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++ PAGE_SIZE)
++ kfree(polstate->acl_role_set.r_hash);
++ else
++ vfree(polstate->acl_role_set.r_hash);
++ }
++ if (polstate->name_set.n_hash) {
++ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(polstate->name_set.n_hash);
++ else
++ vfree(polstate->name_set.n_hash);
++ }
++
++ if (polstate->inodev_set.i_hash) {
++ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
++ PAGE_SIZE)
++ kfree(polstate->inodev_set.i_hash);
++ else
++ vfree(polstate->inodev_set.i_hash);
++ }
++
++ if (!reload)
++ gr_free_uidset();
++
++ memset(&polstate->name_set, 0, sizeof (struct name_db));
++ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
++ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
++ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++ polstate->default_role = NULL;
++ polstate->kernel_role = NULL;
++ polstate->role_list = NULL;
++
++ return;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
++
++static int alloc_and_copy_string(char **name, unsigned int maxlen)
++{
++ unsigned int len = strnlen_user(*name, maxlen);
++ char *tmp;
++
++ if (!len || len >= maxlen)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, *name, len))
++ return -EFAULT;
++
++ tmp[len-1] = '\0';
++ *name = tmp;
++
++ return 0;
++}
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++ struct acl_object_label *g_tmp, **guser;
++ int error;
++
++ if (obj->globbed == NULL)
++ return 0;
++
++ guser = &obj->globbed;
++ while (*guser) {
++ g_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ if (g_tmp == NULL)
++ return -ENOMEM;
++
++ if (copy_acl_object_label(g_tmp, *guser))
++ return -EFAULT;
++
++ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
++ if (error)
++ return error;
++
++ *guser = g_tmp;
++ guser = &(g_tmp->next);
++ }
++
++ return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++ struct acl_role_label *role)
++{
++ struct acl_object_label *o_tmp;
++ int ret;
++
++ while (userp) {
++ if ((o_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
++ return -ENOMEM;
++
++ if (copy_acl_object_label(o_tmp, userp))
++ return -EFAULT;
++
++ userp = o_tmp->prev;
++
++ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
++ if (ret)
++ return ret;
++
++ insert_acl_obj_label(o_tmp, subj);
++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
++ return -ENOMEM;
++
++ ret = copy_user_glob(o_tmp);
++ if (ret)
++ return ret;
++
++ if (o_tmp->nested) {
++ int already_copied;
++
++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
++ if (IS_ERR(o_tmp->nested))
++ return PTR_ERR(o_tmp->nested);
++
++ /* insert into nested subject list if we haven't copied this one yet
++ to prevent duplicate entries */
++ if (!already_copied) {
++ o_tmp->nested->next = role->hash->first;
++ role->hash->first = o_tmp->nested;
++ }
++ }
++ }
++
++ return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++ struct acl_subject_label s_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_acl_subject_label(&s_tmp, userp))
++ break;
++
++ userp = s_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++ ruserip = rolep->allowed_ips;
++
++ while (ruserip) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_allowed_ip *)
++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++ return -ENOMEM;
++
++ if (copy_role_allowed_ip(rtmp, ruserip))
++ return -EFAULT;
++
++ ruserip = rtmp->prev;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->allowed_ips = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!ruserip)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
++ int error;
++
++ rusertp = rolep->transitions;
++
++ while (rusertp) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_transition *)
++ acl_alloc(sizeof (struct role_transition))) == NULL)
++ return -ENOMEM;
++
++ if (copy_role_transition(rtmp, rusertp))
++ return -EFAULT;
++
++ rusertp = rtmp->prev;
++
++ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
++ if (error)
++ return error;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->transitions = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!rusertp)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static __u32 count_user_objs(const struct acl_object_label __user *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_acl_object_label(&o_tmp, userp))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
++{
++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++ __u32 num_objs;
++ struct acl_ip_label **i_tmp, *i_utmp2;
++ struct gr_hash_struct ghash;
++ struct subject_map *subjmap;
++ unsigned int i_num;
++ int err;
++
++ if (already_copied != NULL)
++ *already_copied = 0;
++
++ s_tmp = lookup_subject_map(userp);
++
++ /* we've already copied this subject into the kernel; just return
++ the reference to it rather than copying it over again
++ */
++ if (s_tmp) {
++ if (already_copied != NULL)
++ *already_copied = 1;
++ return(s_tmp);
++ }
++
++ if ((s_tmp = (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++ if (subjmap == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap->user = userp;
++ subjmap->kernel = s_tmp;
++ insert_subj_map_entry(subjmap);
++
++ if (copy_acl_subject_label(s_tmp, userp))
++ return ERR_PTR(-EFAULT);
++
++ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
++ if (err)
++ return ERR_PTR(err);
++
++ if (!strcmp(s_tmp->filename, "/"))
++ role->root_label = s_tmp;
++
++ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
++ return ERR_PTR(-EFAULT);
++
++ /* copy user and group transition tables */
++
++ if (s_tmp->user_trans_num) {
++ uid_t *uidlist;
++
++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
++ if (uidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->user_transitions = uidlist;
++ }
++
++ if (s_tmp->group_trans_num) {
++ gid_t *gidlist;
++
++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
++ if (gidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->group_transitions = gidlist;
++ }
++
++ /* set up object hash table */
++ num_objs = count_user_objs(ghash.first);
++
++ s_tmp->obj_hash_size = num_objs;
++ s_tmp->obj_hash =
++ (struct acl_object_label **)
++ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
++
++ if (!s_tmp->obj_hash)
++ return ERR_PTR(-ENOMEM);
++
++ memset(s_tmp->obj_hash, 0,
++ s_tmp->obj_hash_size *
++ sizeof (struct acl_object_label *));
++
++ /* add in objects */
++ err = copy_user_objs(ghash.first, s_tmp, role);
++
++ if (err)
++ return ERR_PTR(err);
++
++ /* set pointer for parent subject */
++ if (s_tmp->parent_subject) {
++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
++
++ if (IS_ERR(s_tmp2))
++ return s_tmp2;
++
++ s_tmp->parent_subject = s_tmp2;
++ }
++
++ /* add in ip acls */
++
++ if (!s_tmp->ip_num) {
++ s_tmp->ips = NULL;
++ goto insert;
++ }
++
++ i_tmp =
++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
++ sizeof (struct acl_ip_label *));
++
++ if (!i_tmp)
++ return ERR_PTR(-ENOMEM);
++
++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++ *(i_tmp + i_num) =
++ (struct acl_ip_label *)
++ acl_alloc(sizeof (struct acl_ip_label));
++ if (!*(i_tmp + i_num))
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
++ return ERR_PTR(-EFAULT);
++
++ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
++ return ERR_PTR(-EFAULT);
++
++ if ((*(i_tmp + i_num))->iface == NULL)
++ continue;
++
++ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
++ if (err)
++ return ERR_PTR(err);
++ }
++
++ s_tmp->ips = i_tmp;
++
++insert:
++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
++ return ERR_PTR(-ENOMEM);
++
++ return s_tmp;
++}
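++
++/* the subject map entry is inserted *before* the field copy above, so
++ when a subject's parent_subject chain or a nested object eventually
++ refers back to a subject already being copied, lookup_subject_map()
++ hands out the in-progress kernel label instead of recursing forever */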
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label s_pre;
++ struct acl_subject_label * ret;
++ int err;
++
++ while (userp) {
++ if (copy_acl_subject_label(&s_pre, userp))
++ return -EFAULT;
++
++ ret = do_copy_user_subj(userp, role, NULL);
++
++ err = PTR_ERR(ret);
++ if (IS_ERR(ret))
++ return err;
++
++ insert_acl_subj_label(ret, role);
++
++ userp = s_pre.prev;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct acl_subject_label *subj_list;
++ struct sprole_pw *sptmp;
++ struct gr_hash_struct *ghash;
++ uid_t *domainlist;
++ unsigned int r_num;
++ int err = 0;
++ __u16 i;
++ __u32 num_subjs;
++
++ /* we need a default and kernel role */
++ if (arg->role_db.num_roles < 2)
++ return -EINVAL;
++
++ /* copy special role authentication info from userspace */
++
++ polstate->num_sprole_pws = arg->num_sprole_pws;
++ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
++
++ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
++ return -ENOMEM;
++
++ for (i = 0; i < polstate->num_sprole_pws; i++) {
++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++ if (!sptmp)
++ return -ENOMEM;
++ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
++ return -EFAULT;
++
++ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
++
++#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
++ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
++#endif
++
++ polstate->acl_special_roles[i] = sptmp;
++ }
++
++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++ if (!r_tmp)
++ return -ENOMEM;
++
++ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
++ return -EFAULT;
++
++ if (copy_acl_role_label(r_tmp, r_utmp2))
++ return -EFAULT;
++
++ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
++ if (err)
++ return err;
++
++ if (!strcmp(r_tmp->rolename, "default")
++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++ polstate->default_role = r_tmp;
++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++ polstate->kernel_role = r_tmp;
++ }
++
++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
++ return -ENOMEM;
++
++ if (copy_gr_hash_struct(ghash, r_tmp->hash))
++ return -EFAULT;
++
++ r_tmp->hash = ghash;
++
++ num_subjs = count_user_subjs(r_tmp->hash->first);
++
++ r_tmp->subj_hash_size = num_subjs;
++ r_tmp->subj_hash =
++ (struct acl_subject_label **)
++ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
++
++ if (!r_tmp->subj_hash)
++ return -ENOMEM;
++
++ err = copy_user_allowedips(r_tmp);
++ if (err)
++ return err;
++
++ /* copy domain info */
++ if (r_tmp->domain_children != NULL) {
++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
++ if (domainlist == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
++ return -EFAULT;
++
++ r_tmp->domain_children = domainlist;
++ }
++
++ err = copy_user_transitions(r_tmp);
++ if (err)
++ return err;
++
++ memset(r_tmp->subj_hash, 0,
++ r_tmp->subj_hash_size *
++ sizeof (struct acl_subject_label *));
++
++ /* acquire the list of subjects, then NULL out
++ the list prior to parsing the subjects for this role,
++ as during this parsing the list is replaced with a list
++ of *nested* subjects for the role
++ */
++ subj_list = r_tmp->hash->first;
++
++ /* set nested subject list to null */
++ r_tmp->hash->first = NULL;
++
++ err = copy_user_subjs(subj_list, r_tmp);
++
++ if (err)
++ return err;
++
++ insert_acl_role_label(r_tmp);
++ }
++
++ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
++ return -EINVAL;
++
++ return err;
++}
++
++static int gracl_reload_apply_policies(void *reload)
++{
++ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
++ struct task_struct *task, *task2;
++ struct acl_role_label *role, *rtmp;
++ struct acl_subject_label *subj;
++ const struct cred *cred;
++ int role_applied;
++ int ret = 0;
++
++ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
++ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
++
++ /* first make sure we'll be able to apply the new policy cleanly */
++ do_each_thread(task2, task) {
++ if (task->exec_file == NULL)
++ continue;
++ role_applied = 0;
++ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
++ /* preserve special roles */
++ FOR_EACH_ROLE_START(role)
++ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
++ rtmp = task->role;
++ task->role = role;
++ role_applied = 1;
++ break;
++ }
++ FOR_EACH_ROLE_END(role)
++ }
++ if (!role_applied) {
++ cred = __task_cred(task);
++ rtmp = task->role;
++ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
++ }
++ /* this handles non-nested inherited subjects; nested subjects will
++ still be dropped currently */
++ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
++ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
++ /* change the role back so that we've made no modifications to the policy */
++ task->role = rtmp;
++
++ if (subj == NULL || task->tmpacl == NULL) {
++ ret = -EINVAL;
++ goto out;
++ }
++ } while_each_thread(task2, task);
++
++ /* now actually apply the policy */
++
++ do_each_thread(task2, task) {
++ if (task->exec_file) {
++ role_applied = 0;
++ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
++ /* preserve special roles */
++ FOR_EACH_ROLE_START(role)
++ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
++ task->role = role;
++ role_applied = 1;
++ break;
++ }
++ FOR_EACH_ROLE_END(role)
++ }
++ if (!role_applied) {
++ cred = __task_cred(task);
++ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
++ }
++ /* this handles non-nested inherited subjects; nested subjects will
++ still be dropped currently */
++ if (!reload_state->oldmode && task->inherited)
++ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
++ else {
++ /* looked up and tagged to the task previously */
++ subj = task->tmpacl;
++ }
++ /* subj will be non-null */
++ __gr_apply_subject_to_task(polstate, task, subj);
++ if (reload_state->oldmode) {
++ task->acl_role_id = 0;
++ task->acl_sp_role = 0;
++ task->inherited = 0;
++ }
++ } else {
++ // it's a kernel process
++ task->role = polstate->kernel_role;
++ task->acl = polstate->kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_PROCFIND;
++#endif
++ }
++ } while_each_thread(task2, task);
++
++ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
++ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
++
++out:
++
++ return ret;
++}
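++
++/* two passes under stop_machine(): the first pass only resolves a role
++ and subject in the new policy for every task (stashing the subject in
++ task->tmpacl and restoring task->role afterwards) and fails with
++ -EINVAL before the live policy is touched if any lookup comes up
++ empty; the second pass then applies the new role/subject to each
++ task, so a reload takes effect everywhere or not at all */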
++
++static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
++{
++ struct gr_reload_state new_reload_state = { };
++ int err;
++
++ new_reload_state.oldpolicy_ptr = polstate;
++ new_reload_state.oldalloc_ptr = current_alloc_state;
++ new_reload_state.oldmode = oldmode;
++
++ current_alloc_state = &new_reload_state.newalloc;
++ polstate = &new_reload_state.newpolicy;
++
++ /* everything relevant is now saved off, copy in the new policy */
++ if (init_variables(args, true)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++ err = -ENOMEM;
++ goto error;
++ }
++
++ err = copy_user_acl(args);
++ free_init_variables();
++ if (err)
++ goto error;
++ /* the new policy is copied in, with the old policy still available via
++ the saved reload state: first go through applying roles, making sure
++ to preserve special roles, then apply the new subjects, making sure to
++ preserve inherited and nested subjects, though currently only
++ inherited subjects will be preserved
++ */
++ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
++ if (err)
++ goto error;
++
++ /* we've now applied the new policy, so restore the old policy state to free it */
++ polstate = &new_reload_state.oldpolicy;
++ current_alloc_state = &new_reload_state.oldalloc;
++ free_variables(true);
++
++ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
++ to running_polstate/current_alloc_state inside stop_machine
++ */
++ err = 0;
++ goto out;
++error:
++ /* on error of loading the new policy, we'll just keep the previous
++ policy set around
++ */
++ free_variables(true);
++
++ /* doesn't affect runtime, but maintains consistent state */
++out:
++ polstate = new_reload_state.oldpolicy_ptr;
++ current_alloc_state = new_reload_state.oldalloc_ptr;
++
++ return err;
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++ int error = 0;
++
++ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
++ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
++
++ if (init_variables(args, false)) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
++ error = -ENOMEM;
++ goto out;
++ }
++
++ error = copy_user_acl(args);
++ free_init_variables();
++ if (error)
++ goto out;
++
++ error = gr_set_acls(0);
++ if (error)
++ goto out;
++
++ gr_enable_rbac_system();
++
++ return 0;
++
++out:
++ free_variables(false);
++ return error;
++}
++
++static int
++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
++ unsigned char **sum)
++{
++ struct acl_role_label *r;
++ struct role_allowed_ip *ipp;
++ struct role_transition *trans;
++ unsigned int i;
++ int found = 0;
++ u32 curr_ip = current->signal->curr_ip;
++
++ current->signal->saved_ip = curr_ip;
++
++ /* check transition table */
++
++ for (trans = current->role->transitions; trans; trans = trans->next) {
++ if (!strcmp(rolename, trans->rolename)) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return 0;
++
++	/* handle special roles that do not require authentication,
++	   and enforce any allowed-IP restrictions */
++
++ FOR_EACH_ROLE_START(r)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ found = 0;
++ if (r->allowed_ips != NULL) {
++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++ if ((ntohl(curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask))
++ found = 1;
++ }
++ } else
++ found = 2;
++ if (!found)
++ return 0;
++
++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++ FOR_EACH_ROLE_END(r)
++
++ for (i = 0; i < polstate->num_sprole_pws; i++) {
++ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
++ *salt = polstate->acl_special_roles[i]->salt;
++ *sum = polstate->acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
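++/* returns 1 if the task's controlling tty is "secure": no process
++ * outside the task's ancestor chain holds an open descriptor on the
++ * same tty character device.  Any match is logged as a tty-sniffing
++ * attempt and handed to alertkill before returning 0.
++ */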
++int gr_check_secure_terminal(struct task_struct *task)
++{
++ struct task_struct *p, *p2, *p3;
++ struct files_struct *files;
++ struct fdtable *fdt;
++ struct file *our_file = NULL, *file;
++ int i;
++
++ if (task->signal->tty == NULL)
++ return 1;
++
++ files = get_files_struct(task);
++ if (files != NULL) {
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
++ get_file(file);
++ our_file = file;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ }
++
++ if (our_file == NULL)
++ return 1;
++
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ files = get_files_struct(p);
++ if (files == NULL ||
++ (p->signal && p->signal->tty == task->signal->tty)) {
++ if (files != NULL)
++ put_files_struct(files);
++ continue;
++ }
++ rcu_read_lock();
++ fdt = files_fdtable(files);
++ for (i=0; i < fdt->max_fds; i++) {
++ file = fcheck_files(files, i);
++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
++ p3 = task;
++ while (task_pid_nr(p3) > 0) {
++ if (p3 == p)
++ break;
++ p3 = p3->real_parent;
++ }
++ if (p3 == p)
++ break;
++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
++ gr_handle_alertkill(p);
++ rcu_read_unlock();
++ put_files_struct(files);
++ read_unlock(&tasklist_lock);
++ fput(our_file);
++ return 0;
++ }
++ }
++ rcu_read_unlock();
++ put_files_struct(files);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++
++ fput(our_file);
++ return 1;
++}
++
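++/* write(2) handler for the grsec device, driven by the userland RBAC
++ * tool (gradm): exactly one gr_arg_wrapper of the expected size must be
++ * written per request.  For compat tasks the copy helpers are first
++ * switched to their 32-bit variants.  The mode field then selects the
++ * status/enable/shutdown/reload/segvmod/special-role operation, and
++ * failed password checks count towards the CONFIG_GRKERNSEC_ACL_MAXTRIES
++ * lockout.
++ */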
++ssize_t
++write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg_wrapper uwrap;
++ unsigned char *sprole_salt = NULL;
++ unsigned char *sprole_sum = NULL;
++ int error = 0;
++ int error2 = 0;
++ size_t req_count = 0;
++ unsigned char oldmode = 0;
++
++ mutex_lock(&gr_dev_mutex);
++
++ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
++ error = -EPERM;
++ goto out;
++ }
++
++#ifdef CONFIG_COMPAT
++ pax_open_kernel();
++ if (is_compat_task()) {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
++ copy_gr_arg = &copy_gr_arg_compat;
++ copy_acl_object_label = &copy_acl_object_label_compat;
++ copy_acl_subject_label = &copy_acl_subject_label_compat;
++ copy_acl_role_label = &copy_acl_role_label_compat;
++ copy_acl_ip_label = &copy_acl_ip_label_compat;
++ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
++ copy_role_transition = &copy_role_transition_compat;
++ copy_sprole_pw = &copy_sprole_pw_compat;
++ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
++ copy_pointer_from_array = &copy_pointer_from_array_compat;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
++ } else {
++ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
++ copy_gr_arg = &copy_gr_arg_normal;
++ copy_acl_object_label = &copy_acl_object_label_normal;
++ copy_acl_subject_label = &copy_acl_subject_label_normal;
++ copy_acl_role_label = &copy_acl_role_label_normal;
++ copy_acl_ip_label = &copy_acl_ip_label_normal;
++ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
++ copy_role_transition = &copy_role_transition_normal;
++ copy_sprole_pw = &copy_sprole_pw_normal;
++ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
++ copy_pointer_from_array = &copy_pointer_from_array_normal;
++ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
++ }
++ pax_close_kernel();
++#endif
++
++ req_count = get_gr_arg_wrapper_size();
++
++ if (count != req_count) {
++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ error = copy_gr_arg_wrapper(buf, &uwrap);
++ if (error)
++ goto out;
++
++ error = copy_gr_arg(uwrap.arg, &gr_usermode);
++ if (error)
++ goto out;
++
++ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++	/* if a non-root user is trying to do anything other than use a
++	   special role, do not attempt authentication and do not count the
++	   attempt towards the authentication lockout
++	 */
++
++ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
++ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
++ current_uid()) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++	/* Okay.
++	 * We now have enough of the argument structure (we have yet
++	 * to copy_from_user the tables themselves).  Copy the tables
++	 * only if we need them, i.e. for loading operations. */
++
++ switch (gr_usermode.mode) {
++ case GR_STATUS:
++ if (gr_acl_is_enabled()) {
++ error = 1;
++ if (!gr_check_secure_terminal(current))
++ error = 3;
++ } else
++ error = 2;
++ goto out;
++ case GR_SHUTDOWN:
++ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
++ stop_machine(gr_rbac_disable, NULL, NULL);
++ free_variables(false);
++ memset(&gr_usermode, 0, sizeof(gr_usermode));
++ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
++ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
++ } else if (gr_acl_is_enabled()) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
++ error = -EPERM;
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
++ error = -EAGAIN;
++ }
++ break;
++ case GR_ENABLE:
++ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
++ else {
++ if (gr_acl_is_enabled())
++ error = -EAGAIN;
++ else
++ error = error2;
++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
++ }
++ break;
++ case GR_OLDRELOAD:
++ oldmode = 1;
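++		/* fall through: an old-format reload takes the same path
++		   with oldmode set */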
++ case GR_RELOAD:
++ if (!gr_acl_is_enabled()) {
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
++ error = -EAGAIN;
++ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
++ error2 = gracl_reload(&gr_usermode, oldmode);
++ if (!error2)
++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
++ else {
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ error = error2;
++ }
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
++ error = -EPERM;
++ }
++ break;
++ case GR_SEGVMOD:
++ if (unlikely(!gr_acl_is_enabled())) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
++ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
++ struct acl_subject_label *segvacl;
++ segvacl =
++ lookup_acl_subj_label(gr_usermode.segv_inode,
++ gr_usermode.segv_device,
++ current->role);
++ if (segvacl) {
++ segvacl->crashes = 0;
++ segvacl->expires = 0;
++ }
++ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
++ gr_remove_uid(gr_usermode.segv_uid);
++ }
++ } else {
++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
++ error = -EPERM;
++ }
++ break;
++ case GR_SPROLE:
++ case GR_SPROLEPAM:
++ if (unlikely(!gr_acl_is_enabled())) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
++ time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (lookup_special_role_auth
++ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
++ char *p = "";
++ assign_special_role(gr_usermode.sp_role);
++ read_lock(&tasklist_lock);
++ if (current->real_parent)
++ p = current->real_parent->role->rolename;
++ read_unlock(&tasklist_lock);
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
++ p, acl_sp_role_value);
++ } else {
++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
++ error = -EPERM;
++ if(!(current->role->auth_attempts++))
++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ goto out;
++ }
++ break;
++ case GR_UNSPROLE:
++ if (unlikely(!gr_acl_is_enabled())) {
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->roletype & GR_ROLE_SPECIAL) {
++ char *p = "";
++ int i = 0;
++
++ read_lock(&tasklist_lock);
++ if (current->real_parent) {
++ p = current->real_parent->role->rolename;
++ i = current->real_parent->acl_role_id;
++ }
++ read_unlock(&tasklist_lock);
++
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
++ gr_set_acls(1);
++ } else {
++ error = -EPERM;
++ goto out;
++ }
++ break;
++ default:
++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
++ error = -EINVAL;
++ break;
++ }
++
++ if (error != -EPERM)
++ goto out;
++
++ if(!(gr_auth_attempts++))
++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++
++ out:
++ mutex_unlock(&gr_dev_mutex);
++
++ if (!error)
++ error = req_count;
++
++ return error;
++}
++
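++/* re-evaluate the role and subject of every task on the system.  When
++ * type is nonzero we were invoked from the special-role exit path, so
++ * only tasks still carrying the current role (matched by acl_role_id)
++ * are relabeled; kernel threads fall back to the kernel role's root
++ * label.
++ */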
++int
++gr_set_acls(const int type)
++{
++ struct task_struct *task, *task2;
++ struct acl_role_label *role = current->role;
++ struct acl_subject_label *subj;
++ __u16 acl_role_id = current->acl_role_id;
++ const struct cred *cred;
++ int ret;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ do_each_thread(task2, task) {
++		/* check to see if we're called from the exit handler;
++		   if so, only replace ACLs that have inherited the admin
++		   ACL */
++
++ if (type && (task->role != role ||
++ task->acl_role_id != acl_role_id))
++ continue;
++
++ task->acl_role_id = 0;
++ task->acl_sp_role = 0;
++ task->inherited = 0;
++
++ if (task->exec_file) {
++ cred = __task_cred(task);
++ task->role = __lookup_acl_role_label(polstate, task, cred->uid, cred->gid);
++ subj = __gr_get_subject_for_task(polstate, task, NULL);
++ if (subj == NULL) {
++ ret = -EINVAL;
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
++ return ret;
++ }
++ __gr_apply_subject_to_task(polstate, task, subj);
++ } else {
++ // it's a kernel process
++ task->role = polstate->kernel_role;
++ task->acl = polstate->kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_PROCFIND;
++#endif
++ }
++ } while_each_thread(task2, task);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return 0;
++}
+diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
+new file mode 100644
+index 0000000..39645c9
+--- /dev/null
++++ b/grsecurity/gracl_res.c
+@@ -0,0 +1,68 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
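++/* log names for enforced rlimits; GR_CRASH_RES is a grsecurity-specific
++   pseudo-resource used by the RES_CRASH brute-force protection */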
++static const char *restab_log[] = {
++ [RLIMIT_CPU] = "RLIMIT_CPU",
++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
++ [RLIMIT_DATA] = "RLIMIT_DATA",
++ [RLIMIT_STACK] = "RLIMIT_STACK",
++ [RLIMIT_CORE] = "RLIMIT_CORE",
++ [RLIMIT_RSS] = "RLIMIT_RSS",
++ [RLIMIT_NPROC] = "RLIMIT_NPROC",
++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
++ [RLIMIT_AS] = "RLIMIT_AS",
++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
++ [RLIMIT_NICE] = "RLIMIT_NICE",
++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
++ [GR_CRASH_RES] = "RLIMIT_CRASH"
++};
++
++void
++gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ const struct cred *cred;
++ unsigned long rlim;
++
++ if (!gr_acl_is_enabled() && !grsec_resource_logging)
++ return;
++
++	// not-yet-supported resource (no entry in restab_log)
++ if (unlikely(!restab_log[res]))
++ return;
++
++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
++ rlim = task_rlimit_max(task, res);
++ else
++ rlim = task_rlimit(task, res);
++
++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
++ return;
++
++ rcu_read_lock();
++ cred = __task_cred(task);
++
++ if (res == RLIMIT_NPROC &&
++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
++ goto out_rcu_unlock;
++ else if (res == RLIMIT_MEMLOCK &&
++ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
++ goto out_rcu_unlock;
++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
++ goto out_rcu_unlock;
++ rcu_read_unlock();
++
++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
++
++ return;
++out_rcu_unlock:
++ rcu_read_unlock();
++ return;
++}
+diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
+new file mode 100644
+index 0000000..266766a
+--- /dev/null
++++ b/grsecurity/gracl_segv.c
+@@ -0,0 +1,309 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++#include <linux/magic.h>
++#include <linux/pagemap.h>
++#include "../fs/btrfs/async-thread.h"
++#include "../fs/btrfs/ctree.h"
++#include "../fs/btrfs/btrfs_inode.h"
++#endif
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static DEFINE_SPINLOCK(gr_uid_lock);
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++ struct acl_role_label *role);
++
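++/* btrfs subvolumes share a superblock but are distinguished by an
++   anonymous device; report that per-root anon_dev so subject lookups
++   match the dev_t the policy was compiled against */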
++static inline dev_t __get_dev(const struct dentry *dentry)
++{
++#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
++ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
++ return BTRFS_I(dentry->d_inode)->root->anon_dev;
++ else
++#endif
++ return dentry->d_sb->s_dev;
++}
++
++int
++gr_init_uidset(void)
++{
++ uid_set =
++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++ uid_used = 0;
++
++ return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++ if (uid_set) {
++ struct crash_uid *tmpset;
++ spin_lock(&gr_uid_lock);
++ tmpset = uid_set;
++ uid_set = NULL;
++ uid_used = 0;
++ spin_unlock(&gr_uid_lock);
++ if (tmpset)
++ kfree(tmpset);
++ }
++
++ return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++ struct crash_uid *tmp = uid_set;
++ uid_t buid;
++ int low = 0, high = uid_used - 1, mid;
++
++ while (high >= low) {
++ mid = (low + high) >> 1;
++ buid = tmp[mid].uid;
++ if (buid == uid)
++ return mid;
++ if (buid > uid)
++ high = mid - 1;
++ if (buid < uid)
++ low = mid + 1;
++ }
++
++ return -1;
++}
++
++static __inline__ void
++gr_insertsort(void)
++{
++ unsigned short i, j;
++ struct crash_uid index;
++
++ for (i = 1; i < uid_used; i++) {
++ index = uid_set[i];
++ j = i;
++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++ uid_set[j] = uid_set[j - 1];
++ j--;
++ }
++ uid_set[j] = index;
++ }
++
++ return;
++}
++
++static __inline__ void
++gr_insert_uid(const uid_t uid, const unsigned long expires)
++{
++ int loc;
++
++ if (uid_used == GR_UIDTABLE_MAX)
++ return;
++
++ loc = gr_find_uid(uid);
++
++ if (loc >= 0) {
++ uid_set[loc].expires = expires;
++ return;
++ }
++
++ uid_set[uid_used].uid = uid;
++ uid_set[uid_used].expires = expires;
++ uid_used++;
++
++ gr_insertsort();
++
++ return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++ unsigned short i;
++
++ for (i = loc + 1; i < uid_used; i++)
++ uid_set[i - 1] = uid_set[i];
++
++ uid_used--;
++
++ return;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ int loc;
++ int ret = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ spin_lock(&gr_uid_lock);
++ loc = gr_find_uid(uid);
++
++ if (loc < 0)
++ goto out_unlock;
++
++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
++ gr_remove_uid(loc);
++ else
++ ret = 1;
++
++out_unlock:
++ spin_unlock(&gr_uid_lock);
++ return ret;
++}
++
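++/* a process counts as set-id when any of its real/effective/saved/fs
++   uids or gids disagree with one another */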
++static __inline__ int
++proc_is_setxid(const struct cred *cred)
++{
++ if (cred->uid != cred->euid || cred->uid != cred->suid ||
++ cred->uid != cred->fsuid)
++ return 1;
++ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
++ cred->gid != cred->fsgid)
++ return 1;
++
++ return 0;
++}
++
++extern int gr_fake_force_sig(int sig, struct task_struct *t);
++
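++/* brute-force deterrence: count fatal signals against the subject's
++ * RES_CRASH limit.  Once the limit is reached within its time window,
++ * a crashing non-root set-id process has its uid banned and all tasks
++ * of that uid killed; otherwise every task running the same subject
++ * and binary is killed.
++ */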
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ struct acl_subject_label *curr;
++ struct task_struct *tsk, *tsk2;
++ const struct cred *cred;
++ const struct cred *cred2;
++
++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++ return;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curr = task->acl;
++
++ if (!(curr->resmask & (1U << GR_CRASH_RES)))
++ return;
++
++ if (time_before_eq(curr->expires, get_seconds())) {
++ curr->expires = 0;
++ curr->crashes = 0;
++ }
++
++ curr->crashes++;
++
++ if (!curr->expires)
++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds())) {
++ rcu_read_lock();
++ cred = __task_cred(task);
++ if (cred->uid && proc_is_setxid(cred)) {
++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ spin_lock(&gr_uid_lock);
++ gr_insert_uid(cred->uid, curr->expires);
++ spin_unlock(&gr_uid_lock);
++ curr->expires = 0;
++ curr->crashes = 0;
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != task && cred2->uid == cred->uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ } else {
++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ do_each_thread(tsk2, tsk) {
++ if (likely(tsk != task)) {
++ // if this thread has the same subject as the one that triggered
++ // RES_CRASH and it's the same binary, kill it
++ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
++ gr_fake_force_sig(SIGKILL, tsk);
++ }
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ }
++ rcu_read_unlock();
++ }
++
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_inode_lock);
++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
++ __get_dev(filp->f_path.dentry),
++ current->role);
++ read_unlock(&gr_inode_lock);
++
++ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
++ (!curr->crashes && !curr->expires))
++ return 0;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds()))
++ return 1;
++ else if (time_before_eq(curr->expires, get_seconds())) {
++ curr->crashes = 0;
++ curr->expires = 0;
++ }
++
++ return 0;
++}
++
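++/* enforcement for fatal policy violations: GR_KILLIPPROC kills every
++   task sharing the offender's source IP, GR_KILLPROC only the offending
++   task */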
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ struct acl_subject_label *curracl;
++ __u32 curr_ip;
++ struct task_struct *p, *p2;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curracl = task->acl;
++ curr_ip = task->signal->curr_ip;
++
++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
++ read_lock(&tasklist_lock);
++ do_each_thread(p2, p) {
++ if (p->signal->curr_ip == curr_ip)
++ gr_fake_force_sig(SIGKILL, p);
++ } while_each_thread(p2, p);
++ read_unlock(&tasklist_lock);
++ } else if (curracl->mode & GR_KILLPROC)
++ gr_fake_force_sig(SIGKILL, task);
++
++ return;
++}
+diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
+new file mode 100644
+index 0000000..9d83a69
+--- /dev/null
++++ b/grsecurity/gracl_shm.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
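++/* deny shmat() when the segment's creating task (or, failing that, the
++ * last attaching task) is still alive and either predates the segment
++ * or is its last attacher, is protected by GR_PROTSHM, and runs under a
++ * different subject than the caller.
++ */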
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ struct task_struct *task;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++
++ task = find_task_by_vpid(shm_cprid);
++
++ if (unlikely(!task))
++ task = find_task_by_vpid(shm_lapid);
++
++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
++ (task->pid == shm_lapid)) &&
++ (task->acl->mode & GR_PROTSHM) &&
++ (task->acl != current->acl))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
++ return 0;
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ return 1;
++}
+diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
+new file mode 100644
+index 0000000..bc0be01
+--- /dev/null
++++ b/grsecurity/grsec_chdir.c
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ if ((grsec_enable_chdir && grsec_enable_group &&
++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++ !grsec_enable_group)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
++ }
++#endif
++ return;
++}
+diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
+new file mode 100644
+index 0000000..6b654b0
+--- /dev/null
++++ b/grsecurity/grsec_chroot.c
+@@ -0,0 +1,353 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/pid_namespace.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++int gr_init_ran;
++#endif
++
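++/* mark a task as chrooted when its new root differs from both init's
++ * root and the mount-namespace root; with CHROOT_INITRD the check only
++ * engages once init has run, so an initrd environment isn't misdetected
++ * as a chroot.
++ */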
++void gr_set_chroot_entries(struct task_struct *task, struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++ && gr_init_ran
++#endif
++ )
++ task->gr_is_chrooted = 1;
++ else {
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++ if (task->pid == 1 && !gr_init_ran)
++ gr_init_ran = 1;
++#endif
++ task->gr_is_chrooted = 0;
++ }
++
++ task->gr_chroot_dentry = path->dentry;
++#endif
++ return;
++}
++
++void gr_clear_chroot_entries(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ task->gr_is_chrooted = 0;
++ task->gr_chroot_dentry = NULL;
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ struct task_struct *p;
++
++ if (unlikely(!grsec_enable_chroot_unix))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ p = find_task_by_vpid_unrestricted(pid);
++ if (unlikely(p && !have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
++ return 0;
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++ && proc_is_chrooted(current)) {
++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ struct task_struct *p;
++ int ret = 0;
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
++ return ret;
++
++ read_lock(&tasklist_lock);
++ do_each_pid_task(pid, type, p) {
++ if (!have_same_root(current, p)) {
++ ret = 1;
++ goto out;
++ }
++ } while_each_pid_task(pid, type, p);
++out:
++ read_unlock(&tasklist_lock);
++ return ret;
++#endif
++ return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
++ return 0;
++
++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
++ !have_same_root(current, p)) {
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct path path, currentroot;
++ int ret = 0;
++
++ path.dentry = (struct dentry *)u_dentry;
++ path.mnt = (struct vfsmount *)u_mnt;
++ get_fs_root(current->fs, &currentroot);
++ if (path_is_under(&path, &currentroot))
++ ret = 1;
++ path_put(&currentroot);
++
++ return ret;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct task_struct *p;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++
++ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
++ starttime = p->start_time.tv_sec;
++ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
++ if (have_same_root(current, p)) {
++ goto allow;
++ } else {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++ /* creator exited, pid reuse, fall through to next check */
++ }
++ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
++ if (unlikely(!have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
++ return 0;
++ }
++ }
++
++allow:
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++extern const char *captab_log[];
++extern int captab_log_entries;
++
++int
++gr_chroot_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ if (cap_raised(chroot_caps, cap)) {
++ const struct cred *creds = current_cred();
++ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
++ }
++ return 0;
++ }
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
++ if (cap_raised(chroot_caps, cap)) {
++ return 0;
++ }
++ }
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
++ proc_is_chrooted(current))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct path *path)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, path);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ /* allow chmod +s on directories, but not files */
++ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++ proc_is_chrooted(current)) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
+new file mode 100644
+index 0000000..91cef85
+--- /dev/null
++++ b/grsecurity/grsec_disabled.c
+@@ -0,0 +1,441 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++ return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++void
++gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
++{
++ return;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++ return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++ const int unsafe_share)
++{
++ return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++ return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ int acc_mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ umode_t *mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++void
++grsecurity_init(void)
++{
++ return;
++}
++
++umode_t gr_acl_umask(void)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++ const int namelen, const ino_t ino)
++{
++ return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
++ const int imode)
++{
++ return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 0;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++int gr_acl_enable_at_secure(void)
++{
++ return 0;
++}
++
++dev_t gr_get_dev_from_dentry(struct dentry *dentry)
++{
++ return dentry->d_sb->s_dev;
++}
++
++void gr_put_exec_file(struct task_struct *task)
++{
++ return;
++}
++
++EXPORT_SYMBOL(gr_learn_resource);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
+diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
+new file mode 100644
+index 0000000..ee1f60f
+--- /dev/null
++++ b/grsecurity/grsec_exec.c
+@@ -0,0 +1,159 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++#include <linux/module.h>
++#include <linux/compat.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DEFINE_MUTEX(gr_exec_arg_mutex);
++#endif
++
++struct user_arg_ptr {
++#ifdef CONFIG_COMPAT
++ bool is_compat;
++#endif
++ union {
++ const char __user *const __user *native;
++#ifdef CONFIG_COMPAT
++ const compat_uptr_t __user *compat;
++#endif
++ } ptr;
++};
++
++extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
++
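++/* exec logging: concatenate up to 128 bytes of argv into a static
++   buffer under gr_exec_arg_mutex, replacing unprintable characters with
++   spaces, then emit a single audit line for the exec */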
++void
++gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char *grarg = gr_exec_arg_buf;
++ unsigned int i, x, execlen = 0;
++ char c;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ mutex_lock(&gr_exec_arg_mutex);
++ memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++ for (i = 0; i < bprm->argc && execlen < 128; i++) {
++ const char __user *p;
++ unsigned int len;
++
++ p = get_user_arg_ptr(argv, i);
++ if (IS_ERR(p))
++ goto log;
++
++ len = strnlen_user(p, 128 - execlen);
++ if (len > 128 - execlen)
++ len = 128 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++
++ /* rewrite unprintable characters */
++ for (x = 0; x < len; x++) {
++ c = *(grarg + execlen + x);
++ if (c < 32 || c > 126)
++ *(grarg + execlen + x) = ' ';
++ }
++
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++ bprm->file->f_path.mnt, grarg);
++ mutex_unlock(&gr_exec_arg_mutex);
++#endif
++ return;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int gr_acl_is_capable(const int cap);
++extern int gr_acl_is_capable_nolog(const int cap);
++extern int gr_chroot_is_capable(const int cap);
++extern int gr_chroot_is_capable_nolog(const int cap);
++#endif
++
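++/* capability names indexed by capability number; must stay in step with
++   include/linux/capability.h for cap_raised() logging to name the right
++   capability */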
++const char *captab_log[] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE",
++ "CAP_AUDIT_WRITE",
++ "CAP_AUDIT_CONTROL",
++ "CAP_SETFCAP",
++ "CAP_MAC_OVERRIDE",
++ "CAP_MAC_ADMIN",
++ "CAP_SYSLOG",
++ "CAP_WAKE_ALARM"
++};
++
++int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
++
++int gr_is_capable(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
++ return 1;
++ return 0;
++#else
++ return 1;
++#endif
++}
++
++int gr_is_capable_nolog(const int cap)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
++ return 1;
++ return 0;
++#else
++ return 1;
++#endif
++}
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
+diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
+new file mode 100644
+index 0000000..d3ee748
+--- /dev/null
++++ b/grsecurity/grsec_fifo.c
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (cred->fsuid != dentry->d_inode->i_uid)) {
++ if (!inode_permission(dentry->d_inode, acc_mode))
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
+new file mode 100644
+index 0000000..8ca18bf
+--- /dev/null
++++ b/grsecurity/grsec_fork.c
+@@ -0,0 +1,23 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
++ switch (retval) {
++ case -EAGAIN:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
++ break;
++ case -ENOMEM:
++ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
++ break;
++ }
++ }
++#endif
++ return;
++}
+diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
+new file mode 100644
+index 0000000..264a9f3
+--- /dev/null
++++ b/grsecurity/grsec_init.c
+@@ -0,0 +1,268 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_ptrace_readexec;
++int grsec_enable_setxid;
++int grsec_enable_symlinkown;
++int grsec_symlinkown_gid;
++int grsec_enable_brute;
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_audit_ptrace;
++int grsec_enable_time;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_deny_new_usb;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_blackhole;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries;
++int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_disable_privio;
++int grsec_enable_log_rwxmaps;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++void __init
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
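++	/* presumably wipes the BIOS keyboard buffer: 0x41a/0x41c hold the
++	   head/tail pointers and the 32-byte buffer follows at 0x41e, so
++	   keystrokes typed at boot (e.g. bootloader passwords) don't
++	   linger in low memory */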
++ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++
++ /* allocate log buffers */
++ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_alert_log_fmt) {
++ panic("Unable to allocate grsecurity alert log format buffer");
++ return;
++ }
++ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++ if (!gr_audit_log_fmt) {
++ panic("Unable to allocate grsecurity audit log format buffer");
++ return;
++ }
++ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_alert_log_buf) {
++ panic("Unable to allocate grsecurity alert log buffer");
++ return;
++ }
++ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++ if (!gr_audit_log_buf) {
++ panic("Unable to allocate grsecurity audit log buffer");
++ return;
++ }
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++ grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++ grsec_disable_privio = 1;
++#else
++ grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ grsec_enable_log_rwxmaps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++ grsec_enable_ptrace_readexec = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ grsec_enable_brute = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ grsec_enable_blackhole = 1;
++ grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++ grsec_enable_setxid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++ grsec_enable_symlinkown = 1;
++ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
++ grsec_deny_new_usb = 1;
++#endif
++
++ return;
++}
+diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
+new file mode 100644
+index 0000000..8598e7f
+--- /dev/null
++++ b/grsecurity/grsec_link.c
+@@ -0,0 +1,58 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
++{
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++ const struct inode *link_inode = link->dentry->d_inode;
++
++ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
++ /* ignore root-owned links, e.g. /proc/self */
++ link_inode->i_uid && target &&
++ link_inode->i_uid != target->i_uid) {
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ const struct cred *cred = current_cred();
++
++ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
++ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
++ !capable(CAP_FOWNER) && cred->uid) {
++ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
+new file mode 100644
+index 0000000..56b5e9d
+--- /dev/null
++++ b/grsecurity/grsec_log.c
+@@ -0,0 +1,337 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
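++/* BEGIN_LOCKS/END_LOCKS bracket every log operation: preemption is
++ * disabled on preemptible-RCU kernels, RCU plus the tasklist and
++ * exec-file locks pin the task data read while formatting, and the
++ * alert or audit spinlock serializes the shared log buffer.  For denial
++ * (GR_DONT_AUDIT) messages END_LOCKS also runs alertkill handling.
++ */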
++#define BEGIN_LOCKS(x) \
++ DISABLE_PREEMPT(); \
++ rcu_read_lock(); \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ if (x != GR_DO_AUDIT) \
++ spin_lock(&grsec_alert_lock); \
++ else \
++ spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++ if (x != GR_DO_AUDIT) \
++ spin_unlock(&grsec_alert_lock); \
++ else \
++ spin_unlock(&grsec_audit_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++ rcu_read_unlock(); \
++ ENABLE_PREEMPT(); \
++ if (x == GR_DONT_AUDIT) \
++ gr_handle_alertkill(current)
++
++enum {
++ FLOODING,
++ NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
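++/* flood control: after CONFIG_GRKERNSEC_FLOODBURST alerts within a
++   CONFIG_GRKERNSEC_FLOODTIME-second window, further alerts are dropped
++   until the window expires; audit messages are never rate-limited */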
++static int gr_log_start(int audit)
++{
++ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
++ unsigned long curr_secs = get_seconds();
++
++ if (audit == GR_DO_AUDIT)
++ goto set_fmt;
++
++ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
++ grsec_alert_wtime = curr_secs;
++ grsec_alert_fyet = 0;
++ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
++ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++ grsec_alert_fyet++;
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++ grsec_alert_wtime = curr_secs;
++ grsec_alert_fyet++;
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++ return FLOODING;
++ }
++ else return FLOODING;
++
++set_fmt:
++#endif
++ memset(buf, 0, PAGE_SIZE);
++ if (current->signal->curr_ip && gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else if (current->signal->curr_ip) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++ } else if (gr_acl_is_enabled()) {
++ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++ } else {
++ sprintf(fmt, "%s%s", loglevel, "grsec: ");
++ strcpy(buf, fmt);
++ }
++
++ return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++ __attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++ return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++ __attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++ unsigned int len = strlen(buf);
++ va_list ap;
++
++ va_start(ap, msg);
++ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++ va_end(ap);
++
++ return;
++}
++
++static void gr_log_end(int audit, int append_default)
++{
++ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++
++ if (append_default) {
++ unsigned int len = strlen(buf);
++ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
++ }
++
++ printk("%s\n", buf);
++
++ return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++ int logtype;
++ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
++ void *voidptr = NULL;
++ int num1 = 0, num2 = 0;
++ unsigned long ulong1 = 0, ulong2 = 0;
++ struct dentry *dentry = NULL;
++ struct vfsmount *mnt = NULL;
++ struct file *file = NULL;
++ struct task_struct *task = NULL;
++ struct vm_area_struct *vma = NULL;
++ const struct cred *cred, *pcred;
++ va_list ap;
++
++ BEGIN_LOCKS(audit);
++ logtype = gr_log_start(audit);
++ if (logtype == FLOODING) {
++ END_LOCKS(audit);
++ return;
++ }
++ va_start(ap, argtypes);
++ switch (argtypes) {
++ case GR_TTYSNIFF:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
++ break;
++ case GR_SYSCTL_HIDDEN:
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, str1);
++ break;
++ case GR_RBAC:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_STR_RBAC:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_RBAC_MODE2:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++ break;
++ case GR_RBAC_MODE3:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ str2 = va_arg(ap, char *);
++ str3 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++ break;
++ case GR_FILENAME:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++ break;
++ case GR_STR_FILENAME:
++ str1 = va_arg(ap, char *);
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++ break;
++ case GR_FILENAME_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++ break;
++ case GR_FILENAME_TWO_INT:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++ break;
++ case GR_FILENAME_TWO_INT_STR:
++ dentry = va_arg(ap, struct dentry *);
++ mnt = va_arg(ap, struct vfsmount *);
++ num1 = va_arg(ap, int);
++ num2 = va_arg(ap, int);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++ break;
++ case GR_TEXTREL:
++ file = va_arg(ap, struct file *);
++ ulong1 = va_arg(ap, unsigned long);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++ break;
++ case GR_PTRACE:
++ task = va_arg(ap, struct task_struct *);
++ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
++ break;
++ case GR_RESOURCE:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ str1 = va_arg(ap, char *);
++ ulong2 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CAP:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ str1 = va_arg(ap, char *);
++ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_SIG:
++ str1 = va_arg(ap, char *);
++ voidptr = va_arg(ap, void *);
++ gr_log_middle_varargs(audit, msg, str1, voidptr);
++ break;
++ case GR_SIG2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ num1 = va_arg(ap, int);
++ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ break;
++ case GR_CRASH1:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
++ break;
++ case GR_CRASH2:
++ task = va_arg(ap, struct task_struct *);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++ ulong1 = va_arg(ap, unsigned long);
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
++ break;
++ case GR_RWXMAP:
++ file = va_arg(ap, struct file *);
++ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
++ break;
++ case GR_RWXMAPVMA:
++ vma = va_arg(ap, struct vm_area_struct *);
++ if (vma->vm_file)
++ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
++ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++ str1 = "<stack>";
++ else if (vma->vm_start <= current->mm->brk &&
++ vma->vm_end >= current->mm->start_brk)
++ str1 = "<heap>";
++ else
++ str1 = "<anonymous mapping>";
++ gr_log_middle_varargs(audit, msg, str1);
++ break;
++ case GR_PSACCT:
++ {
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ task = va_arg(ap, struct task_struct *);
++ wday = va_arg(ap, unsigned int);
++ cday = va_arg(ap, unsigned int);
++ whr = va_arg(ap, int);
++ chr = va_arg(ap, int);
++ wmin = va_arg(ap, int);
++ cmin = va_arg(ap, int);
++ wsec = va_arg(ap, int);
++ csec = va_arg(ap, int);
++ ulong1 = va_arg(ap, unsigned long);
++ cred = __task_cred(task);
++ pcred = __task_cred(task->real_parent);
++
++ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++ }
++ break;
++ default:
++ gr_log_middle(audit, msg, ap);
++ }
++ va_end(ap);
++ // these don't need DEFAULTSECARGS printed on the end
++ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
++ gr_log_end(audit, 0);
++ else
++ gr_log_end(audit, 1);
++ END_LOCKS(audit);
++}
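
gr_log_start() implements a simple windowed rate limiter: up to CONFIG_GRKERNSEC_FLOODBURST alerts may be emitted within a CONFIG_GRKERNSEC_FLOODTIME-second window, one suppression notice is printed when the burst is exhausted, and everything else is dropped until a fresh window opens. A userspace sketch of the same scheme, with illustrative constants (the real values come from Kconfig):

/* Windowed burst limiter in the style of gr_log_start(); constants and
 * names are illustrative. */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10  /* seconds per window */
#define FLOODBURST 6   /* messages allowed per window */

static time_t window_start;
static unsigned int sent;

/* Returns 1 if the message may be logged, 0 if it is suppressed. */
static int may_log(void)
{
    time_t now = time(NULL);

    if (!window_start || now > window_start + FLOODTIME) {
        window_start = now;   /* open a fresh window */
        sent = 0;
    }
    if (sent < FLOODBURST) {
        sent++;
        return 1;
    }
    if (sent == FLOODBURST) {
        sent++;               /* emit the suppression notice exactly once */
        window_start = now;
        fprintf(stderr, "more alerts, logging disabled for %d seconds\n",
                FLOODTIME);
    }
    return 0;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        printf("msg %d: %s\n", i, may_log() ? "logged" : "dropped");
    return 0;
}
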
+diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
+new file mode 100644
+index 0000000..f536303
+--- /dev/null
++++ b/grsecurity/grsec_mem.c
+@@ -0,0 +1,40 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++ return;
++}
++
++void
++gr_handle_mem_readwrite(u64 from, u64 to)
++{
++ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
++ return;
++}
++
++void
++gr_handle_vm86(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++ return;
++}
++
++void
++gr_log_badprocpid(const char *entry)
++{
++ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
++ return;
++}
+diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
+new file mode 100644
+index 0000000..cd9e124
+--- /dev/null
++++ b/grsecurity/grsec_mount.c
+@@ -0,0 +1,65 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mount.h>
++#include <linux/major.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
++#endif
++ return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
++#endif
++ return;
++}
++
++int
++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
++
++int
++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_ROFS
++ struct inode *inode = dentry->d_inode;
++
++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
++ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
++ return -EPERM;
++ } else
++ return 0;
++#endif
++ return 0;
++}
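
gr_handle_rofs_blockwrite() pairs with gr_handle_rofs_mount(): once the romount_protect toggle is set, new mounts must be read-only and write opens of block devices (or of raw character devices, major RAW_MAJOR) are refused, so a root compromise cannot simply scribble on the disk underneath a read-only mount. The device test, restated in userspace for illustration (names are not from the patch):

/* The gr_handle_rofs_blockwrite() device test in userspace. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define RAW_MAJOR 162   /* as defined in linux/major.h */

static int write_would_be_denied(const struct stat *st)
{
    return S_ISBLK(st->st_mode) ||
           (S_ISCHR(st->st_mode) && major(st->st_rdev) == RAW_MAJOR);
}

int main(int argc, char **argv)
{
    struct stat st;

    if (argc != 2 || stat(argv[1], &st))
        return 1;
    printf("%s\n", write_would_be_denied(&st) ? "write open denied"
                                              : "not covered");
    return 0;
}
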
+diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
+new file mode 100644
+index 0000000..6ee9d50
+--- /dev/null
++++ b/grsecurity/grsec_pax.c
+@@ -0,0 +1,45 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++ return;
++}
++
++void gr_log_ptgnustack(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
++#endif
++ return;
++}
++
++void
++gr_log_rwxmmap(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
++#endif
++ return;
++}
++
++void
++gr_log_rwxmprotect(struct vm_area_struct *vma)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (grsec_enable_log_rwxmaps)
++ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
++#endif
++ return;
++}
+diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
+new file mode 100644
+index 0000000..f7f29aa
+--- /dev/null
++++ b/grsecurity/grsec_ptrace.c
+@@ -0,0 +1,30 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/security.h>
++
++void
++gr_audit_ptrace(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ if (grsec_enable_audit_ptrace)
++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
++#endif
++ return;
++}
++
++int
++gr_ptrace_readexec(struct file *file, int unsafe_flags)
++{
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++ const struct dentry *dentry = file->f_path.dentry;
++ const struct vfsmount *mnt = file->f_path.mnt;
++
++ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
++ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
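
The readexec check above closes a ptrace loophole: a user who may execute a binary but not read it could otherwise attach to the freshly exec'd process and dump the image out of memory, so an unsafe-ptrace exec is refused unless the tracer could read the file anyway. In userspace terms the gate is roughly access(path, R_OK) before allowing the traced exec; a hedged sketch:

/* Rough userspace analogue of gr_ptrace_readexec(): access(2) stands in
 * for inode_permission(..., MAY_READ). Illustrative only. */
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 2)
        return 1;
    printf("%s\n", access(argv[1], R_OK) == 0
                       ? "traced exec allowed"
                       : "traced exec denied (file unreadable)");
    return 0;
}
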
+diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
+new file mode 100644
+index 0000000..c6a07aa
+--- /dev/null
++++ b/grsecurity/grsec_sig.c
+@@ -0,0 +1,245 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/hardirq.h>
++
++char *signames[] = {
++ [SIGSEGV] = "Segmentation fault",
++ [SIGILL] = "Illegal instruction",
++ [SIGABRT] = "Abort",
++ [SIGBUS] = "Invalid alignment/Bus error"
++};
++
++void
++gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++ (sig == SIGABRT) || (sig == SIGBUS))) {
++ if (t->pid == current->pid) {
++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
++ } else {
++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ /* ignore the 0 signal for protected task checks */
++ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret, blocked, ignored;
++ struct k_sigaction *action;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ action = &t->sighand->action[sig-1];
++ ignored = action->sa.sa_handler == SIG_IGN;
++ blocked = sigismember(&t->blocked, sig);
++ if (blocked || ignored) {
++ action->sa.sa_handler = SIG_DFL;
++ if (blocked) {
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_and_wake(t);
++ }
++ }
++ if (action->sa.sa_handler == SIG_DFL)
++ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
++
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++#endif
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++#define GR_USER_BAN_TIME (15 * 60)
++#define GR_DAEMON_BRUTE_TIME (30 * 60)
++
++static int __get_dumpable(unsigned long mm_flags)
++{
++ int ret;
++
++ ret = mm_flags & MMF_DUMPABLE_MASK;
++ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
++}
++#endif
++
++void gr_handle_brute_attach(unsigned long mm_flags)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ struct task_struct *p = current;
++ uid_t uid = 0;
++ int daemon = 0;
++
++ if (!grsec_enable_brute)
++ return;
++
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
++ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
++ p->real_parent->brute = 1;
++ daemon = 1;
++ } else {
++ const struct cred *cred = __task_cred(p), *cred2;
++ struct task_struct *tsk, *tsk2;
++ int dumpable = __get_dumpable(mm_flags);
++
++ if (dumpable != SUID_DUMPABLE_ENABLED && cred->uid) {
++ struct user_struct *user;
++
++ uid = cred->uid;
++
++			/* find_user() takes a reference here; it is put (free_uid) at exec time once the ban expires, in suid_ban_expired() */
++ user = find_user(uid);
++ if (user == NULL)
++ goto unlock;
++ user->suid_banned = 1;
++ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
++ if (user->suid_ban_expires == ~0UL)
++ user->suid_ban_expires--;
++
++ /* only kill other threads of the same binary, from the same user */
++ do_each_thread(tsk2, tsk) {
++ cred2 = __task_cred(tsk);
++ if (tsk != p && cred2->uid == uid && gr_is_same_file(tsk->exec_file, p->exec_file))
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ }
++ }
++unlock:
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++
++ if (uid)
++ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, uid, GR_USER_BAN_TIME / 60);
++ else if (daemon)
++ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
++
++#endif
++ return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ struct task_struct *p = current;
++
++ if (unlikely(p->brute)) {
++ if (!grsec_enable_brute)
++ p->brute = 0;
++ else if (time_before(get_seconds(), p->brute_expires))
++ msleep(30 * 1000);
++ }
++#endif
++ return;
++}
++
++void gr_handle_kernel_exploit(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ const struct cred *cred;
++ struct task_struct *tsk, *tsk2;
++ struct user_struct *user;
++ uid_t uid;
++
++ if (in_irq() || in_serving_softirq() || in_nmi())
++ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
++
++ uid = current_uid();
++
++ if (uid == 0)
++ panic("grsec: halting the system due to suspicious kernel crash caused by root");
++ else {
++ /* kill all the processes of this user, hold a reference
++ to their creds struct, and prevent them from creating
++ another process until system reset
++ */
++ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
++ /* we intentionally leak this ref */
++ user = get_uid(current->cred->user);
++ if (user)
++ user->kernel_banned = 1;
++
++ /* kill all processes of this user */
++ read_lock(&tasklist_lock);
++ do_each_thread(tsk2, tsk) {
++ cred = __task_cred(tsk);
++ if (cred->uid == uid)
++ gr_fake_force_sig(SIGKILL, tsk);
++ } while_each_thread(tsk2, tsk);
++ read_unlock(&tasklist_lock);
++ }
++#endif
++}
++
++#ifdef CONFIG_GRKERNSEC_BRUTE
++static bool suid_ban_expired(struct user_struct *user)
++{
++ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
++ user->suid_banned = 0;
++ user->suid_ban_expires = 0;
++ free_uid(user);
++ return true;
++ }
++
++ return false;
++}
++#endif
++
++int gr_process_kernel_exec_ban(void)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ if (unlikely(current->cred->user->kernel_banned))
++ return -EPERM;
++#endif
++ return 0;
++}
++
++int gr_process_kernel_setuid_ban(struct user_struct *user)
++{
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ if (unlikely(user->kernel_banned))
++ gr_fake_force_sig(SIGKILL, current);
++#endif
++ return 0;
++}
++
++int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ struct user_struct *user = current->cred->user;
++ if (unlikely(user->suid_banned)) {
++ if (suid_ban_expired(user))
++ return 0;
++ /* disallow execution of suid binaries only */
++ else if (bprm->cred->euid != current->cred->uid)
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
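
The brute-force logic above bans by time window: a crashing setuid binary earns its user a GR_USER_BAN_TIME ban recorded in suid_ban_expires, with ~0UL reserved as a "never expires" sentinel (hence the decrement when a computed expiry happens to collide with it), and the ban is cleared lazily on the next exec via suid_ban_expired(). The bookkeeping pattern in isolation, as a userspace sketch with illustrative names:

/* Time-window ban bookkeeping in the style of grsec_sig.c above. */
#include <stdio.h>
#include <time.h>

#define BAN_SECONDS (15 * 60)

struct banned {
    int banned;
    unsigned long expires;   /* seconds since epoch; ~0UL means never */
};

static void ban(struct banned *b)
{
    b->banned = 1;
    b->expires = (unsigned long)time(NULL) + BAN_SECONDS;
    if (b->expires == ~0UL)  /* preserve the "permanent" sentinel */
        b->expires--;
}

/* Lazily clears and reports an expired ban, as suid_ban_expired() does. */
static int ban_expired(struct banned *b)
{
    if (b->expires != ~0UL && (unsigned long)time(NULL) >= b->expires) {
        b->banned = 0;
        b->expires = 0;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct banned b = { 0, 0 };

    ban(&b);
    printf("banned=%d expired=%d\n", b.banned, ban_expired(&b));
    return 0;
}
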
+diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
+new file mode 100644
+index 0000000..4030d57
+--- /dev/null
++++ b/grsecurity/grsec_sock.c
+@@ -0,0 +1,244 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++ struct conn_table_entry *next;
++ struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++extern const char * gr_sockfamily_to_name(unsigned char family);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++ sig->gr_sport == sport && sig->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++ struct conn_table_entry **match;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ newent->sig = sig;
++
++ match = &gr_conn_table[index];
++ newent->next = *match;
++ *match = newent;
++
++ return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++ struct conn_table_entry *match, *last = NULL;
++ unsigned int index;
++
++ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++ sig->gr_sport, sig->gr_dport,
++ gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig,
++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
++ sig->gr_dport)) {
++ last = match;
++ match = match->next;
++ }
++
++ if (match) {
++ if (last)
++ last->next = match->next;
++ else
++ gr_conn_table[index] = NULL;
++ kfree(match);
++ }
++
++ return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ struct conn_table_entry *match;
++ unsigned int index;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ match = gr_conn_table[index];
++ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++ match = match->next;
++
++ if (match)
++ return match->sig;
++ else
++ return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *sig = task->signal;
++ struct conn_table_entry *newent;
++
++ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++ if (newent == NULL)
++ return;
++ /* no bh lock needed since we are called with bh disabled */
++ spin_lock(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(sig);
++ sig->gr_saddr = inet->inet_rcv_saddr;
++ sig->gr_daddr = inet->inet_daddr;
++ sig->gr_sport = inet->inet_sport;
++ sig->gr_dport = inet->inet_dport;
++ gr_add_to_task_ip_table_nolock(sig, newent);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock_bh(&gr_conn_table_lock);
++ gr_del_task_from_ip_table_nolock(task->signal);
++ spin_unlock_bh(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct signal_struct *p, *set;
++ const struct inet_sock *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current->signal;
++
++ spin_lock_bh(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
++ inet->inet_dport, inet->inet_sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock_bh(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock_bh(&gr_conn_table_lock);
++
++ set->curr_ip = inet->inet_daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX)) {
++ if (family == AF_INET)
++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
++ else
++ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sk_family != AF_UNIX) &&
++ (sck->sk_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
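
The connection table above exists so that log lines can carry the source IP of the TCP session a daemon accept()ed: gr_update_task_in_ip_table() records a task's 4-tuple in a fixed-size, chained hash (32749 buckets, a prime), and gr_attach_curr_ip() later matches the mirrored tuple to propagate curr_ip to the handling task. The data structure in isolation, as a self-contained userspace sketch (illustrative names):

/* Fixed-size chained hash keyed on the connection 4-tuple, with the same
 * hash function as conn_hash() above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749   /* prime, as in the patch */

struct entry {
    uint32_t saddr, daddr;
    uint16_t sport, dport;
    struct entry *next;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
    return (daddr + saddr + ((uint32_t)sport << 8) +
            ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
    struct entry *e = malloc(sizeof(*e));
    unsigned int i = conn_hash(s, d, sp, dp);

    if (!e)
        return;
    e->saddr = s; e->daddr = d; e->sport = sp; e->dport = dp;
    e->next = table[i];   /* push onto the head of the chain */
    table[i] = e;
}

static struct entry *lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
    struct entry *e = table[conn_hash(s, d, sp, dp)];

    while (e && !(e->saddr == s && e->daddr == d &&
                  e->sport == sp && e->dport == dp))
        e = e->next;
    return e;
}

int main(void)
{
    insert(0x0a000001, 0x0a000002, 1234, 80);
    printf("found=%d\n", lookup(0x0a000001, 0x0a000002, 1234, 80) != NULL);
    return 0;
}
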
+diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
+new file mode 100644
+index 0000000..6314062
+--- /dev/null
++++ b/grsecurity/grsec_sysctl.c
+@@ -0,0 +1,468 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
++ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
++static int __maybe_unused __read_only one = 1;
++#endif
++
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
++ defined(CONFIG_GRKERNSEC_DENYUSB)
++struct ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
++#ifdef CONFIG_GRKERNSEC_IO
++ {
++ .procname = "disable_priv_io",
++ .data = &grsec_disable_privio,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
++ {
++ .procname = "enforce_symlinksifowner",
++ .data = &grsec_enable_symlinkown,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "symlinkown_gid",
++ .data = &grsec_symlinkown_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ {
++ .procname = "deter_bruteforce",
++ .data = &grsec_enable_brute,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
++ {
++ .procname = "ptrace_readexec",
++ .data = &grsec_enable_ptrace_readexec,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++ {
++ .procname = "consistent_setxid",
++ .data = &grsec_enable_setxid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ {
++ .procname = "ip_blackhole",
++ .data = &grsec_enable_blackhole,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "lastack_retries",
++ .data = &grsec_lastack_retries,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ {
++ .procname = "exec_logging",
++ .data = &grsec_enable_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ {
++ .procname = "rwxmap_logging",
++ .data = &grsec_enable_log_rwxmaps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ {
++ .procname = "signal_logging",
++ .data = &grsec_enable_signal,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ {
++ .procname = "forkfail_logging",
++ .data = &grsec_enable_forkfail,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ {
++ .procname = "timechange_logging",
++ .data = &grsec_enable_time,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ {
++ .procname = "chroot_deny_shmat",
++ .data = &grsec_enable_chroot_shmat,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ {
++ .procname = "chroot_deny_unix",
++ .data = &grsec_enable_chroot_unix,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ {
++ .procname = "chroot_deny_mount",
++ .data = &grsec_enable_chroot_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ {
++ .procname = "chroot_deny_fchdir",
++ .data = &grsec_enable_chroot_fchdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ {
++ .procname = "chroot_deny_chroot",
++ .data = &grsec_enable_chroot_double,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ {
++ .procname = "chroot_deny_pivot",
++ .data = &grsec_enable_chroot_pivot,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ {
++ .procname = "chroot_enforce_chdir",
++ .data = &grsec_enable_chroot_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ {
++ .procname = "chroot_deny_chmod",
++ .data = &grsec_enable_chroot_chmod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ {
++ .procname = "chroot_deny_mknod",
++ .data = &grsec_enable_chroot_mknod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ {
++ .procname = "chroot_restrict_nice",
++ .data = &grsec_enable_chroot_nice,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ {
++ .procname = "chroot_execlog",
++ .data = &grsec_enable_chroot_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ {
++ .procname = "chroot_caps",
++ .data = &grsec_enable_chroot_caps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ {
++ .procname = "chroot_deny_sysctl",
++ .data = &grsec_enable_chroot_sysctl,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ {
++ .procname = "tpe",
++ .data = &grsec_enable_tpe,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "tpe_gid",
++ .data = &grsec_tpe_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ {
++ .procname = "tpe_invert",
++ .data = &grsec_enable_tpe_invert,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ {
++ .procname = "tpe_restrict_all",
++ .data = &grsec_enable_tpe_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ {
++ .procname = "socket_all",
++ .data = &grsec_enable_socket_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_all_gid",
++ .data = &grsec_socket_all_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ {
++ .procname = "socket_client",
++ .data = &grsec_enable_socket_client,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_client_gid",
++ .data = &grsec_socket_client_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ {
++ .procname = "socket_server",
++ .data = &grsec_enable_socket_server,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "socket_server_gid",
++ .data = &grsec_socket_server_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ {
++ .procname = "audit_group",
++ .data = &grsec_enable_group,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .procname = "audit_gid",
++ .data = &grsec_audit_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ {
++ .procname = "audit_chdir",
++ .data = &grsec_enable_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ {
++ .procname = "audit_mount",
++ .data = &grsec_enable_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ {
++ .procname = "dmesg",
++ .data = &grsec_enable_dmesg,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ {
++ .procname = "chroot_findtask",
++ .data = &grsec_enable_chroot_findtask,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ {
++ .procname = "resource_logging",
++ .data = &grsec_resource_logging,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++ {
++ .procname = "audit_ptrace",
++ .data = &grsec_enable_audit_ptrace,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++ {
++ .procname = "harden_ptrace",
++ .data = &grsec_enable_harden_ptrace,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ {
++ .procname = "grsec_lock",
++ .data = &grsec_lock,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_ROFS
++ {
++ .procname = "romount_protect",
++ .data = &grsec_enable_rofs,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &one,
++ },
++#endif
++#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
++ {
++ .procname = "deny_new_usb",
++ .data = &grsec_deny_new_usb,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ { }
++};
++#endif
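
Every entry in grsecurity_table is an int knob handled by proc_dointvec with mode 0600, and gr_handle_sysctl_mod() at the top of this file is what makes grsec_lock a one-way switch: once it is set, writes anywhere under the grsecurity directory return -EACCES until reboot. Assuming the table is registered under kernel/ as elsewhere in the patch, the knobs surface as /proc/sys/kernel/grsecurity/<name>; a small usage sketch (the paths are an assumption inferred from the table, not quoted from the patch):

/* Hypothetical usage: enable a knob, then seal the configuration. */
#include <stdio.h>

static int write_knob(const char *name, const char *val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    write_knob("deter_bruteforce", "1");   /* turn a feature on... */
    write_knob("grsec_lock", "1");         /* ...then lock all knobs */
    return 0;
}
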
+diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
+new file mode 100644
+index 0000000..0dc13c3
+--- /dev/null
++++ b/grsecurity/grsec_time.c
+@@ -0,0 +1,16 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++ if (grsec_enable_time)
++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
++#endif
++ return;
++}
++
++EXPORT_SYMBOL(gr_log_timechange);
+diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
+new file mode 100644
+index 0000000..07e0dc0
+--- /dev/null
++++ b/grsecurity/grsec_tpe.c
+@@ -0,0 +1,73 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
++ const struct cred *cred = current_cred();
++ char *msg = NULL;
++ char *msg2 = NULL;
++
++ // never restrict root
++ if (!cred->uid)
++ return 1;
++
++ if (grsec_enable_tpe) {
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
++ msg = "not being in trusted group";
++ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
++ msg = "being in untrusted group";
++#else
++ if (in_group_p(grsec_tpe_gid))
++ msg = "being in untrusted group";
++#endif
++ }
++ if (!msg && gr_acl_tpe_check())
++ msg = "being in untrusted role";
++
++ // not in any affected group/role
++ if (!msg)
++ goto next_check;
++
++ if (inode->i_uid)
++ msg2 = "file in non-root-owned directory";
++ else if (inode->i_mode & S_IWOTH)
++ msg2 = "file in world-writable directory";
++ else if (inode->i_mode & S_IWGRP)
++ msg2 = "file in group-writable directory";
++
++ if (msg && msg2) {
++ char fullmsg[70] = {0};
++ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++ msg = NULL;
++next_check:
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
++ return 1;
++
++ if (inode->i_uid && (inode->i_uid != cred->uid))
++ msg = "directory not owned by user";
++ else if (inode->i_mode & S_IWOTH)
++ msg = "file in world-writable directory";
++ else if (inode->i_mode & S_IWGRP)
++ msg = "file in group-writable directory";
++
++ if (msg) {
++ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
++ return 0;
++ }
++#endif
++#endif
++ return 1;
++}
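
The heart of gr_tpe_allow() is a trust test on the directory containing the binary: for users in the untrusted group (or outside the trusted group with TPE_INVERT), execution is denied when the directory is not root-owned, or is group- or world-writable; with TPE_ALL the same test, relaxed to allow user-owned directories, is applied to everyone. The directory test restated in userspace (illustrative names only):

/* Directory trust test in the style of gr_tpe_allow(). */
#include <stdio.h>
#include <libgen.h>
#include <sys/stat.h>

static const char *untrusted_reason(const struct stat *dir)
{
    if (dir->st_uid != 0)
        return "file in non-root-owned directory";
    if (dir->st_mode & S_IWOTH)
        return "file in world-writable directory";
    if (dir->st_mode & S_IWGRP)
        return "file in group-writable directory";
    return NULL;
}

int main(int argc, char **argv)
{
    char buf[4096];
    struct stat st;
    const char *why;

    if (argc != 2)
        return 1;
    snprintf(buf, sizeof(buf), "%s", argv[1]);
    if (stat(dirname(buf), &st))
        return 1;
    why = untrusted_reason(&st);
    printf("%s\n", why ? why : "directory trusted");
    return 0;
}
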
+diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
+new file mode 100644
+index 0000000..ae02d8e
+--- /dev/null
++++ b/grsecurity/grsec_usb.c
+@@ -0,0 +1,15 @@
++#include <linux/kernel.h>
++#include <linux/grinternal.h>
++#include <linux/module.h>
++
++int gr_handle_new_usb(void)
++{
++#ifdef CONFIG_GRKERNSEC_DENYUSB
++ if (grsec_deny_new_usb) {
++ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
++ return 1;
++ }
++#endif
++ return 0;
++}
++EXPORT_SYMBOL_GPL(gr_handle_new_usb);
+diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
+new file mode 100644
+index 0000000..9f7b1ac
+--- /dev/null
++++ b/grsecurity/grsum.c
+@@ -0,0 +1,61 @@
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++ char *p;
++ struct crypto_hash *tfm;
++ struct hash_desc desc;
++ struct scatterlist sg;
++ unsigned char temp_sum[GR_SHA_LEN];
++ volatile int retval = 0;
++ volatile int dummy = 0;
++ unsigned int i;
++
++ sg_init_table(&sg, 1);
++
++ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR(tfm)) {
++ /* should never happen, since sha256 should be built in */
++ return 1;
++ }
++
++ desc.tfm = tfm;
++ desc.flags = 0;
++
++ crypto_hash_init(&desc);
++
++ p = salt;
++ sg_set_buf(&sg, p, GR_SALT_LEN);
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ p = entry->pw;
++ sg_set_buf(&sg, p, strlen(p));
++
++ crypto_hash_update(&desc, &sg, sg.length);
++
++ crypto_hash_final(&desc, temp_sum);
++
++ memset(entry->pw, 0, GR_PW_LEN);
++
++ for (i = 0; i < GR_SHA_LEN; i++)
++ if (sum[i] != temp_sum[i])
++ retval = 1;
++ else
++ dummy = 1; // waste a cycle
++
++ crypto_free_hash(tfm);
++
++ return retval;
++}
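
Note the shape of the comparison loop in chkpw(): it walks all GR_SHA_LEN bytes no matter where the first mismatch falls, with the volatile retval/dummy pair keeping the compiler from short-circuiting, so the comparison time does not leak how long the matching prefix was. The common userspace formulation of the same idea accumulates differences instead of branching (a sketch, not the patch's code):

/* Constant-time comparison: runtime is independent of the data. */
#include <stddef.h>
#include <stdio.h>

static int const_time_memcmp(const unsigned char *a,
                             const unsigned char *b, size_t len)
{
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= a[i] ^ b[i];   /* touch every byte, never branch on data */
    return diff != 0;          /* 0 only when all bytes match */
}

int main(void)
{
    unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 5 };

    printf("%d %d\n", const_time_memcmp(x, x, 4),
                      const_time_memcmp(x, y, 4));
    return 0;
}
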
+diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
+index 77ff547..181834f 100644
+--- a/include/asm-generic/4level-fixup.h
++++ b/include/asm-generic/4level-fixup.h
+@@ -13,8 +13,10 @@
+ #define pmd_alloc(mm, pud, address) \
+ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+ NULL: pmd_offset(pud, address))
++#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
+
+ #define pud_alloc(mm, pgd, address) (pgd)
++#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
+ #define pud_offset(pgd, start) (pgd)
+ #define pud_none(pud) 0
+ #define pud_bad(pud) 0
+diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
+index b7babf0..97f4c4f 100644
+--- a/include/asm-generic/atomic-long.h
++++ b/include/asm-generic/atomic-long.h
+@@ -22,6 +22,12 @@
+
+ typedef atomic64_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic64_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic64_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+ static inline long atomic_long_read(atomic_long_t *l)
+@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
+ return (long)atomic64_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
+ atomic64_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
+ atomic64_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
+ atomic64_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
+ atomic64_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ atomic64_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ atomic64_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+ return atomic64_add_negative(i, v);
+ }
+
+-static inline long atomic_long_add_return(long i, atomic_long_t *l)
++static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_return(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_add_return_unchecked(i, v);
++}
++#endif
++
+ static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
+ return (long)atomic64_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
++
++ return (long)atomic64_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic64_t *v = (atomic64_t *)l;
+@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+
+ typedef atomic_t atomic_long_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef atomic_unchecked_t atomic_long_unchecked_t;
++#else
++typedef atomic_t atomic_long_unchecked_t;
++#endif
++
+ #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+ static inline long atomic_long_read(atomic_long_t *l)
+ {
+@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
+ return (long)atomic_read(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_read_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_set(atomic_long_t *l, long i)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
+ atomic_set(v, i);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_set_unchecked(v, i);
++}
++#endif
++
+ static inline void atomic_long_inc(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
+ atomic_inc(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_inc_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_dec(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
+ atomic_dec(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_dec_unchecked(v);
++}
++#endif
++
+ static inline void atomic_long_add(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
+ atomic_add(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_add_unchecked(i, v);
++}
++#endif
++
+ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
+ atomic_sub(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ atomic_sub_unchecked(i, v);
++}
++#endif
++
+ static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
+ return (long)atomic_add_return(i, v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_add_return_unchecked(i, v);
++}
++
++#endif
++
+ static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
+ return (long)atomic_inc_return(v);
+ }
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
++{
++ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
++
++ return (long)atomic_inc_return_unchecked(v);
++}
++#endif
++
+ static inline long atomic_long_dec_return(atomic_long_t *l)
+ {
+ atomic_t *v = (atomic_t *)l;
+@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+
+ #endif /* BITS_PER_LONG == 64 */
+
++#ifdef CONFIG_PAX_REFCOUNT
++static inline void pax_refcount_needs_these_functions(void)
++{
++ atomic_read_unchecked((atomic_unchecked_t *)NULL);
++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
++ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
++ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
++ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
++ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
++ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
++#ifdef CONFIG_X86
++ atomic_clear_mask_unchecked(0, NULL);
++ atomic_set_mask_unchecked(0, NULL);
++#endif
++
++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
++}
++#else
++#define atomic_read_unchecked(v) atomic_read(v)
++#define atomic_set_unchecked(v, i) atomic_set((v), (i))
++#define atomic_add_unchecked(i, v) atomic_add((i), (v))
++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
++#define atomic_inc_unchecked(v) atomic_inc(v)
++#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
++#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
++#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
++#define atomic_dec_unchecked(v) atomic_dec(v)
++#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
++#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
++
++#define atomic_long_read_unchecked(v) atomic_long_read(v)
++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
++#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
++#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
++#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
++#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
++#endif
++
+ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
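
The long block above is the PAX_REFCOUNT split in a nutshell: the ordinary atomic types become overflow-checked reference counters, while the new *_unchecked variants keep plain wrapping semantics for the minority of counters (statistics, sequence numbers) where overflow is harmless or intended; without PAX_REFCOUNT the unchecked names simply alias the normal ops. The distinction can be modeled in userspace with the GCC/Clang overflow builtins (a sketch under that assumption, not the kernel implementation, which traps in architecture-specific code):

/* Checked vs. unchecked increment, modeling the PAX_REFCOUNT split. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } checked_t;     /* overflow is fatal */
typedef struct { int counter; } unchecked_t;   /* overflow wraps */

static void checked_inc(checked_t *v)
{
    int next;

    if (__builtin_add_overflow(v->counter, 1, &next))
        abort();   /* the kernel would log and kill the offender instead */
    v->counter = next;
}

static void unchecked_inc(unchecked_t *v)
{
    v->counter = (int)((unsigned int)v->counter + 1);  /* defined wrap */
}

int main(void)
{
    checked_t ref = { 0 };
    unchecked_t stat = { INT_MAX };

    checked_inc(&ref);      /* fine */
    unchecked_inc(&stat);   /* wraps to INT_MIN, by design */
    printf("ref=%d stat=%d\n", ref.counter, stat.counter);
    return 0;
}
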
+diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
+index e37963c..6f5b60b 100644
+--- a/include/asm-generic/atomic.h
++++ b/include/asm-generic/atomic.h
+@@ -158,7 +158,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically clears the bits set in @mask from @v
+ */
+ #ifndef atomic_clear_mask
+-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+ {
+ unsigned long flags;
+
+diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
+index b18ce4f..2ee2843 100644
+--- a/include/asm-generic/atomic64.h
++++ b/include/asm-generic/atomic64.h
+@@ -16,6 +16,8 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++typedef atomic64_t atomic64_unchecked_t;
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ extern long long atomic64_read(const atomic64_t *v);
+@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_GENERIC_ATOMIC64_H */
+diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
+index a60a7cc..0fe12f2 100644
+--- a/include/asm-generic/bitops/__fls.h
++++ b/include/asm-generic/bitops/__fls.h
+@@ -9,7 +9,7 @@
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+-static __always_inline unsigned long __fls(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+ int num = BITS_PER_LONG - 1;
+
+diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
+index 0576d1f..dad6c71 100644
+--- a/include/asm-generic/bitops/fls.h
++++ b/include/asm-generic/bitops/fls.h
+@@ -9,7 +9,7 @@
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+-static __always_inline int fls(int x)
++static __always_inline int __intentional_overflow(-1) fls(int x)
+ {
+ int r = 32;
+
+diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
+index b097cf8..3d40e14 100644
+--- a/include/asm-generic/bitops/fls64.h
++++ b/include/asm-generic/bitops/fls64.h
+@@ -15,7 +15,7 @@
+ * at position 64.
+ */
+ #if BITS_PER_LONG == 32
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+ __u32 h = x >> 32;
+ if (h)
+@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
+ return fls(x);
+ }
+ #elif BITS_PER_LONG == 64
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+ if (x == 0)
+ return 0;
+diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
+index 1bfcfe5..e04c5c9 100644
+--- a/include/asm-generic/cache.h
++++ b/include/asm-generic/cache.h
+@@ -6,7 +6,7 @@
+ * cache lines need to provide their own cache.h.
+ */
+
+-#define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5UL
++#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_GENERIC_CACHE_H */
+diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
+index 0d68a1e..b74a761 100644
+--- a/include/asm-generic/emergency-restart.h
++++ b/include/asm-generic/emergency-restart.h
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+ #define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+-static inline void machine_emergency_restart(void)
++static inline __noreturn void machine_emergency_restart(void)
+ {
+ machine_restart(NULL);
+ }
+diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
+index 0232ccb..13d9165 100644
+--- a/include/asm-generic/kmap_types.h
++++ b/include/asm-generic/kmap_types.h
+@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
+ KMAP_D(17) KM_NMI,
+ KMAP_D(18) KM_NMI_PTE,
+ KMAP_D(19) KM_KDB,
++KMAP_D(20) KM_CLEARPAGE,
+ /*
+ * Remember to update debug_kmap_atomic() when adding new kmap types!
+ */
+-KMAP_D(20) KM_TYPE_NR
++KMAP_D(21) KM_TYPE_NR
+ };
+
+ #undef KMAP_D
+diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
+index 9ceb03b..62b0b8f 100644
+--- a/include/asm-generic/local.h
++++ b/include/asm-generic/local.h
+@@ -23,24 +23,37 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i) atomic_long_set((&(l)->a),(i))
++#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+ #define local_add(i,l) atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
+
+ #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+ #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+ #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+ #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+ #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
++#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
+ #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+ #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
++#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
+
+ #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
++#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+ #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+ #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
+ #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
+index 725612b..9cc513a 100644
+--- a/include/asm-generic/pgtable-nopmd.h
++++ b/include/asm-generic/pgtable-nopmd.h
+@@ -1,14 +1,19 @@
+ #ifndef _PGTABLE_NOPMD_H
+ #define _PGTABLE_NOPMD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #include <asm-generic/pgtable-nopud.h>
+
+-struct mm_struct;
+-
+ #define __PAGETABLE_PMD_FOLDED
+
++#define PMD_SHIFT PUD_SHIFT
++#define PTRS_PER_PMD 1
++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
+ /*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+@@ -16,11 +21,6 @@ struct mm_struct;
+ */
+ typedef struct { pud_t pud; } pmd_t;
+
+-#define PMD_SHIFT PUD_SHIFT
+-#define PTRS_PER_PMD 1
+-#define PMD_SIZE (1UL << PMD_SHIFT)
+-#define PMD_MASK (~(PMD_SIZE-1))
+-
+ /*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
+index 810431d..0ec4804f 100644
+--- a/include/asm-generic/pgtable-nopud.h
++++ b/include/asm-generic/pgtable-nopud.h
+@@ -1,10 +1,15 @@
+ #ifndef _PGTABLE_NOPUD_H
+ #define _PGTABLE_NOPUD_H
+
+-#ifndef __ASSEMBLY__
+-
+ #define __PAGETABLE_PUD_FOLDED
+
++#define PUD_SHIFT PGDIR_SHIFT
++#define PTRS_PER_PUD 1
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++
++#ifndef __ASSEMBLY__
++
+ /*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+@@ -12,11 +17,6 @@
+ */
+ typedef struct { pgd_t pgd; } pud_t;
+
+-#define PUD_SHIFT PGDIR_SHIFT
+-#define PTRS_PER_PUD 1
+-#define PUD_SIZE (1UL << PUD_SHIFT)
+-#define PUD_MASK (~(PUD_SIZE-1))
+-
+ /*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
+ #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+ #define pgd_populate(mm, pgd, pud) do { } while (0)
++#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
+ /*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index bc00876..9aa9b1f 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -530,6 +530,22 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
+ #endif
+ }
+
++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#error KERNEXEC requires pax_open_kernel
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++#endif
++#endif
++
++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#error KERNEXEC requires pax_close_kernel
++#else
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++#endif
++
+ #endif /* CONFIG_MMU */
+
+ #endif /* !__ASSEMBLY__ */
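A sketch of the call pattern these stubs support (hypothetical, not taken from the patch): on KERNEXEC-capable architectures pax_open_kernel()/pax_close_kernel() temporarily lift write protection from otherwise read-only kernel data, such as objects marked with the __read_only attribute added to linux/cache.h further down, and the generic no-op stubs above let common code use the pattern unconditionally.

/* Hypothetical illustration only -- not part of the patch. */
#include <linux/cache.h>
#include <asm/pgtable.h>

static int important_setting __read_only;

static void update_setting(int v)
{
	pax_open_kernel();	/* e.g. clears CR0.WP on x86; no-op with the stubs above */
	important_setting = v;	/* the object lives in the write-protected region */
	pax_close_kernel();	/* restore protection */
}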
+diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
+index 0dd4e87..af5d035 100644
+--- a/include/asm-generic/siginfo.h
++++ b/include/asm-generic/siginfo.h
+@@ -90,9 +90,18 @@ typedef struct siginfo {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
++
++ /* SIGSYS */
++ struct {
++ void __user *_call_addr; /* calling user insn */
++ int _syscall; /* triggering system call number */
++ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
++ } _sigsys;
+ } _sifields;
+ } siginfo_t;
+
++/* If the arch shares siginfo, then it has SIGSYS. */
++#define __ARCH_SIGSYS
+ #endif
+
+ /*
+@@ -116,6 +125,11 @@ typedef struct siginfo {
+ #define si_addr_lsb _sifields._sigfault._addr_lsb
+ #define si_band _sifields._sigpoll._band
+ #define si_fd _sifields._sigpoll._fd
++#ifdef __ARCH_SIGSYS
++#define si_call_addr _sifields._sigsys._call_addr
++#define si_syscall _sifields._sigsys._syscall
++#define si_arch _sifields._sigsys._arch
++#endif
+
+ #ifdef __KERNEL__
+ #define __SI_MASK 0xffff0000u
+@@ -126,6 +140,7 @@ typedef struct siginfo {
+ #define __SI_CHLD (4 << 16)
+ #define __SI_RT (5 << 16)
+ #define __SI_MESGQ (6 << 16)
++#define __SI_SYS (7 << 16)
+ #define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
+ #else
+ #define __SI_KILL 0
+@@ -135,6 +150,7 @@ typedef struct siginfo {
+ #define __SI_CHLD 0
+ #define __SI_RT 0
+ #define __SI_MESGQ 0
++#define __SI_SYS 0
+ #define __SI_CODE(T,N) (N)
+ #endif
+
+@@ -232,6 +248,12 @@ typedef struct siginfo {
+ #define NSIGPOLL 6
+
+ /*
++ * SIGSYS si_codes
++ */
++#define SYS_SECCOMP (__SI_SYS|1) /* seccomp triggered */
++#define NSIGSYS 1
++
++/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
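From the consumer side, a hypothetical userspace sketch (not part of the patch) of decoding the new _sigsys fields after a seccomp filter delivers a trap; note that in the non-kernel branch above __SI_SYS is 0, so SYS_SECCOMP is simply 1.

/* Hypothetical userspace illustration only -- not part of the patch. */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
{
	if (info->si_code != SYS_SECCOMP)
		return;
	/* fprintf is not async-signal-safe; acceptable in a demo only */
	fprintf(stderr, "filtered syscall %d at %p (arch 0x%x)\n",
		info->si_syscall, info->si_call_addr,
		(unsigned int)info->si_arch);
}

static void install_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigsys_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSYS, &sa, NULL);
}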
+diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
+index 5c122ae..a2c13dc 100644
+--- a/include/asm-generic/syscall.h
++++ b/include/asm-generic/syscall.h
+@@ -142,4 +142,18 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args);
+
++/**
++ * syscall_get_arch - return the AUDIT_ARCH for the current system call
++ * @task: task of interest, must be in system call entry tracing
++ * @regs: task_pt_regs() of @task
++ *
++ * Returns the AUDIT_ARCH_* based on the system call convention in use.
++ *
++ * It's only valid to call this when @task is stopped on entry to a system
++ * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
++ *
++ * Note, at present this function is only required with
++ * CONFIG_HAVE_ARCH_SECCOMP_FILTER.
++ */
++int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
+ #endif /* _ASM_SYSCALL_H */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index b5e2e4c..6a5373e 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -217,6 +217,7 @@
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
++ *(.data..read_only) \
+ *(__vermagic) /* Kernel version magic */ \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+@@ -722,17 +723,18 @@
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+- * Note that this macros defines __per_cpu_load as an absolute symbol.
++ * Note that this macro defines per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU_SECTION.
+ */
+ #define PERCPU_VADDR(cacheline, vaddr, phdr) \
+- VMLINUX_SYMBOL(__per_cpu_load) = .; \
+- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
++ per_cpu_load = .; \
++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
+ - LOAD_OFFSET) { \
++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
+ PERCPU_INPUT(cacheline) \
+ } phdr \
+- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
+
+ /**
+ * PERCPU_SECTION - define output section for percpu area, simple version
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index ecc721d..029cf5a 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -34,7 +34,7 @@ struct crypto_type {
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+-};
++} __do_const;
+
+ struct crypto_instance {
+ struct crypto_alg alg;
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index bf4b2dc..2df6e61 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -72,6 +72,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/poll.h>
+ #include <asm/pgalloc.h>
++#include <asm/local.h>
+ #include "drm.h"
+
+ #include <linux/idr.h>
+@@ -284,10 +285,12 @@ do { \
+ * \param cmd command.
+ * \param arg argument.
+ */
+-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
++typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
++typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+ #define DRM_IOCTL_NR(n) _IOC_NR(n)
+@@ -302,9 +305,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ struct drm_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+- drm_ioctl_t *func;
++ drm_ioctl_t func;
+ unsigned int cmd_drv;
+-};
++} __do_const;
+
+ /**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+@@ -965,7 +968,7 @@ struct drm_info_list {
+ int (*show)(struct seq_file*, void*); /** show callback */
+ u32 driver_features; /**< Required driver features for this entry */
+ void *data;
+-};
++} __do_const;
+
+ /**
+ * debugfs node structure. This structure represents a debugfs file.
+@@ -1038,7 +1041,7 @@ struct drm_device {
+
+ /** \name Usage Counters */
+ /*@{ */
+- int open_count; /**< Outstanding files open */
++ local_t open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ atomic_t vma_count; /**< Outstanding vma areas open */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+@@ -1049,7 +1052,7 @@ struct drm_device {
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+- atomic_t counts[15];
++ atomic_unchecked_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+index 73b0712..2e581af 100644
+--- a/include/drm/drm_crtc_helper.h
++++ b/include/drm/drm_crtc_helper.h
+@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+-};
++} __no_const;
+
+ struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
+index 6bd325f..19a2404 100644
+--- a/include/drm/drm_mem_util.h
++++ b/include/drm/drm_mem_util.h
+@@ -31,7 +31,7 @@
+
+ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ {
+- if (size != 0 && nmemb > ULONG_MAX / size)
++ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+ {
+- if (size != 0 && nmemb > ULONG_MAX / size)
++ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
+index 26c1f78..6722682 100644
+--- a/include/drm/ttm/ttm_memory.h
++++ b/include/drm/ttm/ttm_memory.h
+@@ -47,7 +47,7 @@
+
+ struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+-};
++} __no_const;
+
+ /**
+ * struct ttm_mem_global - Global memory accounting structure.
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index a3ce901..fd50c75 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -329,6 +329,7 @@ header-y += scc.h
+ header-y += sched.h
+ header-y += screen_info.h
+ header-y += sdla.h
++header-y += seccomp.h
+ header-y += securebits.h
+ header-y += selinux_netlink.h
+ header-y += sem.h
+diff --git a/include/linux/a.out.h b/include/linux/a.out.h
+index e86dfca..40cc55f 100644
+--- a/include/linux/a.out.h
++++ b/include/linux/a.out.h
+@@ -39,6 +39,14 @@ enum machine_type {
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
+index 49a83ca..d0a847e 100644
+--- a/include/linux/atmdev.h
++++ b/include/linux/atmdev.h
+@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
+ #endif
+
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+@@ -406,7 +406,7 @@ struct atmdev_ops { /* only send is required */
+ int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
+ int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
+ struct module *owner;
+-};
++} __do_const;
+
+ struct atmphy_ops {
+ int (*start)(struct atm_dev *dev);
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index 2f81c6f..225b4e4 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -430,6 +430,7 @@ extern void audit_putname(const char *name);
+ extern void __audit_inode(const char *name, const struct dentry *dentry);
+ extern void __audit_inode_child(const struct dentry *dentry,
+ const struct inode *parent);
++extern void __audit_seccomp(unsigned long syscall, long signr, int code);
+ extern void __audit_ptrace(struct task_struct *t);
+
+ static inline int audit_dummy_context(void)
+@@ -453,6 +454,12 @@ static inline void audit_inode_child(const struct dentry *dentry,
+ }
+ void audit_core_dumps(long signr);
+
++static inline void audit_seccomp(unsigned long syscall, long signr, int code)
++{
++ if (unlikely(!audit_dummy_context()))
++ __audit_seccomp(syscall, signr, code);
++}
++
+ static inline void audit_ptrace(struct task_struct *t)
+ {
+ if (unlikely(!audit_dummy_context()))
+@@ -558,6 +565,8 @@ extern int audit_signals;
+ #define audit_inode(n,d) do { (void)(d); } while (0)
+ #define audit_inode_child(i,p) do { ; } while (0)
+ #define audit_core_dumps(i) do { ; } while (0)
++#define audit_seccomp(i,s,c) do { ; } while (0)
++#define __audit_seccomp(i,s,c) do { ; } while (0)
+ #define auditsc_get_stamp(c,t,s) (0)
+ #define audit_get_loginuid(t) (-1)
+ #define audit_get_sessionid(t) (-1)
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index d337419..c87c74b 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -18,7 +18,7 @@ struct pt_regs;
+ #define BINPRM_BUF_SIZE 128
+
+ #ifdef __KERNEL__
+-#include <linux/list.h>
++#include <linux/sched.h>
+
+ #define CORENAME_MAX_SIZE 128
+
+@@ -58,6 +58,7 @@ struct linux_binprm {
+ unsigned interp_flags;
+ unsigned interp_data;
+ unsigned long loader, exec;
++ char tcomm[TASK_COMM_LEN];
+ };
+
+ #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
+@@ -86,8 +87,10 @@ struct linux_binfmt {
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *cprm);
++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
++ void (*handle_mmap)(struct file *);
+ unsigned long min_coredump; /* minimal dump size */
+-};
++} __do_const;
+
+ extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
+
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index fc8a3ff..ad5938b 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -74,7 +74,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+-static inline __u32 rol32(__u32 word, unsigned int shift)
++static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
+ {
+ return (word << shift) | (word >> (32 - shift));
+ }
+@@ -84,7 +84,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+-static inline __u32 ror32(__u32 word, unsigned int shift)
++static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
+ {
+ return (word >> shift) | (word << (32 - shift));
+ }
+@@ -140,7 +140,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
+ return (__s32)(value << shift) >> shift;
+ }
+
+-static inline unsigned fls_long(unsigned long l)
++static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
+ {
+ if (sizeof(l) == 4)
+ return fls(l);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index ff039f0..cdf89ae 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1316,7 +1316,7 @@ struct block_device_operations {
+ /* this callback is with swap_lock and sometimes page table lock held */
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ struct module *owner;
+-};
++} __do_const;
+
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+ unsigned long);
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index 4d1a074..88f929a 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -162,7 +162,7 @@ struct blk_trace {
+ struct dentry *dir;
+ struct dentry *dropped_file;
+ struct dentry *msg_file;
+- atomic_t dropped;
++ atomic_unchecked_t dropped;
+ };
+
+ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
+index 83195fb..46fe38f 100644
+--- a/include/linux/byteorder/little_endian.h
++++ b/include/linux/byteorder/little_endian.h
+@@ -42,51 +42,51 @@
+
+ static inline __le64 __cpu_to_le64p(const __u64 *p)
+ {
+- return (__force __le64)*p;
++ return (__force const __le64)*p;
+ }
+-static inline __u64 __le64_to_cpup(const __le64 *p)
++static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
+ {
+- return (__force __u64)*p;
++ return (__force const __u64)*p;
+ }
+ static inline __le32 __cpu_to_le32p(const __u32 *p)
+ {
+- return (__force __le32)*p;
++ return (__force const __le32)*p;
+ }
+ static inline __u32 __le32_to_cpup(const __le32 *p)
+ {
+- return (__force __u32)*p;
++ return (__force const __u32)*p;
+ }
+ static inline __le16 __cpu_to_le16p(const __u16 *p)
+ {
+- return (__force __le16)*p;
++ return (__force const __le16)*p;
+ }
+ static inline __u16 __le16_to_cpup(const __le16 *p)
+ {
+- return (__force __u16)*p;
++ return (__force const __u16)*p;
+ }
+ static inline __be64 __cpu_to_be64p(const __u64 *p)
+ {
+- return (__force __be64)__swab64p(p);
++ return (__force const __be64)__swab64p(p);
+ }
+ static inline __u64 __be64_to_cpup(const __be64 *p)
+ {
+- return __swab64p((__u64 *)p);
++ return __swab64p((const __u64 *)p);
+ }
+ static inline __be32 __cpu_to_be32p(const __u32 *p)
+ {
+- return (__force __be32)__swab32p(p);
++ return (__force const __be32)__swab32p(p);
+ }
+-static inline __u32 __be32_to_cpup(const __be32 *p)
++static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
+ {
+- return __swab32p((__u32 *)p);
++ return __swab32p((const __u32 *)p);
+ }
+ static inline __be16 __cpu_to_be16p(const __u16 *p)
+ {
+- return (__force __be16)__swab16p(p);
++ return (__force const __be16)__swab16p(p);
+ }
+ static inline __u16 __be16_to_cpup(const __be16 *p)
+ {
+- return __swab16p((__u16 *)p);
++ return __swab16p((const __u16 *)p);
+ }
+ #define __cpu_to_le64s(x) do { (void)(x); } while (0)
+ #define __le64_to_cpus(x) do { (void)(x); } while (0)
+diff --git a/include/linux/cache.h b/include/linux/cache.h
+index 4c57065..40346da 100644
+--- a/include/linux/cache.h
++++ b/include/linux/cache.h
+@@ -16,6 +16,14 @@
+ #define __read_mostly
+ #endif
+
++#ifndef __read_only
++#ifdef CONFIG_PAX_KERNEXEC
++#error KERNEXEC requires __read_only
++#else
++#define __read_only __read_mostly
++#endif
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index a63d13d..f15d415 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -548,10 +548,16 @@ extern bool capable(int cap);
+ extern bool ns_capable(struct user_namespace *ns, int cap);
+ extern bool task_ns_capable(struct task_struct *t, int cap);
+ extern bool nsown_capable(int cap);
++extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
++extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
++extern bool capable_nolog(int cap);
+
+ /* audit system wants to get cap info from files as well */
+ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+
++extern int is_privileged_binary(const struct dentry *dentry);
++extern int is_root_privileged_binary(const struct dentry *dentry);
++
+ #endif /* __KERNEL__ */
+
+ #endif /* !_LINUX_CAPABILITY_H */
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 35eae4b..5673e99 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -985,7 +985,6 @@ struct cdrom_device_ops {
+
+ /* driver specifications */
+ const int capability; /* capability flags */
+- int n_minors; /* number of active minor devices */
+ /* handle uniform packets for scsi type devices (scsi,atapi) */
+ int (*generic_packet) (struct cdrom_device_info *,
+ struct packet_command *);
+diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
+index 04ffb2e..6799180 100644
+--- a/include/linux/cleancache.h
++++ b/include/linux/cleancache.h
+@@ -31,7 +31,7 @@ struct cleancache_ops {
+ void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
+ void (*flush_inode)(int, struct cleancache_filekey);
+ void (*flush_fs)(int);
+-};
++} __no_const;
+
+ extern struct cleancache_ops
+ cleancache_register_ops(struct cleancache_ops *ops);
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index d42bd48..554dcd5 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -240,7 +240,7 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
+ int version, void __user *uptr);
+ long compat_sys_msgctl(int first, int second, void __user *uptr);
+ long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
+- void __user *uptr);
++ void __user *uptr) __intentional_overflow(0);
+ long compat_sys_shmctl(int first, int second, void __user *uptr);
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+ unsigned nsems, const struct compat_timespec __user *timeout);
+@@ -334,7 +334,7 @@ extern int compat_ptrace_request(struct task_struct *child,
+ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data);
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+- compat_long_t addr, compat_long_t data);
++ compat_ulong_t addr, compat_ulong_t data);
+
+ /*
+ * epoll (fs/eventpoll.c) compat bits follow ...
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index 643d6c4..3e46a17 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -46,6 +46,21 @@
+ #endif
+
+ #if __GNUC_MINOR__ >= 5
++
++#ifdef CONSTIFY_PLUGIN
++#define __no_const __attribute__((no_const))
++#define __do_const __attribute__((do_const))
++#endif
++
++#ifdef SIZE_OVERFLOW_PLUGIN
++#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
++#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
++#endif
++
++#ifdef LATENT_ENTROPY_PLUGIN
++#define __latent_entropy __attribute__((latent_entropy))
++#endif
++
+ /*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+@@ -61,6 +76,11 @@
+ #define __noclone __attribute__((__noclone__))
+
+ #endif
++
++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
++#define __bos0(ptr) __bos((ptr), 0)
++#define __bos1(ptr) __bos((ptr), 1)
+ #endif
+
+ #if __GNUC_MINOR__ > 0
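These macros only turn into real attributes when the matching gcc plugin defines CONSTIFY_PLUGIN, SIZE_OVERFLOW_PLUGIN, or LATENT_ENTROPY_PLUGIN on the command line; plain builds fall back to the empty stubs added to linux/compiler.h in the next hunk. A hypothetical sketch of the annotations at a declaration site (my_buf_alloc and my_mix are made-up names):

/* Hypothetical illustration only -- not part of the patch. */
#include <linux/types.h>

void *my_buf_alloc(unsigned long len) __size_overflow(1) __alloc_size(1);

static inline u32 my_mix(u32 x) __intentional_overflow(-1);
static inline u32 my_mix(u32 x)
{
	return x * 2654435761U;	/* multiplicative hash: wrapping is the point */
}

Here __size_overflow(1) asks the plugin to instrument arithmetic feeding the first argument, __alloc_size(1) tells gcc the size of the returned object, and __intentional_overflow(-1) exempts the whole function from overflow reporting.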
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 320d6c9..2d1df6b 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -5,31 +5,51 @@
+
+ #ifdef __CHECKER__
+ # define __user __attribute__((noderef, address_space(1)))
++# define __force_user __force __user
+ # define __kernel __attribute__((address_space(0)))
++# define __force_kernel __force __kernel
+ # define __safe __attribute__((safe))
+ # define __force __attribute__((force))
+ # define __nocast __attribute__((nocast))
+ # define __iomem __attribute__((noderef, address_space(2)))
++# define __force_iomem __force __iomem
+ # define __acquires(x) __attribute__((context(x,0,1)))
+ # define __releases(x) __attribute__((context(x,1,0)))
+ # define __acquire(x) __context__(x,1)
+ # define __release(x) __context__(x,-1)
+ # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+ # define __percpu __attribute__((noderef, address_space(3)))
++# define __force_percpu __force __percpu
+ #ifdef CONFIG_SPARSE_RCU_POINTER
+ # define __rcu __attribute__((noderef, address_space(4)))
++# define __force_rcu __force __rcu
+ #else
+ # define __rcu
++# define __force_rcu
+ #endif
+ extern void __chk_user_ptr(const volatile void __user *);
+ extern void __chk_io_ptr(const volatile void __iomem *);
+ #else
+-# define __user
+-# define __kernel
++# ifdef CHECKER_PLUGIN
++//# define __user
++//# define __force_user
++//# define __kernel
++//# define __force_kernel
++# else
++# ifdef STRUCTLEAK_PLUGIN
++# define __user __attribute__((user))
++# else
++# define __user
++# endif
++# define __force_user
++# define __kernel
++# define __force_kernel
++# endif
+ # define __safe
+ # define __force
+ # define __nocast
+ # define __iomem
++# define __force_iomem
+ # define __chk_user_ptr(x) (void)0
+ # define __chk_io_ptr(x) (void)0
+ # define __builtin_warning(x, y...) (1)
+@@ -39,7 +59,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ # define __release(x) (void)0
+ # define __cond_lock(x,c) (c)
+ # define __percpu
++# define __force_percpu
+ # define __rcu
++# define __force_rcu
+ #endif
+
+ #ifdef __KERNEL__
+@@ -264,6 +286,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __attribute_const__ /* unimplemented */
+ #endif
+
++#ifndef __no_const
++# define __no_const
++#endif
++
++#ifndef __do_const
++# define __do_const
++#endif
++
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __latent_entropy
++# define __latent_entropy
++#endif
++
+ /*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+@@ -273,6 +311,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #define __cold
+ #endif
+
++#ifndef __alloc_size
++#define __alloc_size(...)
++#endif
++
++#ifndef __bos
++#define __bos(ptr, arg)
++#endif
++
++#ifndef __bos0
++#define __bos0(ptr)
++#endif
++
++#ifndef __bos1
++#define __bos1(ptr)
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+@@ -292,6 +346,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ #endif
+ #ifndef __compiletime_error
+ # define __compiletime_error(message)
++# define __compiletime_error_fallback(condition) \
++ do { ((void)sizeof(char[1 - 2*!!(condition)])); } while (0)
++#else
++# define __compiletime_error_fallback(condition) do { } while (0)
++#endif
++
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __intentional_overflow
++# define __intentional_overflow(...)
+ #endif
+
+ /*
+@@ -306,6 +372,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
++#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
+
+ #endif /* __LINUX_COMPILER_H */
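The ACCESS_ONCE change is the subtle one in this hunk: the added const qualifier turns the classic macro into a compile-time read-only accessor, so writes have to migrate to the new ACCESS_ONCE_RW. A hypothetical sketch (not part of the patch):

/* Hypothetical illustration only -- not part of the patch. */
#include <linux/compiler.h>

static int shared_state;

static int peek(void)
{
	return ACCESS_ONCE(shared_state);	/* reads behave exactly as before */
}

static void poke(int v)
{
	ACCESS_ONCE_RW(shared_state) = v;	/* ACCESS_ONCE(x) = v no longer compiles */
}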
+diff --git a/include/linux/completion.h b/include/linux/completion.h
+index 51494e6..340575ab 100644
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
+@@ -77,14 +77,14 @@ static inline void init_completion(struct completion *x)
+ }
+
+ extern void wait_for_completion(struct completion *);
+-extern int wait_for_completion_interruptible(struct completion *x);
+-extern int wait_for_completion_killable(struct completion *x);
++extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
++extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
+ extern unsigned long wait_for_completion_timeout(struct completion *x,
+- unsigned long timeout);
++ unsigned long timeout) __intentional_overflow(-1);
+ extern long wait_for_completion_interruptible_timeout(
+- struct completion *x, unsigned long timeout);
++ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
+ extern long wait_for_completion_killable_timeout(
+- struct completion *x, unsigned long timeout);
++ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
+ extern bool try_wait_for_completion(struct completion *x);
+ extern bool completion_done(struct completion *x);
+
+diff --git a/include/linux/configfs.h b/include/linux/configfs.h
+index 3081c58..7714c00 100644
+--- a/include/linux/configfs.h
++++ b/include/linux/configfs.h
+@@ -125,7 +125,7 @@ struct configfs_attribute {
+ const char *ca_name;
+ struct module *ca_owner;
+ mode_t ca_mode;
+-};
++} __do_const;
+
+ /*
+ * Users often need to create attribute structures for their configurable
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 9c3e071..8a8ebea 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -108,7 +108,7 @@ enum {
+ /* Need to know about CPUs going up/down? */
+ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+ #define cpu_notifier(fn, pri) { \
+- static struct notifier_block fn##_nb __cpuinitdata = \
++ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_cpu_notifier(&fn##_nb); \
+ }
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 6216115..68bf1d0 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -236,7 +236,7 @@ struct cpufreq_driver {
+ int (*suspend) (struct cpufreq_policy *policy);
+ int (*resume) (struct cpufreq_policy *policy);
+ struct freq_attr **attr;
+-};
++} __do_const;
+
+ /* flags */
+
+@@ -295,6 +295,7 @@ struct global_attr {
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+ };
++typedef struct global_attr __no_const global_attr_no_const;
+
+ #define define_one_global_ro(_name) \
+ static struct global_attr _name = \
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index 7408af8..8d6f9dd 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -49,7 +49,8 @@ struct cpuidle_state {
+ int (*enter) (struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
+-};
++} __do_const;
++typedef struct cpuidle_state __no_const cpuidle_state_no_const;
+
+ /* Idle State Flags */
+ #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
+@@ -181,7 +182,7 @@ struct cpuidle_governor {
+ void (*reflect) (struct cpuidle_device *dev, int index);
+
+ struct module *owner;
+-};
++} __do_const;
+
+ #ifdef CONFIG_CPU_IDLE
+
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 4f7a632..b9e6f95 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -117,17 +117,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+ }
+
+ /* Valid inputs for n are -1 and 0. */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+ return n+1;
+ }
+
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+ return n+1;
+ }
+
+-static inline unsigned int cpumask_next_and(int n,
++static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
+ const struct cpumask *srcp,
+ const struct cpumask *andp)
+ {
+@@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
+ {
+ /* -1 is a legal arg here. */
+ if (n != -1)
+@@ -181,7 +181,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
++static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
+ {
+ /* -1 is a legal arg here. */
+ if (n != -1)
+@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+ return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ }
+
+-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
++int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
+ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+ /**
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 4030896..8d6f342 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+ static inline void validate_process_creds(void)
+ {
+ }
++static inline void validate_task_creds(struct task_struct *task)
++{
++}
+ #endif
+
+ /**
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 8a94217..15d49e3 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -365,7 +365,7 @@ struct cipher_tfm {
+ const u8 *key, unsigned int keylen);
+ void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+-};
++} __no_const;
+
+ struct hash_tfm {
+ int (*init)(struct hash_desc *desc);
+@@ -386,13 +386,13 @@ struct compress_tfm {
+ int (*cot_decompress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+-};
++} __no_const;
+
+ struct rng_tfm {
+ int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+-};
++} __no_const;
+
+ #define crt_ablkcipher crt_u.ablkcipher
+ #define crt_aead crt_u.aead
+diff --git a/include/linux/ctype.h b/include/linux/ctype.h
+index 8acfe31..6ffccd63 100644
+--- a/include/linux/ctype.h
++++ b/include/linux/ctype.h
+@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
+ * Fast implementation of tolower() for internal usage. Do not use in your
+ * code.
+ */
+-static inline char _tolower(const char c)
++static inline unsigned char _tolower(const unsigned char c)
+ {
+ return c | 0x20;
+ }
+diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
+index 7925bf0..d5143d2 100644
+--- a/include/linux/decompress/mm.h
++++ b/include/linux/decompress/mm.h
+@@ -77,7 +77,7 @@ static void free(void *where)
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate */
+
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+
+ #define large_malloc(a) vmalloc(a)
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index 98ce812..9afa76d 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -100,7 +100,7 @@ struct devfreq_governor {
+ int (*init)(struct devfreq *this);
+ void (*exit)(struct devfreq *this);
+ const bool no_central_polling;
+-};
++} __do_const;
+
+ /**
+ * struct devfreq - Device devfreq structure
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 3136ede..9a589c5 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -427,7 +427,7 @@ struct device_type {
+ void (*release)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+-};
++} __do_const;
+
+ /* interface for exporting device attributes */
+ struct device_attribute {
+@@ -437,6 +437,7 @@ struct device_attribute {
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ };
++typedef struct device_attribute __no_const device_attribute_no_const;
+
+ #define DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index e13117c..e9fc938 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -46,7 +46,7 @@ struct dma_map_ops {
+ u64 (*get_required_mask)(struct device *dev);
+ #endif
+ int is_phys;
+-};
++} __do_const;
+
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
+index 75f53f8..5c7972d 100644
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -881,9 +881,9 @@ struct dma_pinned_list {
+ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
+ void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
+
+-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
++dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+ struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
+-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
++dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+ struct dma_pinned_list *pinned_list, struct page *page,
+ unsigned int offset, size_t len);
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 88c953d..48685de 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -486,6 +486,7 @@ struct efivar_operations {
+ efi_set_variable_t *set_variable;
+ efi_query_variable_store_t *query_variable_store;
+ };
++typedef struct efivar_operations __no_const efivar_operations_no_const;
+
+ struct efivars {
+ /*
+diff --git a/include/linux/elf.h b/include/linux/elf.h
+index 31f0508..5421c01 100644
+--- a/include/linux/elf.h
++++ b/include/linux/elf.h
+@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
+ #define PT_GNU_EH_FRAME 0x6474e550
+
+ #define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
++
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
+
+ /*
+ * Extended Numbering
+@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_ENCODING 32
+ #define OLD_DT_LOOS 0x60000000
+ #define DT_LOOS 0x6000000d
+@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elf_note elf32_note
+ #define elf_addr_t Elf32_Off
+ #define Elf_Half Elf32_Half
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elf_note elf64_note
+ #define elf_addr_t Elf64_Off
+ #define Elf_Half Elf64_Half
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
+diff --git a/include/linux/err.h b/include/linux/err.h
+index f2edce2..cc2082c 100644
+--- a/include/linux/err.h
++++ b/include/linux/err.h
+@@ -19,12 +19,12 @@
+
+ #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+
+-static inline void * __must_check ERR_PTR(long error)
++static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
+ {
+ return (void *) error;
+ }
+
+-static inline long __must_check PTR_ERR(const void *ptr)
++static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
+ {
+ return (long) ptr;
+ }
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 73845ce..e5678a7 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -691,7 +691,7 @@ struct fb_ops {
+ /* called at KDB enter and leave time to prepare the console */
+ int (*fb_debug_enter)(struct fb_info *info);
+ int (*fb_debug_leave)(struct fb_info *info);
+-};
++} __do_const;
+
+ #ifdef CONFIG_FB_TILEBLITTING
+ #define FB_TILE_CURSOR_NONE 0
+diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
+index 82163c4..bd0f588 100644
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -71,7 +71,7 @@ struct file_operations;
+ struct vfsmount;
+ struct dentry;
+
+-extern int expand_files(struct files_struct *, int nr);
++extern int expand_files(struct files_struct *, unsigned int nr);
+ extern void free_fdtable_rcu(struct rcu_head *rcu);
+ extern void __init files_defer_init(void);
+
+@@ -101,7 +101,7 @@ struct files_struct *get_files_struct(struct task_struct *);
+ void put_files_struct(struct files_struct *fs);
+ void reset_files_struct(struct files_struct *);
+ int unshare_files(struct files_struct **);
+-struct files_struct *dup_fd(struct files_struct *, int *);
++struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+
+ extern struct kmem_cache *files_cachep;
+
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 8eeb205..13d571c 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -10,6 +10,7 @@
+
+ #ifdef __KERNEL__
+ #include <linux/atomic.h>
++#include <linux/compat.h>
+ #endif
+
+ /*
+@@ -132,8 +133,19 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_COMPAT
++/*
++ * A struct sock_filter is architecture independent.
++ */
++struct compat_sock_fprog {
++ u16 len;
++ compat_uptr_t filter; /* struct sock_filter * */
++};
++#endif
++
+ struct sk_buff;
+ struct sock;
++struct bpf_jit_work;
+
+ struct sk_filter
+ {
+@@ -141,6 +153,9 @@ struct sk_filter
+ unsigned int len; /* Number of filter blocks */
+ unsigned int (*bpf_func)(const struct sk_buff *skb,
+ const struct sock_filter *filter);
++#ifdef CONFIG_BPF_JIT
++ struct bpf_jit_work *work;
++#endif
+ struct rcu_head rcu;
+ struct sock_filter insns[0];
+ };
+@@ -228,6 +243,7 @@ enum {
+ BPF_S_ANC_HATYPE,
+ BPF_S_ANC_RXHASH,
+ BPF_S_ANC_CPU,
++ BPF_S_ANC_SECCOMP_LD_W,
+ };
+
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index dd74385..c745e49 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1624,7 +1624,8 @@ struct file_operations {
+ int (*setlease)(struct file *, long, struct file_lock **);
+ long (*fallocate)(struct file *file, int mode, loff_t offset,
+ loff_t len);
+-};
++} __do_const;
++typedef struct file_operations __no_const file_operations_no_const;
+
+ struct inode_operations {
+ struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+@@ -1891,6 +1892,8 @@ struct file_system_type {
+ struct lock_class_key i_mutex_dir_key;
+ };
+
++#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
++
+ extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
+ void *data, int (*fill_super)(struct super_block *, void *, int));
+ extern struct dentry *mount_bdev(struct file_system_type *fs_type,
+@@ -2722,5 +2725,15 @@ static inline void inode_has_no_xattr(struct inode *inode)
+ inode->i_flags |= S_NOSEC;
+ }
+
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++ umode_t mode = inode->i_mode;
++ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++ return false;
++#endif
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_FS_H */
+diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
+index 003dc0f..3c4ea97 100644
+--- a/include/linux/fs_struct.h
++++ b/include/linux/fs_struct.h
+@@ -6,7 +6,7 @@
+ #include <linux/seqlock.h>
+
+ struct fs_struct {
+- int users;
++ atomic_t users;
+ spinlock_t lock;
+ seqcount_t seq;
+ int umask;
+diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
+index ce31408..b1ad003 100644
+--- a/include/linux/fscache-cache.h
++++ b/include/linux/fscache-cache.h
+@@ -102,7 +102,7 @@ struct fscache_operation {
+ fscache_operation_release_t release;
+ };
+
+-extern atomic_t fscache_op_debug_id;
++extern atomic_unchecked_t fscache_op_debug_id;
+ extern void fscache_op_work_func(struct work_struct *work);
+
+ extern void fscache_enqueue_operation(struct fscache_operation *);
+@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
+ {
+ INIT_WORK(&op->work, fscache_op_work_func);
+ atomic_set(&op->usage, 1);
+- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
++ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
+ op->processor = processor;
+ op->release = release;
+ INIT_LIST_HEAD(&op->pend_link);
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index 9ec20de..8007b8a 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -152,7 +152,7 @@ struct fscache_cookie_def {
+ * - this is mandatory for any object that may have data
+ */
+ void (*now_uncached)(void *cookie_netfs_data);
+-};
++} __do_const;
+
+ /*
+ * fscache cached network filesystem type
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 2a53f10..e3cd3e0 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -194,6 +194,9 @@ static inline void fsnotify_access(struct file *file)
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_ACCESS;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+@@ -212,6 +215,9 @@ static inline void fsnotify_modify(struct file *file)
+ struct inode *inode = path->dentry->d_inode;
+ __u32 mask = FS_MODIFY;
+
++ if (is_sidechannel_device(inode))
++ return;
++
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+@@ -314,7 +320,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+ */
+ static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+ {
+- return kstrdup(name, GFP_KERNEL);
++ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
+ }
+
+ /*
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index 82924bf..1aa58e7 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -256,7 +256,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+ extern int trace_add_event_call(struct ftrace_event_call *call);
+ extern void trace_remove_event_call(struct ftrace_event_call *call);
+
+-#define is_signed_type(type) (((type)(-1)) < 0)
++#define is_signed_type(type) (((type)(-1)) < (type)1)
+
+ int trace_set_clr_event(const char *system, const char *event, int set);
+
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 4eec461..4ff5db5 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -185,7 +185,7 @@ struct gendisk {
+ struct kobject *slave_dir;
+
+ struct timer_rand_state *random;
+- atomic_t sync_io; /* RAID */
++ atomic_unchecked_t sync_io; /* RAID */
+ struct disk_events *ev;
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+@@ -420,7 +420,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
+ extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+
+ /* drivers/char/random.c */
+-extern void add_disk_randomness(struct gendisk *disk);
++extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+ extern void rand_initialize_disk(struct gendisk *disk);
+
+ static inline sector_t get_start_sect(struct block_device *bdev)
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 3a76faf..c0592c7 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -37,6 +37,12 @@ struct vm_area_struct;
+ #define ___GFP_NO_KSWAPD 0x400000u
+ #define ___GFP_OTHER_NODE 0x800000u
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define ___GFP_USERCOPY 0x1000000u
++#else
++#define ___GFP_USERCOPY 0
++#endif
++
+ /*
+ * GFP bitmasks..
+ *
+@@ -85,6 +91,7 @@ struct vm_area_struct;
+
+ #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
+ #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
++#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
+
+ /*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+@@ -92,7 +99,7 @@ struct vm_area_struct;
+ */
+ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+-#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
++#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+ /* This equals 0, but use constants in case they ever change */
+@@ -146,6 +153,8 @@ struct vm_area_struct;
+ /* 4GB DMA on some platforms */
+ #define GFP_DMA32 __GFP_DMA32
+
++#define GFP_USERCOPY __GFP_USERCOPY
++
+ /* Convert GFP flags to their corresponding migrate type */
+ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+ {
+diff --git a/include/linux/gracl.h b/include/linux/gracl.h
+new file mode 100644
+index 0000000..edb2cb6
+--- /dev/null
++++ b/include/linux/gracl.h
+@@ -0,0 +1,340 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/capability.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION "grsecurity 3.0"
++#define GRSECURITY_VERSION 0x3000
++
++enum {
++ GR_SHUTDOWN = 0,
++ GR_ENABLE = 1,
++ GR_SPROLE = 2,
++ GR_OLDRELOAD = 3,
++ GR_SEGVMOD = 4,
++ GR_STATUS = 5,
++ GR_UNSPROLE = 6,
++ GR_PASSSET = 7,
++ GR_SPROLEPAM = 8,
++ GR_RELOAD = 9,
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++ GR_PW_LEN = 128,
++ GR_SALT_LEN = 16,
++ GR_SHA_LEN = 32,
++};
++
++enum {
++ GR_SPROLE_LEN = 64,
++};
++
++enum {
++ GR_NO_GLOB = 0,
++ GR_REG_GLOB,
++ GR_CREATE_GLOB
++};
++
++#define GR_NLIMITS 32
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++ unsigned char *rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++ __u32 key;
++ ino_t inode;
++ dev_t device;
++ char *name;
++ __u16 len;
++ __u8 deleted;
++ struct name_entry *prev;
++ struct name_entry *next;
++};
++
++struct inodev_entry {
++ struct name_entry *nentry;
++ struct inodev_entry *prev;
++ struct inodev_entry *next;
++};
++
++struct acl_role_db {
++ struct acl_role_label **r_hash;
++ __u32 r_size;
++};
++
++struct inodev_db {
++ struct inodev_entry **i_hash;
++ __u32 i_size;
++};
++
++struct name_db {
++ struct name_entry **n_hash;
++ __u32 n_size;
++};
++
++struct crash_uid {
++ uid_t uid;
++ unsigned long expires;
++};
++
++struct gr_hash_struct {
++ void **table;
++ void **nametable;
++ void *first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++
++struct acl_subject_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++ kernel_cap_t cap_invert_audit;
++
++ struct rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ uid_t *user_transitions;
++ gid_t *group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 sock_families[2];
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ struct acl_ip_label **ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ unsigned long expires;
++
++ struct acl_subject_label *parent_subject;
++ struct gr_hash_struct *hash;
++ struct acl_subject_label *prev;
++ struct acl_subject_label *next;
++
++ struct acl_object_label **obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip {
++ __u32 addr;
++ __u32 netmask;
++
++ struct role_allowed_ip *prev;
++ struct role_allowed_ip *next;
++};
++
++struct role_transition {
++ char *rolename;
++
++ struct role_transition *prev;
++ struct role_transition *next;
++};
++
++struct acl_role_label {
++ char *rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ unsigned long expires;
++
++ struct acl_subject_label *root_label;
++ struct gr_hash_struct *hash;
++
++ struct acl_role_label *prev;
++ struct acl_role_label *next;
++
++ struct role_transition *transitions;
++ struct role_allowed_ip *allowed_ips;
++ uid_t *domain_children;
++ __u16 domain_child_num;
++
++ umode_t umask;
++
++ struct acl_subject_label **subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++ struct acl_role_label **r_table;
++ __u32 num_pointers; /* Number of allocations to track */
++ __u32 num_roles; /* Number of roles */
++ __u32 num_domain_children; /* Number of domain children */
++ __u32 num_subjects; /* Number of subjects */
++ __u32 num_objects; /* Number of objects */
++};
++
++struct acl_object_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++
++ struct acl_subject_label *nested;
++ struct acl_object_label *globbed;
++
++	/* the next two pointers are not used */
++
++ struct acl_object_label *prev;
++ struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++ char *iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++	/* the next two pointers are not used */
++
++ struct acl_ip_label *prev;
++ struct acl_ip_label *next;
++};
++
++struct gr_arg {
++ struct user_acl_role_db role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ struct sprole_pw *sprole_pws;
++ dev_t segv_device;
++ ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper {
++ struct gr_arg *arg;
++ __u32 version;
++ __u32 size;
++};
++
++struct subject_map {
++ struct acl_subject_label *user;
++ struct acl_subject_label *kernel;
++ struct subject_map *prev;
++ struct subject_map *next;
++};
++
++struct acl_subj_map_db {
++ struct subject_map **s_hash;
++ __u32 s_size;
++};
++
++struct gr_policy_state {
++ struct sprole_pw **acl_special_roles;
++ __u16 num_sprole_pws;
++ struct acl_role_label *kernel_role;
++ struct acl_role_label *role_list;
++ struct acl_role_label *default_role;
++ struct acl_role_db acl_role_set;
++ struct acl_subj_map_db subj_map_set;
++ struct name_db name_set;
++ struct inodev_db inodev_set;
++};
++
++struct gr_alloc_state {
++ unsigned long alloc_stack_next;
++ unsigned long alloc_stack_size;
++ void **alloc_stack;
++};
++
++struct gr_reload_state {
++ struct gr_policy_state oldpolicy;
++ struct gr_alloc_state oldalloc;
++ struct gr_policy_state newpolicy;
++ struct gr_alloc_state newalloc;
++ struct gr_policy_state *oldpolicy_ptr;
++ struct gr_alloc_state *oldalloc_ptr;
++ unsigned char oldmode;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler.
++   They make good use of the low bits of the inode: generally 0-1 passes
++   through the loop for a successful match, 0-3 for an unsuccessful match.
++   Shift/add algorithm with modulus of table size and an XOR. */
++
++static __inline__ unsigned int
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++{
++ return ((((uid + type) << (16 + type)) ^ uid) % sz);
++}
++
++static __inline__ unsigned int
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
++{
++ return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned int
++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++{
++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned int
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
++{
++ return full_name_hash((const unsigned char *)name, len) % sz;
++}
++
++#define FOR_EACH_SUBJECT_START(role,subj,iter) \
++ subj = NULL; \
++ iter = 0; \
++ while (iter < role->subj_hash_size) { \
++ if (subj == NULL) \
++ subj = role->subj_hash[iter]; \
++ if (subj == NULL) { \
++ iter++; \
++ continue; \
++ }
++
++#define FOR_EACH_SUBJECT_END(subj,iter) \
++ subj = subj->next; \
++ if (subj == NULL) \
++ iter++; \
++ }
++
++
++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
++ subj = role->hash->first; \
++ while (subj != NULL) {
++
++#define FOR_EACH_NESTED_SUBJECT_END(subj) \
++ subj = subj->next; \
++ }
++
++#endif
++
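The four gr_*hash() helpers above are deliberately cheap shift/add/XOR mixers reduced modulo the table size. A minimal userspace sketch (not part of the patch; uint64_t stands in for ino_t/dev_t, and the table size is hypothetical) shows gr_fhash() spreading inode/device pairs across buckets:

#include <stdio.h>
#include <stdint.h>

/* Userspace re-creation of gr_fhash() above; sz is the bucket count. */
static unsigned int fhash(uint64_t ino, uint64_t dev, unsigned int sz)
{
	return (unsigned int)(((ino + dev) ^
		((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	unsigned int sz = 256;	/* hypothetical table size */
	uint64_t ino;

	/* Sequential inodes on one device land in scattered buckets. */
	for (ino = 1000; ino < 1008; ino++)
		printf("inode %llu -> bucket %u\n",
		       (unsigned long long)ino, fhash(ino, 8, sz));
	return 0;
}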
+diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
+new file mode 100644
+index 0000000..33ebd1f
+--- /dev/null
++++ b/include/linux/gracl_compat.h
+@@ -0,0 +1,156 @@
++#ifndef GR_ACL_COMPAT_H
++#define GR_ACL_COMPAT_H
++
++#include <linux/resource.h>
++#include <asm/resource.h>
++
++struct sprole_pw_compat {
++ compat_uptr_t rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++};
++
++struct gr_hash_struct_compat {
++ compat_uptr_t table;
++ compat_uptr_t nametable;
++ compat_uptr_t first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++struct acl_subject_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++ kernel_cap_t cap_mask;
++ kernel_cap_t cap_lower;
++ kernel_cap_t cap_invert_audit;
++
++ struct compat_rlimit res[GR_NLIMITS];
++ __u32 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ compat_uptr_t user_transitions;
++ compat_uptr_t group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 sock_families[2];
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ compat_uptr_t ips;
++ __u32 ip_num;
++ __u32 inaddr_any_override;
++
++ __u32 crashes;
++ compat_ulong_t expires;
++
++ compat_uptr_t parent_subject;
++ compat_uptr_t hash;
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t obj_hash;
++ __u32 obj_hash_size;
++ __u16 pax_flags;
++};
++
++struct role_allowed_ip_compat {
++ __u32 addr;
++ __u32 netmask;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct role_transition_compat {
++ compat_uptr_t rolename;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_role_label_compat {
++ compat_uptr_t rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ compat_ulong_t expires;
++
++ compat_uptr_t root_label;
++ compat_uptr_t hash;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++
++ compat_uptr_t transitions;
++ compat_uptr_t allowed_ips;
++ compat_uptr_t domain_children;
++ __u16 domain_child_num;
++
++ umode_t umask;
++
++ compat_uptr_t subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db_compat {
++ compat_uptr_t r_table;
++ __u32 num_pointers;
++ __u32 num_roles;
++ __u32 num_domain_children;
++ __u32 num_subjects;
++ __u32 num_objects;
++};
++
++struct acl_object_label_compat {
++ compat_uptr_t filename;
++ compat_ino_t inode;
++ __u32 device;
++ __u32 mode;
++
++ compat_uptr_t nested;
++ compat_uptr_t globbed;
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct acl_ip_label_compat {
++ compat_uptr_t iface;
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ compat_uptr_t prev;
++ compat_uptr_t next;
++};
++
++struct gr_arg_compat {
++ struct user_acl_role_db_compat role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ compat_uptr_t sprole_pws;
++ __u32 segv_device;
++ compat_ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper_compat {
++ compat_uptr_t arg;
++ __u32 version;
++ __u32 size;
++};
++
++#endif
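Every pointer in the native gracl.h structures is replaced by a compat_uptr_t (a 32-bit handle) in these *_compat mirrors, so a 64-bit kernel can parse a policy uploaded by a 32-bit gradm. The layouts genuinely differ, which is why a plain copy into the native struct would misread the blob; a small standalone illustration (hypothetical struct names, typical LP64 sizes):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;	/* 32-bit hole where a pointer was */

struct demo_transition_native { char *rolename; void *prev, *next; };
struct demo_transition_compat { compat_uptr_t rolename, prev, next; };

int main(void)
{
	/* On a typical LP64 build the native struct is 24 bytes and the
	 * compat one 12, so the same byte offsets name different fields. */
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct demo_transition_native),
	       sizeof(struct demo_transition_compat));
	return 0;
}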
+diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
+new file mode 100644
+index 0000000..323ecf2
+--- /dev/null
++++ b/include/linux/gralloc.h
+@@ -0,0 +1,9 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++void *acl_alloc_num(unsigned long num, unsigned long len);
++
++#endif
+diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
+new file mode 100644
+index 0000000..be66033
+--- /dev/null
++++ b/include/linux/grdefs.h
+@@ -0,0 +1,140 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++ GR_READY = 0x01,
++ GR_STATUS_INIT = 0x00 // disabled state
++};
++
++/* Begin ACL declarations */
++
++/* Role flags */
++
++enum {
++ GR_ROLE_USER = 0x0001,
++ GR_ROLE_GROUP = 0x0002,
++ GR_ROLE_DEFAULT = 0x0004,
++ GR_ROLE_SPECIAL = 0x0008,
++ GR_ROLE_AUTH = 0x0010,
++ GR_ROLE_NOPW = 0x0020,
++ GR_ROLE_GOD = 0x0040,
++ GR_ROLE_LEARN = 0x0080,
++ GR_ROLE_TPE = 0x0100,
++ GR_ROLE_DOMAIN = 0x0200,
++ GR_ROLE_PAM = 0x0400,
++ GR_ROLE_PERSIST = 0x0800
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++ GR_DELETED = 0x80000000
++};
++
++/* ACL Object-only mode flags */
++enum {
++ GR_READ = 0x00000001,
++ GR_APPEND = 0x00000002,
++ GR_WRITE = 0x00000004,
++ GR_EXEC = 0x00000008,
++ GR_FIND = 0x00000010,
++ GR_INHERIT = 0x00000020,
++ GR_SETID = 0x00000040,
++ GR_CREATE = 0x00000080,
++ GR_DELETE = 0x00000100,
++ GR_LINK = 0x00000200,
++ GR_AUDIT_READ = 0x00000400,
++ GR_AUDIT_APPEND = 0x00000800,
++ GR_AUDIT_WRITE = 0x00001000,
++ GR_AUDIT_EXEC = 0x00002000,
++ GR_AUDIT_FIND = 0x00004000,
++ GR_AUDIT_INHERIT= 0x00008000,
++ GR_AUDIT_SETID = 0x00010000,
++ GR_AUDIT_CREATE = 0x00020000,
++ GR_AUDIT_DELETE = 0x00040000,
++ GR_AUDIT_LINK = 0x00080000,
++ GR_PTRACERD = 0x00100000,
++ GR_NOPTRACE = 0x00200000,
++ GR_SUPPRESS = 0x00400000,
++ GR_NOLEARN = 0x00800000,
++ GR_INIT_TRANSFER= 0x01000000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
++
++/* ACL subject-only mode flags */
++enum {
++ GR_KILL = 0x00000001,
++ GR_VIEW = 0x00000002,
++ GR_PROTECTED = 0x00000004,
++ GR_LEARN = 0x00000008,
++ GR_OVERRIDE = 0x00000010,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_DUMMY = 0x00000020,
++ GR_PROTSHM = 0x00000040,
++ GR_KILLPROC = 0x00000080,
++ GR_KILLIPPROC = 0x00000100,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_NOTROJAN = 0x00000200,
++ GR_PROTPROCFD = 0x00000400,
++ GR_PROCACCT = 0x00000800,
++ GR_RELAXPTRACE = 0x00001000,
++ //GR_NESTED = 0x00002000,
++ GR_INHERITLEARN = 0x00004000,
++ GR_PROCFIND = 0x00008000,
++ GR_POVERRIDE = 0x00010000,
++ GR_KERNELAUTH = 0x00020000,
++ GR_ATSECURE = 0x00040000,
++ GR_SHMEXEC = 0x00080000
++};
++
++enum {
++ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
++ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
++ GR_PAX_ENABLE_MPROTECT = 0x0004,
++ GR_PAX_ENABLE_RANDMMAP = 0x0008,
++ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
++ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
++ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
++ GR_PAX_DISABLE_MPROTECT = 0x0400,
++ GR_PAX_DISABLE_RANDMMAP = 0x0800,
++ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
++};
++
++enum {
++ GR_ID_USER = 0x01,
++ GR_ID_GROUP = 0x02,
++};
++
++enum {
++ GR_ID_ALLOW = 0x01,
++ GR_ID_DENY = 0x02,
++};
++
++#define GR_CRASH_RES 31
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++ GR_RLIM_CPU_BUMP = 60,
++ GR_RLIM_FSIZE_BUMP = 50000,
++ GR_RLIM_DATA_BUMP = 10000,
++ GR_RLIM_STACK_BUMP = 1000,
++ GR_RLIM_CORE_BUMP = 10000,
++ GR_RLIM_RSS_BUMP = 500000,
++ GR_RLIM_NPROC_BUMP = 1,
++ GR_RLIM_NOFILE_BUMP = 5,
++ GR_RLIM_MEMLOCK_BUMP = 50000,
++ GR_RLIM_AS_BUMP = 500000,
++ GR_RLIM_LOCKS_BUMP = 2,
++ GR_RLIM_SIGPENDING_BUMP = 5,
++ GR_RLIM_MSGQUEUE_BUMP = 10000,
++ GR_RLIM_NICE_BUMP = 1,
++ GR_RLIM_RTPRIO_BUMP = 1,
++ GR_RLIM_RTTIME_BUMP = 1000000
++};
++
++#endif
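The object-mode enums above are plain bitmasks, with each audit flag shadowing its base permission (GR_AUDIT_READ for GR_READ, and so on, collected by the GR_AUDITS mask). A small standalone sketch of decoding such a mask — the r/a/w/x/f letters follow gradm's policy syntax, but the helper itself is hypothetical and not part of the patch:

#include <stdio.h>

/* Subset of the object-mode bits defined in grdefs.h above. */
#define GR_READ    0x00000001u
#define GR_APPEND  0x00000002u
#define GR_WRITE   0x00000004u
#define GR_EXEC    0x00000008u
#define GR_FIND    0x00000010u

/* Hypothetical helper: render an object ACL mode as policy letters. */
static void print_mode(unsigned int mode)
{
	printf("%c%c%c%c%c\n",
	       (mode & GR_READ)   ? 'r' : '-',
	       (mode & GR_APPEND) ? 'a' : '-',
	       (mode & GR_WRITE)  ? 'w' : '-',
	       (mode & GR_EXEC)   ? 'x' : '-',
	       (mode & GR_FIND)   ? 'f' : '-');
}

int main(void)
{
	print_mode(GR_READ | GR_EXEC | GR_FIND);	/* prints "r--xf" */
	return 0;
}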
+diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
+new file mode 100644
+index 0000000..44d8215
+--- /dev/null
++++ b/include/linux/grinternal.h
+@@ -0,0 +1,236 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/mnt_namespace.h>
++#include <linux/nsproxy.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++void gr_add_learn_entry(const char *fmt, ...)
++ __attribute__ ((format (printf, 1, 2)));
++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++ const struct vfsmount *mnt);
++__u32 gr_check_create(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt, const __u32 mode);
++int gr_check_protected_task(const struct task_struct *task);
++__u32 to_gr_audit(const __u32 reqmode);
++int gr_set_acls(const int type);
++int gr_acl_is_enabled(void);
++char gr_roletype_to_char(void);
++
++void gr_handle_alertkill(struct task_struct *task);
++char *gr_to_filename(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename1(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename2(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++char *gr_to_filename3(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++
++extern int grsec_enable_ptrace_readexec;
++extern int grsec_enable_harden_ptrace;
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_shm;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_audit_ptrace;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_rofs;
++extern int grsec_deny_new_usb;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_symlinkown;
++extern int grsec_symlinkown_gid;
++extern int grsec_enable_tpe;
++extern int grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_tpe_invert;
++extern int grsec_enable_socket_all;
++extern int grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern int grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern int grsec_socket_server_gid;
++extern int grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_log_rwxmaps;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_resource_logging;
++extern int grsec_enable_blackhole;
++extern int grsec_lastack_retries;
++extern int grsec_enable_brute;
++extern int grsec_lock;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
++ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
++ (tsk)->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
++ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
++
++#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
++ gr_to_filename((tsk)->exec_file->f_path.dentry, \
++ (tsk)->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
++ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
++ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
++
++#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
++
++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
++ (task)->pid, (cred)->uid, \
++ (cred)->euid, (cred)->gid, (cred)->egid, \
++ gr_parent_task_fullpath(task), \
++ (task)->real_parent->comm, (task)->real_parent->pid, \
++ (pcred)->uid, (pcred)->euid, \
++ (pcred)->gid, (pcred)->egid
++
++static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
++{
++ if (file1 && file2) {
++ const struct inode *inode1 = file1->f_path.dentry->d_inode;
++ const struct inode *inode2 = file2->f_path.dentry->d_inode;
++ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
++ return true;
++ }
++
++ return false;
++}
++
++#define GR_CHROOT_CAPS {{ \
++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
++ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
++
++#define security_learn(normal_msg,args...) \
++({ \
++ read_lock(&grsec_exec_file_lock); \
++ gr_add_learn_entry(normal_msg "\n", ## args); \
++ read_unlock(&grsec_exec_file_lock); \
++})
++
++enum {
++ GR_DO_AUDIT,
++ GR_DONT_AUDIT,
++	/* used for non-audit messages for which we shouldn't kill the task */
++ GR_DONT_AUDIT_GOOD
++};
++
++enum {
++ GR_TTYSNIFF,
++ GR_RBAC,
++ GR_RBAC_STR,
++ GR_STR_RBAC,
++ GR_RBAC_MODE2,
++ GR_RBAC_MODE3,
++ GR_FILENAME,
++ GR_SYSCTL_HIDDEN,
++ GR_NOARGS,
++ GR_ONE_INT,
++ GR_ONE_INT_TWO_STR,
++ GR_ONE_STR,
++ GR_STR_INT,
++ GR_TWO_STR_INT,
++ GR_TWO_INT,
++ GR_TWO_U64,
++ GR_THREE_INT,
++ GR_FIVE_INT_TWO_STR,
++ GR_TWO_STR,
++ GR_THREE_STR,
++ GR_FOUR_STR,
++ GR_STR_FILENAME,
++ GR_FILENAME_STR,
++ GR_FILENAME_TWO_INT,
++ GR_FILENAME_TWO_INT_STR,
++ GR_TEXTREL,
++ GR_PTRACE,
++ GR_RESOURCE,
++ GR_CAP,
++ GR_SIG,
++ GR_SIG2,
++ GR_CRASH1,
++ GR_CRASH2,
++ GR_PSACCT,
++ GR_RWXMAP,
++ GR_RWXMAPVMA
++};
++
++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
++#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
++#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
++#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
++
++#endif
++
++#endif
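All of the gr_log_* macros above funnel into the single variadic gr_log_varargs(), with the GR_* argtypes enum telling it how to walk the va_list. A minimal standalone sketch of that dispatch pattern (demo names only, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_ONE_INT, DEMO_TWO_STR };

/* One variadic sink plus a type tag deciding how to pull arguments —
 * the same shape as gr_log_varargs() and its wrapper macros. */
static void demo_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case DEMO_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case DEMO_TWO_STR: {
		const char *a = va_arg(ap, const char *);
		const char *b = va_arg(ap, const char *);
		printf(msg, a, b);
		break;
	}
	}
	va_end(ap);
}

int main(void)
{
	demo_log_varargs("denied signal %d\n", DEMO_ONE_INT, 9);
	demo_log_varargs("mount of %s to %s\n", DEMO_TWO_STR,
			 "/dev/sda1", "/mnt");
	return 0;
}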
+diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
+new file mode 100644
+index 0000000..1357a24
+--- /dev/null
++++ b/include/linux/grmsg.h
+@@ -0,0 +1,114 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
++#define GR_STOPMOD_MSG "denied modification of module state by "
++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
++#define GR_IOPERM_MSG "denied use of ioperm() by "
++#define GR_IOPL_MSG "denied use of iopl() by "
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
++#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
++#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
++#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
++#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
++#define GR_INITF_ACL_MSG "init_variables() failed %s by "
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
++#define GR_SHUTS_ACL_MSG "shutdown auth success for "
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
++#define GR_ENABLEF_ACL_MSG "unable to load %s for "
++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
++#define GR_RELOADF_ACL_MSG "failed reload of %s for "
++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
++#define GR_SPROLEF_ACL_MSG "special role %s failure for "
++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
++#define GR_INVMODE_ACL_MSG "invalid mode %d by "
++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
++#define GR_FAILFORK_MSG "failed fork with errno %s by "
++#define GR_NICE_CHROOT_MSG "denied priority change by "
++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
++#define GR_TIME_MSG "time set by "
++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
++#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
++#define GR_BIND_MSG "denied bind() by "
++#define GR_CONNECT_MSG "denied connect() by "
++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
++#define GR_CAP_ACL_MSG "use of %s denied for "
++#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
++#define GR_CAP_ACL_MSG2 "use of %s permitted for "
++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
++#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
++#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
++#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
++#define GR_VM86_MSG "denied use of vm86 by "
++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
++#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
++#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
++#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
++#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
++#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
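Note how nearly every format string above caps its %s arguments with an explicit precision (%.950s, %.256s, %.16s): the messages are formatted into a bounded log buffer, and the caps limit the output length even for attacker-chosen paths. The effect is ordinary printf precision semantics, shown here standalone with snprintf:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[2048], buf[64];

	memset(path, 'A', sizeof(path) - 1);
	path[sizeof(path) - 1] = '\0';

	/* %.20s truncates the argument itself, the same way the %.950s
	 * caps in grmsg.h keep long paths inside the alert buffer. */
	snprintf(buf, sizeof(buf), "denied open of %.20s by ", path);
	printf("%s\n", buf);
	return 0;
}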
+diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
+new file mode 100644
+index 0000000..8996115
+--- /dev/null
++++ b/include/linux/grsecurity.h
+@@ -0,0 +1,224 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/binfmts.h>
++#include <linux/gracl.h>
++
++/* notify of brain-dead configs */
++#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
++#endif
++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
++#endif
++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
++#endif
++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
++#error "CONFIG_PAX enabled, but no PaX options are enabled."
++#endif
++
++int gr_handle_new_usb(void);
++
++void gr_handle_brute_attach(unsigned long mm_flags);
++void gr_handle_brute_check(void);
++void gr_handle_kernel_exploit(void);
++
++char gr_roletype_to_char(void);
++
++int gr_acl_enable_at_secure(void);
++
++int gr_check_user_change(int real, int effective, int fs);
++int gr_check_group_change(int real, int effective, int fs);
++
++void gr_del_task_from_ip_table(struct task_struct *p);
++
++int gr_pid_is_chrooted(struct task_struct *p);
++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
++int gr_handle_chroot_nice(void);
++int gr_handle_chroot_sysctl(const int op);
++int gr_handle_chroot_setpriority(struct task_struct *p,
++ const int niceval);
++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++int gr_handle_chroot_chroot(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_chroot_chdir(struct path *path);
++int gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++int gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const char *dev_name);
++int gr_handle_chroot_pivot(void);
++int gr_handle_chroot_unix(const pid_t pid);
++
++int gr_handle_rawio(const struct inode *inode);
++
++void gr_handle_ioperm(void);
++void gr_handle_iopl(void);
++
++umode_t gr_acl_umask(void);
++
++int gr_tpe_allow(const struct file *file);
++
++void gr_set_chroot_entries(struct task_struct *task, struct path *path);
++void gr_clear_chroot_entries(struct task_struct *task);
++
++void gr_log_forkfail(const int retval);
++void gr_log_timechange(void);
++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
++void gr_log_chdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_log_chroot_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_log_remount(const char *devname, const int retval);
++void gr_log_unmount(const char *devname, const int retval);
++void gr_log_mount(const char *from, const char *to, const int retval);
++void gr_log_textrel(struct vm_area_struct *vma);
++void gr_log_ptgnustack(struct file *file);
++void gr_log_rwxmmap(struct file *file);
++void gr_log_rwxmprotect(struct vm_area_struct *vma);
++
++int gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_fifo(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag,
++ const int acc_mode);
++int gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode,
++ const int mode, const char *to);
++
++int gr_is_capable(const int cap);
++int gr_is_capable_nolog(const int cap);
++void gr_learn_resource(const struct task_struct *task, const int limit,
++ const unsigned long wanted, const int gt);
++void gr_copy_label(struct task_struct *tsk);
++void gr_handle_crash(struct task_struct *task, const int sig);
++int gr_handle_signal(const struct task_struct *p, const int sig);
++int gr_check_crash_uid(const uid_t uid);
++int gr_check_protected_task(const struct task_struct *task);
++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
++int gr_acl_handle_mmap(const struct file *file,
++ const unsigned long prot);
++int gr_acl_handle_mprotect(const struct file *file,
++ const unsigned long prot);
++int gr_check_hidden_task(const struct task_struct *tsk);
++__u32 gr_acl_handle_truncate(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_utime(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_access(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++__u32 gr_acl_handle_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, umode_t *mode);
++__u32 gr_acl_handle_chown(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_handle_ptrace(struct task_struct *task, const long request);
++int gr_handle_proc_ptrace(struct task_struct *task);
++__u32 gr_acl_handle_execve(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++int gr_check_crash_exec(const struct file *filp);
++int gr_acl_is_enabled(void);
++void gr_set_role_label(struct task_struct *task, const uid_t uid,
++ const gid_t gid);
++int gr_set_proc_label(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const int unsafe_flags);
++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_open(const struct dentry *dentry,
++ const struct vfsmount *mnt, int acc_mode);
++__u32 gr_acl_handle_creat(const struct dentry *dentry,
++ const struct dentry *p_dentry,
++ const struct vfsmount *p_mnt,
++ int open_flags, int acc_mode, const int imode);
++void gr_handle_create(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_proc_create(const struct dentry *dentry,
++ const struct inode *inode);
++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const int mode);
++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt);
++__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_handle_delete(const ino_t ino, const dev_t dev);
++__u32 gr_acl_handle_unlink(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const char *from);
++__u32 gr_acl_handle_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt, const char *to);
++int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
++int gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname);
++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace);
++__u32 gr_check_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt);
++int gr_acl_handle_filldir(const struct file *file, const char *name,
++ const unsigned int namelen, const ino_t ino);
++
++__u32 gr_acl_handle_unix(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++void gr_acl_handle_exit(void);
++void gr_acl_handle_psacct(struct task_struct *task, const long code);
++int gr_acl_handle_procpidmem(const struct task_struct *task);
++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
++void gr_audit_ptrace(struct task_struct *task);
++dev_t gr_get_dev_from_dentry(struct dentry *dentry);
++void gr_put_exec_file(struct task_struct *task);
++
++int gr_ptrace_readexec(struct file *file, int unsafe_flags);
++
++#ifdef CONFIG_GRKERNSEC
++void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
++void gr_handle_vm86(void);
++void gr_handle_mem_readwrite(u64 from, u64 to);
++
++void gr_log_badprocpid(const char *entry);
++
++extern int grsec_enable_dmesg;
++extern int grsec_disable_privio;
++
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++extern int grsec_proc_gid;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++extern int grsec_enable_chroot_findtask;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int grsec_enable_setxid;
++#endif
++#endif
++
++#endif
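The header opens by rejecting contradictory Kconfig combinations at build time rather than booting a half-configured kernel. The same #error idiom, reduced to a sketch with hypothetical CONFIG_DEMO_* symbols:

/* Sketch of the #error idiom used at the top of grsecurity.h: make an
 * impossible configuration fail the build instead of running broken. */
#if defined(CONFIG_DEMO_FEATURE) && !defined(CONFIG_DEMO_BACKEND_A) && \
    !defined(CONFIG_DEMO_BACKEND_B)
#error "CONFIG_DEMO_FEATURE enabled, but no backend is selected."
#endif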
+diff --git a/include/linux/grsock.h b/include/linux/grsock.h
+new file mode 100644
+index 0000000..e7ffaaf
+--- /dev/null
++++ b/include/linux/grsock.h
+@@ -0,0 +1,19 @@
++#ifndef __GRSOCK_H
++#define __GRSOCK_H
++
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++ const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_server_other(const struct sock *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_bind(struct socket * sock,
++ struct sockaddr_in * addr);
++extern int gr_search_listen(struct socket * sock);
++extern int gr_search_accept(struct socket * sock);
++extern int gr_search_socket(const int domain, const int type,
++ const int protocol);
++
++#endif
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 52e9620..26c34b1 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -192,6 +192,18 @@ static inline void clear_highpage(struct page *page)
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+
++static inline void sanitize_highpage(struct page *page)
++{
++ void *kaddr;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kaddr = kmap_atomic(page, KM_CLEARPAGE);
++ clear_page(kaddr);
++ kunmap_atomic(kaddr, KM_CLEARPAGE);
++ local_irq_restore(flags);
++}
++
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
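sanitize_highpage() mirrors clear_highpage() but brackets the atomic kmap with local_irq_save()/local_irq_restore(), since it can be called from page-free paths where an interrupt reusing the same kmap slot would corrupt the mapping. A hedged, kernel-style sketch of a caller scrubbing a just-freed block (the hook point is an assumption for illustration, not quoted from the patch):

#include <linux/highmem.h>

/* Sketch only: scrub each page of an order-N block on its way back
 * to the allocator, so freed data cannot leak to the next user. */
static void demo_sanitize_block(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		sanitize_highpage(page + i);
}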
+diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
+index a90c09d..15f7933 100644
+--- a/include/linux/hwmon-sysfs.h
++++ b/include/linux/hwmon-sysfs.h
+@@ -23,7 +23,8 @@
+ struct sensor_device_attribute{
+ struct device_attribute dev_attr;
+ int index;
+-};
++} __do_const;
++typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
+ #define to_sensor_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
+
+@@ -39,7 +40,7 @@ struct sensor_device_attribute_2 {
+ struct device_attribute dev_attr;
+ u8 index;
+ u8 nr;
+-};
++} __do_const;
+ #define to_sensor_dev_attr_2(_dev_attr) \
+ container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
+
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 07d103a..04ec65b 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -364,6 +364,7 @@ struct i2c_algorithm {
+ /* To determine what the adapter supports */
+ u32 (*functionality) (struct i2c_adapter *);
+ };
++typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
+
+ /*
+ * i2c_adapter is the structure used to identify a physical i2c bus along
+diff --git a/include/linux/i2o.h b/include/linux/i2o.h
+index a6deef4..c56a7f2 100644
+--- a/include/linux/i2o.h
++++ b/include/linux/i2o.h
+@@ -564,7 +564,7 @@ struct i2o_controller {
+ struct i2o_device *exec; /* Executive */
+ #if BITS_PER_LONG == 64
+ spinlock_t context_list_lock; /* lock for context_list */
+- atomic_t context_list_counter; /* needed for unique contexts */
++ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
+ struct list_head context_list; /* list of context id's
+ and pointers */
+ #endif
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index 732c962..61c3f70 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -203,7 +203,7 @@ struct pppox_proto {
+ int (*ioctl)(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ struct module *owner;
+-};
++} __do_const;
+
+ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 9146f39..536519a 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -38,9 +38,29 @@
+ * Also note, that this data cannot be "const".
+ */
+
++#define add_init_latent_entropy __latent_entropy
++
++#ifdef CONFIG_HOTPLUG
++#define add_devinit_latent_entropy
++#else
++#define add_devinit_latent_entropy __latent_entropy
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define add_cpuinit_latent_entropy
++#else
++#define add_cpuinit_latent_entropy __latent_entropy
++#endif
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++#define add_meminit_latent_entropy
++#else
++#define add_meminit_latent_entropy __latent_entropy
++#endif
++
+ /* These are for everybody (although not all archs will actually
+ discard it in modules) */
+-#define __init __section(.init.text) __cold notrace
++#define __init __section(.init.text) __cold notrace add_init_latent_entropy
+ #define __initdata __section(.init.data)
+ #define __initconst __section(.init.rodata)
+ #define __exitdata __section(.exit.data)
+@@ -82,7 +102,7 @@
+ #define __exit __section(.exit.text) __exitused __cold notrace
+
+ /* Used for HOTPLUG */
+-#define __devinit __section(.devinit.text) __cold notrace
++#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
+ #define __devinitdata __section(.devinit.data)
+ #define __devinitconst __section(.devinit.rodata)
+ #define __devexit __section(.devexit.text) __exitused __cold notrace
+@@ -90,7 +110,7 @@
+ #define __devexitconst __section(.devexit.rodata)
+
+ /* Used for HOTPLUG_CPU */
+-#define __cpuinit __section(.cpuinit.text) __cold notrace
++#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
+ #define __cpuinitdata __section(.cpuinit.data)
+ #define __cpuinitconst __section(.cpuinit.rodata)
+ #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
+@@ -98,7 +118,7 @@
+ #define __cpuexitconst __section(.cpuexit.rodata)
+
+ /* Used for MEMORY_HOTPLUG */
+-#define __meminit __section(.meminit.text) __cold notrace
++#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
+ #define __meminitdata __section(.meminit.data)
+ #define __meminitconst __section(.meminit.rodata)
+ #define __memexit __section(.memexit.text) __exitused __cold notrace
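The add_*_latent_entropy macros apply the latent_entropy gcc-plugin attribute to code that runs once at boot (or once per hotplug event), letting the plugin mix extra entropy into the pool from those paths; when the matching HOTPLUG option keeps the code resident and re-runnable, the attribute is deliberately omitted. The attribute itself is normally guarded along these lines (a sketch of the usual compiler-header definition, not a line from this hunk):

/* Sketch: __latent_entropy only expands to the plugin attribute when
 * the kernel is actually built with the latent_entropy gcc plugin. */
#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#else
#define __latent_entropy
#endif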
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index cdde2b3..d782954 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -144,6 +144,12 @@ extern struct task_group root_task_group;
+
+ #define INIT_TASK_COMM "swapper"
+
++#ifdef CONFIG_X86
++#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
++#else
++#define INIT_TASK_THREAD_INFO
++#endif
++
+ /*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -183,6 +189,7 @@ extern struct task_group root_task_group;
+ RCU_INIT_POINTER(.cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
++ INIT_TASK_THREAD_INFO \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index a64b00e..2ef3855f 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -441,7 +441,7 @@ enum
+ /* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+-extern char *softirq_to_name[NR_SOFTIRQS];
++extern const char * const softirq_to_name[NR_SOFTIRQS];
+
+ /* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
+
+ struct softirq_action
+ {
+- void (*action)(struct softirq_action *);
+-};
++ void (*action)(void);
++} __no_const;
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
++extern void open_softirq(int nr, void (*action)(void));
+ extern void softirq_init(void);
+ static inline void __raise_softirq_irqoff(unsigned int nr)
+ {
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 9d57a71..8d0f701 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -166,7 +166,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+ }
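__intentional_overflow(-1) whitelists resource_size() for the size-overflow plugin: end - start + 1 may legitimately wrap when a resource spans the whole address space, and instrumenting it would raise a false positive. The wrap is easy to reproduce standalone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A resource covering the entire 32-bit space: end - start + 1
	 * wraps to 0, which is why resource_size() must be exempted
	 * from overflow instrumentation. */
	uint32_t start = 0, end = UINT32_MAX;

	printf("size = %u\n", end - start + 1);	/* prints 0 */
	return 0;
}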
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index bff29c5..7437762 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -328,7 +328,7 @@ struct irq_chip {
+ #ifdef CONFIG_IRQ_RELEASE_METHOD
+ void (*release)(unsigned int irq, void *dev_id);
+ #endif
+-};
++} __do_const;
+
+ /*
+ * irq_chip specific flags
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index f1e2527..9a323d1 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -39,7 +39,6 @@ struct module;
+ */
+ struct irq_desc {
+ struct irq_data irq_data;
+- struct timer_rand_state *timer_rand_state;
+ unsigned int __percpu *kstat_irqs;
+ irq_flow_handler_t handle_irq;
+ #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index 265e2c3..cdd6f29 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -295,9 +295,9 @@ extern unsigned long preset_lpj;
+ */
+ extern unsigned int jiffies_to_msecs(const unsigned long j);
+ extern unsigned int jiffies_to_usecs(const unsigned long j);
+-extern unsigned long msecs_to_jiffies(const unsigned int m);
+-extern unsigned long usecs_to_jiffies(const unsigned int u);
+-extern unsigned long timespec_to_jiffies(const struct timespec *value);
++extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
++extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
++extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
+ extern void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value);
+ extern unsigned long timeval_to_jiffies(const struct timeval *value);
+diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
+index 3875719..4663bc3 100644
+--- a/include/linux/kallsyms.h
++++ b/include/linux/kallsyms.h
+@@ -15,7 +15,8 @@
+
+ struct module;
+
+-#ifdef CONFIG_KALLSYMS
++#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ /* Lookup the address for a symbol. Returns 0 if not found. */
+ unsigned long kallsyms_lookup_name(const char *name);
+
+@@ -99,6 +100,20 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
+ /* Stupid that this does nothing, but I didn't create this mess. */
+ #define __print_symbol(fmt, addr)
+ #endif /*CONFIG_KALLSYMS*/
++#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
++ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
++extern unsigned long kallsyms_lookup_name(const char *name);
++extern void __print_symbol(const char *fmt, unsigned long address);
++extern int sprint_backtrace(char *buffer, unsigned long address);
++extern int sprint_symbol(char *buffer, unsigned long address);
++const char *kallsyms_lookup(unsigned long addr,
++ unsigned long *symbolsize,
++ unsigned long *offset,
++ char **modname, char *namebuf);
++extern int kallsyms_lookup_size_offset(unsigned long addr,
++ unsigned long *symbolsize,
++ unsigned long *offset);
++#endif
+
+ /* This macro allows us to keep printk typechecking */
+ static __printf(1, 2)
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index a70783d..bf1dd28 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -34,6 +34,7 @@
+ #define LLONG_MAX ((long long)(~0ULL>>1))
+ #define LLONG_MIN (-LLONG_MAX - 1)
+ #define ULLONG_MAX (~0ULL)
++#define SIZE_MAX (~(size_t)0)
+
+ #define STACK_MAGIC 0xdeadbeef
+
+@@ -696,24 +697,30 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+- * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
++ * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+- * The implementation uses gcc's reluctance to create a negative array, but
+- * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+- * to inline functions). So as a fallback we use the optimizer; if it can't
+- * prove the condition is false, it will cause a link error on the undefined
+- * "__build_bug_on_failed". This error message can be harder to track down
+- * though, hence the two different methods.
++ * The implementation uses gcc's reluctance to create a negative array, but gcc
++ * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
++ * inline functions). Luckily, in 4.3 they added the "error" function
++ * attribute just for this type of case. Thus, we use a negative sized array
++ * (should always create an error on gcc versions older than 4.4) and then call
++ * an undefined function with the error attribute (should always create an
++ * error on gcc 4.3 and later). If for some reason, neither creates a
++ * compile-time error, we'll still have a link-time error, which is harder to
++ * track down.
+ */
+ #ifndef __OPTIMIZE__
+ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+ #else
+-extern int __build_bug_on_failed;
+-#define BUILD_BUG_ON(condition) \
+- do { \
+- ((void)sizeof(char[1 - 2*!!(condition)])); \
+- if (condition) __build_bug_on_failed = 1; \
++#define BUILD_BUG_ON(condition) \
++ do { \
++ bool __cond = !!(condition); \
++ extern void __build_bug_on_failed(void) \
++ __compiletime_error("BUILD_BUG_ON failed"); \
++ if (__cond) \
++ __build_bug_on_failed(); \
++ __compiletime_error_fallback(__cond); \
+ } while(0)
+ #endif
+ #endif /* __CHECKER__ */
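The reworked comment above documents both halves of BUILD_BUG_ON: the negative-sized-array trick for pre-4.3 gcc and the error-attributed extern for 4.3 and later. A minimal usage sketch with a hypothetical struct (not from this patch):

    #include <linux/kernel.h>

    /* Hypothetical on-wire header whose size the code depends on. */
    struct wire_hdr {
            __u32 magic;
            __u32 len;
    };

    static inline void wire_hdr_sanity(void)
    {
            /* A true condition aborts the build; this one passes. */
            BUILD_BUG_ON(sizeof(struct wire_hdr) != 8);
    }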
+diff --git a/include/linux/key-type.h b/include/linux/key-type.h
+index 9efd081..19f989c 100644
+--- a/include/linux/key-type.h
++++ b/include/linux/key-type.h
+@@ -92,7 +92,7 @@ struct key_type {
+
+ /* internal fields */
+ struct list_head link; /* link in types list */
+-};
++} __do_const;
+
+ extern struct key_type key_type_keyring;
+
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index c4d2fc1..5df9c19 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -53,7 +53,7 @@ extern int kgdb_connected;
+ extern int kgdb_io_module_registered;
+
+ extern atomic_t kgdb_setting_breakpoint;
+-extern atomic_t kgdb_cpu_doing_single_step;
++extern atomic_unchecked_t kgdb_cpu_doing_single_step;
+
+ extern struct task_struct *kgdb_usethread;
+ extern struct task_struct *kgdb_contthread;
+@@ -252,7 +252,7 @@ struct kgdb_arch {
+ void (*disable_hw_break)(struct pt_regs *regs);
+ void (*remove_all_hw_break)(void);
+ void (*correct_hw_break)(void);
+-};
++} __do_const;
+
+ /**
+ * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+@@ -277,7 +277,7 @@ struct kgdb_io {
+ void (*pre_exception) (void);
+ void (*post_exception) (void);
+ int is_console;
+-};
++} __do_const;
+
+ extern struct kgdb_arch arch_kgdb_ops;
+
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index f8d4b27..8560882 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
+ * usually useless though. */
+ extern __printf(2, 3)
+ int __request_module(bool wait, const char *name, ...);
++extern __printf(3, 4)
++int ___request_module(bool wait, char *param_name, const char *name, ...);
+ #define request_module(mod...) __request_module(true, mod)
+ #define request_module_nowait(mod...) __request_module(false, mod)
+ #define try_then_request_module(x, mod...) \
+@@ -60,6 +62,9 @@ struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+ char *path;
++#ifdef CONFIG_GRKERNSEC
++ char *origpath;
++#endif
+ char **argv;
+ char **envp;
+ enum umh_wait wait;
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index 445f978..3a02264 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -111,7 +111,7 @@ struct kobj_type {
+ struct attribute **default_attrs;
+ const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+ const void *(*namespace)(struct kobject *kobj);
+-};
++} __do_const;
+
+ struct kobj_uevent_env {
+ char *envp[UEVENT_NUM_ENVP];
+@@ -134,6 +134,7 @@ struct kobj_attribute {
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+ };
++typedef struct kobj_attribute __no_const kobj_attribute_no_const;
+
+ extern const struct sysfs_ops kobj_sysfs_ops;
+
+diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
+index f66b065..c2c29b4 100644
+--- a/include/linux/kobject_ns.h
++++ b/include/linux/kobject_ns.h
+@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
+ const void *(*netlink_ns)(struct sock *sk);
+ const void *(*initial_ns)(void);
+ void (*drop_ns)(void *);
+-};
++} __do_const;
+
+ int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+ int kobj_ns_type_registered(enum kobj_ns_type type);
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index e6796c1..350d338 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+ void vcpu_load(struct kvm_vcpu *vcpu);
+ void vcpu_put(struct kvm_vcpu *vcpu);
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module);
+ void kvm_exit(void);
+
+@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg);
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+-int kvm_arch_init(void *opaque);
++int kvm_arch_init(const void *opaque);
+ void kvm_arch_exit(void);
+
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 62467ca..3d65d7d 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -910,7 +910,7 @@ struct ata_port_operations {
+ * fields must be pointers.
+ */
+ const struct ata_port_operations *inherits;
+-};
++} __do_const;
+
+ struct ata_port_info {
+ unsigned long flags;
+diff --git a/include/linux/list.h b/include/linux/list.h
+index cc6d2aa..c10ee83 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
+ extern void list_del(struct list_head *entry);
+ #endif
+
++extern void __pax_list_add(struct list_head *new,
++ struct list_head *prev,
++ struct list_head *next);
++static inline void pax_list_add(struct list_head *new, struct list_head *head)
++{
++ __pax_list_add(new, head, head->next);
++}
++static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
++{
++ __pax_list_add(new, head->prev, head);
++}
++extern void pax_list_del(struct list_head *entry);
++
+ /**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
+ INIT_LIST_HEAD(entry);
+ }
+
++extern void pax_list_del_init(struct list_head *entry);
++
+ /**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
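The pax_list_* declarations above mirror list_add()/list_add_tail()/list_del() but are implemented out of line so they can temporarily lift KERNEXEC write protection around list nodes that live in read-only data. A hedged usage sketch; the list and node names are invented:

    #include <linux/list.h>

    static LIST_HEAD(ro_handlers);            /* hypothetical list */
    static struct list_head my_handler_node;  /* hypothetical node */

    static void register_handler(void)
    {
            /* plain list_add_tail() would fault on a write-protected
             * node; the pax_ variant opens a writable window first */
            pax_list_add_tail(&my_handler_node, &ro_handlers);
    }

    static void unregister_handler(void)
    {
            pax_list_del(&my_handler_node);
    }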
+diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
+index 88e78de..c63979a 100644
+--- a/include/linux/lsm_audit.h
++++ b/include/linux/lsm_audit.h
+@@ -124,6 +124,10 @@ struct common_audit_data {
+ u32 denied;
+ uid_t ouid;
+ } fs;
++ struct {
++ int type, protocol;
++ struct sock *sk;
++ } net;
+ };
+ } apparmor_audit_data;
+ #endif
+diff --git a/include/linux/math64.h b/include/linux/math64.h
+index b8ba855..bfdffd0 100644
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -14,7 +14,7 @@
+ * This is commonly provided by 32bit archs to provide an optimized 64bit
+ * divide.
+ */
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+@@ -32,7 +32,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+ /**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+-static inline u64 div64_u64(u64 dividend, u64 divisor)
++static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
+ {
+ return dividend / divisor;
+ }
+@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
+ #define div64_long(x,y) div_s64((x),(y))
+
+ #ifndef div_u64_rem
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+ *remainder = do_div(dividend, divisor);
+ return dividend;
+@@ -62,7 +62,7 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+ #endif
+
+ #ifndef div64_u64
+-extern u64 div64_u64(u64 dividend, u64 divisor);
++extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
+ #endif
+
+ #ifndef div64_s64
+@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
+ * divide.
+ */
+ #ifndef div_u64
+-static inline u64 div_u64(u64 dividend, u32 divisor)
++static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
+ {
+ u32 remainder;
+ return div_u64_rem(dividend, divisor, &remainder);
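The div_u64 family is annotated wholesale because time and statistics arithmetic deliberately wraps; the marker exempts these helpers from size-overflow instrumentation without changing their semantics. A typical caller, for context (hypothetical helper):

    #include <linux/math64.h>

    /* Hypothetical: a 64/32 average without __udivdi3 on 32-bit. */
    static u64 mean_latency_ns(u64 total_ns, u32 nr_samples)
    {
            if (!nr_samples)
                    return 0;
            return div_u64(total_ns, nr_samples); /* remainder dropped */
    }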
+diff --git a/include/linux/mca.h b/include/linux/mca.h
+index 3797270..7765ede 100644
+--- a/include/linux/mca.h
++++ b/include/linux/mca.h
+@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
+ int region);
+ void * (*mca_transform_memory)(struct mca_device *,
+ void *memory);
+-};
++} __no_const;
+
+ struct mca_bus {
+ u64 default_dma_mask;
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 305fd75..cdbfb05 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
+
+ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
++#else
+ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
++#endif
++
+ #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
+ #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+
+@@ -213,8 +220,8 @@ struct vm_operations_struct {
+ /* called by access_process_vm when get_user_pages() fails, typically
+ * for use by special VMAs that can switch between memory and hardware
+ */
+- int (*access)(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write);
++ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
++ void *buf, size_t len, int write);
+ #ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+@@ -241,6 +248,7 @@ struct vm_operations_struct {
+ const nodemask_t *to, unsigned long flags);
+ #endif
+ };
++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
+
+ struct mmu_gather;
+ struct inode;
+@@ -941,8 +949,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long *prot, resource_size_t *phys);
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write);
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++ void *buf, size_t len, int write);
+
+ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+@@ -984,10 +992,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+ }
+ #endif
+
+-extern int make_pages_present(unsigned long addr, unsigned long end);
+-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write);
++extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
++extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
++extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++ void *buf, size_t len, int write);
+
+ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, unsigned int foll_flags,
+@@ -1013,34 +1021,6 @@ int set_page_dirty(struct page *page);
+ int set_page_dirty_lock(struct page *page);
+ int clear_page_dirty_for_io(struct page *page);
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page_start(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_growsdown(vma->vm_prev, addr);
+-}
+-
+-/* Is the vma a continuation of the stack vma below it? */
+-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+-}
+-
+-static inline int stack_guard_page_end(struct vm_area_struct *vma,
+- unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSUP) &&
+- (vma->vm_end == addr) &&
+- !vma_growsup(vma->vm_next, addr);
+-}
+-
+ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len);
+@@ -1135,6 +1115,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+ }
+ #endif
+
++#ifdef CONFIG_MMU
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
++#else
++static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
++{
++ return __pgprot(0);
++}
++#endif
++
+ int vma_wants_writenotify(struct vm_area_struct *vma);
+
+ extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+@@ -1153,8 +1142,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ {
+ return 0;
+ }
++
++static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
++ unsigned long address)
++{
++ return 0;
++}
+ #else
+ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+ #endif
+
+ #ifdef __PAGETABLE_PMD_FOLDED
+@@ -1163,8 +1159,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ {
+ return 0;
+ }
++
++static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
++ unsigned long address)
++{
++ return 0;
++}
+ #else
+ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
+ #endif
+
+ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1182,11 +1185,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+ NULL: pud_offset(pgd, address);
+ }
+
++static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
++ NULL: pud_offset(pgd, address);
++}
++
+ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ {
+ return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+ NULL: pmd_offset(pud, address);
+ }
++
++static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
++ NULL: pmd_offset(pud, address);
++}
+ #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+ #if USE_SPLIT_PTLOCKS
+@@ -1397,7 +1412,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags, struct page **pages);
+
+-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
++extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
+
+ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+@@ -1420,6 +1435,7 @@ out:
+ }
+
+ extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
+
+ extern unsigned long do_brk(unsigned long, unsigned long);
+
+@@ -1477,6 +1493,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
++
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+@@ -1493,15 +1513,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ }
+
+-#ifdef CONFIG_MMU
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+-#else
+-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+-{
+- return __pgprot(0);
+-}
+-#endif
+-
+ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+@@ -1537,6 +1548,12 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+ static inline void vm_stat_account(struct mm_struct *mm,
+ unsigned long flags, struct file *file, long pages)
+ {
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
++ mm->total_vm += pages;
+ }
+ #endif /* CONFIG_PROC_FS */
+
+@@ -1617,7 +1634,7 @@ extern int unpoison_memory(unsigned long pfn);
+ extern int sysctl_memory_failure_early_kill;
+ extern int sysctl_memory_failure_recovery;
+ extern void shake_page(struct page *p, int access);
+-extern atomic_long_t mce_bad_pages;
++extern atomic_long_unchecked_t mce_bad_pages;
+ extern int soft_offline_page(struct page *page, int flags);
+
+ extern void dump_page(struct page *page);
+@@ -1631,5 +1648,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
+ unsigned int pages_per_huge_page);
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index de3a321..8761f4a 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -253,6 +253,8 @@ struct vm_area_struct {
+ #ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
++
++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+ };
+
+ struct core_thread {
+@@ -390,6 +392,24 @@ struct mm_struct {
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ struct cpumask cpumask_allocation;
+ #endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ unsigned long pax_flags;
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
+index c5d5278..f0b68c8 100644
+--- a/include/linux/mmiotrace.h
++++ b/include/linux/mmiotrace.h
+@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+ /* Called from ioremap.c */
+ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ void __iomem *addr);
+-extern void mmiotrace_iounmap(volatile void __iomem *addr);
++extern void mmiotrace_iounmap(const volatile void __iomem *addr);
+
+ /* For anyone to insert markers. Remember trailing newline. */
+ extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
+@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
+ {
+ }
+
+-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
++static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ }
+
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index ee2baf0..e24a58c 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -256,12 +256,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+ */
+ #define ptep_clear_flush_notify(__vma, __address, __ptep) \
+ ({ \
+- pte_t __pte; \
++ pte_t ___pte; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
+ mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
+- __pte; \
++ ___pte; \
+ })
+
+ #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
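The `__pte` to `___pte` rename in ptep_clear_flush_notify() above is statement-expression hygiene: a caller argument that itself mentions `__pte` would be captured by the macro's local of the same name. A contrived repro of that class of shadowing bug (names hypothetical):

    /* Contrived repro: the macro-local name shadows the caller's. */
    #define ADD_ONE_BAD(x)  ({ int __v = 1; __v + (x); })

    static int shadow_demo(void)
    {
            int __v = 10;
            /* expands to ({ int __v = 1; __v + (__v); }) == 2, not 11 */
            return ADD_ONE_BAD(__v);
    }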
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 25842b6..6e42df8 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -371,7 +371,7 @@ struct zone {
+ unsigned long flags; /* zone flags, see below */
+
+ /* Zone statistics */
+- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 468819c..c02b63b 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -12,7 +12,7 @@
+ typedef unsigned long kernel_ulong_t;
+ #endif
+
+-#define PCI_ANY_ID (~0)
++#define PCI_ANY_ID ((__u16)~0)
+
+ struct pci_device_id {
+ __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
+@@ -131,7 +131,7 @@ struct usb_device_id {
+ #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+ #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+
+-#define HID_ANY_ID (~0)
++#define HID_ANY_ID (~0U)
+
+ struct hid_device_id {
+ __u16 bus;
+@@ -479,7 +479,7 @@ struct dmi_system_id {
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+-};
++} __do_const;
+ /*
+ * struct dmi_device_id appears during expansion of
+ * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 3cb7839..56d41ff 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -17,9 +17,11 @@
+ #include <linux/moduleparam.h>
+ #include <linux/tracepoint.h>
+ #include <linux/export.h>
++#include <linux/fs.h>
+
+ #include <linux/percpu.h>
+ #include <asm/module.h>
++#include <asm/pgtable.h>
+
+ #include <trace/events/module.h>
+
+@@ -53,12 +55,13 @@ struct module_attribute {
+ int (*test)(struct module *);
+ void (*free)(struct module *);
+ };
++typedef struct module_attribute __no_const module_attribute_no_const;
+
+ struct module_version_attribute {
+ struct module_attribute mattr;
+ const char *module_name;
+ const char *version;
+-} __attribute__ ((__aligned__(sizeof(void *))));
++} __do_const __attribute__ ((__aligned__(sizeof(void *))));
+
+ extern ssize_t __modver_version_show(struct module_attribute *,
+ struct module_kobject *, char *);
+@@ -217,7 +220,7 @@ struct module
+
+ /* Sysfs stuff. */
+ struct module_kobject mkobj;
+- struct module_attribute *modinfo_attrs;
++ module_attribute_no_const *modinfo_attrs;
+ const char *version;
+ const char *srcversion;
+ struct kobject *holders_dir;
+@@ -261,19 +264,16 @@ struct module
+ int (*init)(void);
+
+ /* If this is non-NULL, vfree after init() returns */
+- void *module_init;
++ void *module_init_rx, *module_init_rw;
+
+ /* Here is the actual code + data, vfree'd on unload. */
+- void *module_core;
++ void *module_core_rx, *module_core_rw;
+
+ /* Here are the sizes of the init and core sections */
+- unsigned int init_size, core_size;
++ unsigned int init_size_rw, core_size_rw;
+
+ /* The size of the executable code in each section. */
+- unsigned int init_text_size, core_text_size;
+-
+- /* Size of RO sections of the module (text+rodata) */
+- unsigned int init_ro_size, core_ro_size;
++ unsigned int init_size_rx, core_size_rx;
+
+ /* Arch-specific module values */
+ struct mod_arch_specific arch;
+@@ -329,6 +329,10 @@ struct module
+ #ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call **trace_events;
+ unsigned int num_trace_events;
++ struct file_operations trace_id;
++ struct file_operations trace_enable;
++ struct file_operations trace_format;
++ struct file_operations trace_filter;
+ #endif
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ unsigned int num_ftrace_callsites;
+@@ -379,16 +383,46 @@ bool is_module_address(unsigned long addr);
+ bool is_module_percpu_address(unsigned long addr);
+ bool is_module_text_address(unsigned long addr);
+
++static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (ktla_ktva(addr) >= (unsigned long)start &&
++ ktla_ktva(addr) < (unsigned long)start + size)
++ return 1;
++#endif
++
++ return ((void *)addr >= start && (void *)addr < start + size);
++}
++
++static inline int within_module_core_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
++}
++
++static inline int within_module_core_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
++}
++
++static inline int within_module_init_rx(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
++}
++
++static inline int within_module_init_rw(unsigned long addr, struct module *mod)
++{
++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
++}
++
+ static inline int within_module_core(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_core <= addr &&
+- addr < (unsigned long)mod->module_core + mod->core_size;
++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
+ }
+
+ static inline int within_module_init(unsigned long addr, struct module *mod)
+ {
+- return (unsigned long)mod->module_init <= addr &&
+- addr < (unsigned long)mod->module_init + mod->init_size;
++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
+ }
+
+ /* Search for module by name: must hold module_mutex. */
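Splitting module_core/module_init into _rx and _rw halves lets KERNEXEC map module code read-execute and module data read-write separately, while the rewritten within_module_core()/within_module_init() keep the old call shape, so address-lookup callers need no changes. Illustrative caller:

    #include <linux/module.h>

    /* Illustrative caller: unaffected by the rx/rw split above. */
    static bool addr_in_module(unsigned long addr, struct module *mod)
    {
            return within_module_core(addr, mod) ||
                   within_module_init(addr, mod);
    }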
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index b2be02e..6a9fdb1 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
+ sections. Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void *module_alloc_exec(unsigned long size);
++#else
++#define module_alloc_exec(x) module_alloc(x)
++#endif
++
+ /* Free memory returned from module_alloc. */
+ void module_free(struct module *mod, void *module_region);
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region);
++#else
++#define module_free_exec(x, y) module_free((x), (y))
++#endif
++
+ /* Apply the given relocation to the (simplified) ELF. Return -error
+ or 0. */
+ int apply_relocate(Elf_Shdr *sechdrs,
+diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
+index 7939f63..ec6df57 100644
+--- a/include/linux/moduleparam.h
++++ b/include/linux/moduleparam.h
+@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
+ * @len is usually just sizeof(string).
+ */
+ #define module_param_string(name, string, len, perm) \
+- static const struct kparam_string __param_string_##name \
++ static const struct kparam_string __param_string_##name __used \
+ = { len, string }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_ops_string, \
+@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
+ * module_param_named() for why this might be necessary.
+ */
+ #define module_param_array_named(name, array, type, nump, perm) \
+- static const struct kparam_array __param_arr_##name \
++ static const struct kparam_array __param_arr_##name __used \
+ = { .max = ARRAY_SIZE(array), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(array[0]), .elem = array }; \
+diff --git a/include/linux/namei.h b/include/linux/namei.h
+index ffc0213..2c1f2cb 100644
+--- a/include/linux/namei.h
++++ b/include/linux/namei.h
+@@ -24,7 +24,7 @@ struct nameidata {
+ unsigned seq;
+ int last_type;
+ unsigned depth;
+- char *saved_names[MAX_NESTED_LINKS + 1];
++ const char *saved_names[MAX_NESTED_LINKS + 1];
+
+ /* Intent data */
+ union {
+@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
+ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+ extern void unlock_rename(struct dentry *, struct dentry *);
+
+-static inline void nd_set_link(struct nameidata *nd, char *path)
++static inline void nd_set_link(struct nameidata *nd, const char *path)
+ {
+ nd->saved_names[nd->depth] = path;
+ }
+
+-static inline char *nd_get_link(struct nameidata *nd)
++static inline const char *nd_get_link(const struct nameidata *nd)
+ {
+ return nd->saved_names[nd->depth];
+ }
+diff --git a/include/linux/net.h b/include/linux/net.h
+index bd4f6c7..e9b8bb8 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -224,7 +224,7 @@ struct net_proto_family {
+ int (*create)(struct net *net, struct socket *sock,
+ int protocol, int kern);
+ struct module *owner;
+-};
++} __do_const;
+
+ struct iovec;
+ struct kvec;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8c43fd1..782342e 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -949,6 +949,7 @@ struct net_device_ops {
+ int (*ndo_set_features)(struct net_device *dev,
+ u32 features);
+ };
++typedef struct net_device_ops __no_const net_device_ops_no_const;
+
+ /*
+ * The DEVICE structure.
+@@ -1088,7 +1089,7 @@ struct net_device {
+ int iflink;
+
+ struct net_device_stats stats;
+- atomic_long_t rx_dropped; /* dropped packets by core network
++ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
+ * Do not use this in drivers.
+ */
+
+@@ -2585,7 +2586,7 @@ static inline int netif_is_bond_slave(struct net_device *dev)
+ return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
+ }
+
+-extern struct pernet_operations __net_initdata loopback_net_ops;
++extern struct pernet_operations __net_initconst loopback_net_ops;
+
+ static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
+ {
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index 857f502..350a113 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -141,7 +141,7 @@ struct nf_sockopt_ops {
+ #endif
+ /* Use the module struct to lock set/get code in place */
+ struct module *owner;
+-};
++} __do_const;
+
+ /* Function to register/unregister hook points. */
+ int nf_register_hook(struct nf_hook_ops *reg);
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 3540c6e..83adb6c 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -274,7 +274,7 @@ struct ip_set_type_variant {
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+-};
++} __do_const;
+
+ /* The core set type structure */
+ struct ip_set_type {
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 74d3386..e800dbf 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -65,7 +65,7 @@ struct nfnl_callback {
+ const struct nlattr * const cda[]);
+ const struct nla_policy *policy; /* netlink attribute policy */
+ const u_int16_t attr_count; /* number of nlattr's */
+-};
++} __do_const;
+
+ struct nfnetlink_subsystem {
+ const char *name;
+diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
+new file mode 100644
+index 0000000..33f4af8
+--- /dev/null
++++ b/include/linux/netfilter/xt_gradm.h
+@@ -0,0 +1,9 @@
++#ifndef _LINUX_NETFILTER_XT_GRADM_H
++#define _LINUX_NETFILTER_XT_GRADM_H 1
++
++struct xt_gradm_mtinfo {
++ __u16 flags;
++ __u16 invflags;
++};
++
++#endif
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index 5dc635f..35f5e11 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -31,7 +31,7 @@ struct nls_table {
+ const unsigned char *charset2upper;
+ struct module *owner;
+ struct nls_table *next;
+-};
++} __do_const;
+
+ /* this value hold the maximum octet of charset */
+ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index d65746e..62e72c2 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -51,7 +51,8 @@ struct notifier_block {
+ int (*notifier_call)(struct notifier_block *, unsigned long, void *);
+ struct notifier_block __rcu *next;
+ int priority;
+-};
++} __do_const;
++typedef struct notifier_block __no_const notifier_block_no_const;
+
+ struct atomic_notifier_head {
+ spinlock_t lock;
+diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
+index a4c5624..79d6d88 100644
+--- a/include/linux/oprofile.h
++++ b/include/linux/oprofile.h
+@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+ char const * name, ulong * val);
+
+-/** Create a file for read-only access to an atomic_t. */
++/** Create a file for read-only access to an atomic_unchecked_t. */
+ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+- char const * name, atomic_t * val);
++ char const * name, atomic_unchecked_t * val);
+
+ /** create a directory */
+ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 4633b2f..988bc08 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -129,7 +129,7 @@ struct parallel_data {
+ struct padata_instance *pinst;
+ struct padata_parallel_queue __percpu *pqueue;
+ struct padata_serial_queue __percpu *squeue;
+- atomic_t seq_nr;
++ atomic_unchecked_t seq_nr;
+ atomic_t reorder_objects;
+ atomic_t refcnt;
+ unsigned int max_seq_nr;
+diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
+index 45fc162..01a4068 100644
+--- a/include/linux/pci_hotplug.h
++++ b/include/linux/pci_hotplug.h
+@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
+ int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
+ int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
+ int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
+-};
++} __do_const;
++typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
+
+ /**
+ * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 8d5b91e..9209ea4 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -750,8 +750,8 @@ struct perf_event {
+
+ enum perf_event_active_state state;
+ unsigned int attach_state;
+- local64_t count;
+- atomic64_t child_count;
++ local64_t count; /* PaX: fix it one day */
++ atomic64_unchecked_t child_count;
+
+ /*
+ * These are the total time in nanoseconds that the event
+@@ -802,8 +802,8 @@ struct perf_event {
+ * These accumulate total time (in nanoseconds) that children
+ * events have been enabled and running, respectively.
+ */
+- atomic64_t child_total_time_enabled;
+- atomic64_t child_total_time_running;
++ atomic64_unchecked_t child_total_time_enabled;
++ atomic64_unchecked_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+@@ -1102,7 +1102,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
+ entry->ip[entry->nr++] = ip;
+ }
+
+-extern int sysctl_perf_event_paranoid;
++extern int sysctl_perf_event_legitimately_concerned;
+ extern int sysctl_perf_event_mlock;
+ extern int sysctl_perf_event_sample_rate;
+
+@@ -1110,19 +1110,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
++static inline bool perf_paranoid_any(void)
++{
++ return sysctl_perf_event_legitimately_concerned > 2;
++}
++
+ static inline bool perf_paranoid_tracepoint_raw(void)
+ {
+- return sysctl_perf_event_paranoid > -1;
++ return sysctl_perf_event_legitimately_concerned > -1;
+ }
+
+ static inline bool perf_paranoid_cpu(void)
+ {
+- return sysctl_perf_event_paranoid > 0;
++ return sysctl_perf_event_legitimately_concerned > 0;
+ }
+
+ static inline bool perf_paranoid_kernel(void)
+ {
+- return sysctl_perf_event_paranoid > 1;
++ return sysctl_perf_event_legitimately_concerned > 1;
+ }
+
+ extern void perf_event_init(void);
+@@ -1200,7 +1205,7 @@ static inline void perf_restore_debug_store(void) { }
+ */
+ #define perf_cpu_notifier(fn) \
+ do { \
+- static struct notifier_block fn##_nb __cpuinitdata = \
++ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
+ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
+ (void *)(unsigned long)smp_processor_id()); \
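The sysctl rename and the new perf_paranoid_any() helper add a third paranoia level above the stock kernel's maximum of 2. The gate it enables at event-creation time looks roughly like this — a sketch of the expected check shape, not the exact kernel/events/core.c hunk:

    /* Sketch only; the real check sits in kernel/events/core.c. */
    static int perf_paranoia_gate(void)
    {
            if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
                    return -EACCES;
            return 0;
    }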
+diff --git a/include/linux/personality.h b/include/linux/personality.h
+index 8fc7dd1a..c19d89e 100644
+--- a/include/linux/personality.h
++++ b/include/linux/personality.h
+@@ -44,6 +44,7 @@ enum {
+ #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
+ ADDR_NO_RANDOMIZE | \
+ ADDR_COMPAT_LAYOUT | \
++ ADDR_LIMIT_3GB | \
+ MMAP_PAGE_ZERO)
+
+ /*
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 0072a53..c5dcca5 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -47,9 +47,9 @@ struct pipe_buffer {
+ struct pipe_inode_info {
+ wait_queue_head_t wait;
+ unsigned int nrbufs, curbuf, buffers;
+- unsigned int readers;
+- unsigned int writers;
+- unsigned int waiting_writers;
++ atomic_t readers;
++ atomic_t writers;
++ atomic_t waiting_writers;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct page *tmp_page;
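Converting the pipe reader/writer counters to atomic_t pulls them under the PaX REFCOUNT overflow checks; every open-coded `++`/`--` and test at the call sites changes shape to match. Illustrative before/after of a call site (not a literal hunk from this patch):

    /* before: pipe->readers++;  if (!pipe->writers) ... */
    /* after (illustrative): */
    static void pipe_open_reader(struct pipe_inode_info *pipe)
    {
            atomic_inc(&pipe->readers);
    }

    static bool pipe_has_writers(struct pipe_inode_info *pipe)
    {
            return atomic_read(&pipe->writers) != 0;
    }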
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index d3085e7..fd01052 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
+
+ static inline void pm_runtime_mark_last_busy(struct device *dev)
+ {
+- ACCESS_ONCE(dev->power.last_busy) = jiffies;
++ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
+ }
+
+ #else /* !CONFIG_PM_RUNTIME */
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index 195aafc..49a7bc2 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
+ struct pnp_fixup {
+ char id[7];
+ void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+-};
++} __do_const;
+
+ /* config parameters */
+ #define PNP_CONFIG_NORMAL 0x0001
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 79159de..f1233a9 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -19,8 +19,8 @@
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
++#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
+
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
+index b8d4ddd..bb59d8b 100644
+--- a/include/linux/ppp-comp.h
++++ b/include/linux/ppp-comp.h
+@@ -111,7 +111,7 @@ struct compressor {
+ struct module *owner;
+ /* Extra skb space needed by the compressor algorithm */
+ unsigned int comp_extra;
+-};
++} __do_const;
+
+ /*
+ * The return value from decompress routine is the length of the
+diff --git a/include/linux/prctl.h b/include/linux/prctl.h
+index a3baeb2..b527252 100644
+--- a/include/linux/prctl.h
++++ b/include/linux/prctl.h
+@@ -102,4 +102,19 @@
+
+ #define PR_MCE_KILL_GET 34
+
++/*
++ * If no_new_privs is set, then operations that grant new privileges (i.e.
++ * execve) will either fail or not grant them. This affects suid/sgid,
++ * file capabilities, and LSMs.
++ *
++ * Operations that merely manipulate or drop existing privileges (setresuid,
++ * capset, etc.) will still work. Drop those privileges if you want them gone.
++ *
++ * Changing LSM security domain is considered a new privilege. So, for example,
++ * asking selinux for a specific new context (e.g. with runcon) will result
++ * in execve returning -EPERM.
++ */
++#define PR_SET_NO_NEW_PRIVS 38
++#define PR_GET_NO_NEW_PRIVS 39
++
+ #endif /* _LINUX_PRCTL_H */
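The comment block above spells out the no_new_privs contract: the flag is a one-way switch, set once and inherited across fork and exec. A minimal userspace (not kernel) sketch of how it is used:

    /* Userspace sketch: lock out privilege gain before exec. */
    #include <sys/prctl.h>
    #include <unistd.h>

    int drop_and_exec(char *const argv[], char *const envp[])
    {
            /* One-way switch: cannot be cleared again by this task. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            /* A setuid binary now runs with the caller's uid instead. */
            return execve(argv[0], argv, envp);
    }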
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index f0e22f7..82dd544 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
+ extern int printk_needs_cpu(int cpu);
+ extern void printk_tick(void);
+
++extern int kptr_restrict;
++
+ #ifdef CONFIG_PRINTK
+ asmlinkage __printf(1, 0)
+ int vprintk(const char *fmt, va_list args);
+@@ -112,7 +114,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+-extern int kptr_restrict;
+
+ void log_buf_kexec_setup(void);
+ void __init setup_log_buf(int early);
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 643b96c..9544c71 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+ return proc_create_data(name, mode, parent, proc_fops, NULL);
+ }
+
++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
++ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
++{
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
++#else
++ return proc_create_data(name, mode, parent, proc_fops, NULL);
++#endif
++}
++
++
+ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
+ mode_t mode, struct proc_dir_entry *base,
+ read_proc_t *read_proc, void * data)
+@@ -247,7 +260,7 @@ struct proc_ns_operations {
+ void *(*get)(struct task_struct *task);
+ void (*put)(void *ns);
+ int (*install)(struct nsproxy *nsproxy, void *ns);
+-};
++} __do_const;
+ extern const struct proc_ns_operations netns_operations;
+ extern const struct proc_ns_operations utsns_operations;
+ extern const struct proc_ns_operations ipcns_operations;
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 800f113..13b3715 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -62,8 +62,9 @@
+ #define PTRACE_O_TRACEEXEC 0x00000010
+ #define PTRACE_O_TRACEVFORKDONE 0x00000020
+ #define PTRACE_O_TRACEEXIT 0x00000040
++#define PTRACE_O_TRACESECCOMP 0x00000080
+
+-#define PTRACE_O_MASK 0x0000007f
++#define PTRACE_O_MASK 0x000000ff
+
+ /* Wait extended result codes for the above trace options. */
+ #define PTRACE_EVENT_FORK 1
+@@ -73,6 +74,7 @@
+ #define PTRACE_EVENT_VFORK_DONE 5
+ #define PTRACE_EVENT_EXIT 6
+ #define PTRACE_EVENT_STOP 7
++#define PTRACE_EVENT_SECCOMP 8
+
+ #include <asm/ptrace.h>
+
+@@ -101,8 +103,9 @@
+ #define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+ #define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+ #define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
++#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
+
+-#define PT_TRACE_MASK 0x000003f4
++#define PT_TRACE_MASK 0x00000bf4
+
+ /* single stepping state bits (used on ARM and PA-RISC) */
+ #define PT_SINGLESTEP_BIT 31
+@@ -129,10 +132,12 @@ extern void __ptrace_unlink(struct task_struct *child);
+ extern void exit_ptrace(struct task_struct *tracer);
+ #define PTRACE_MODE_READ 1
+ #define PTRACE_MODE_ATTACH 2
+-/* Returns 0 on success, -errno on denial. */
+-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
+ /* Returns true on success, false on denial. */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
++/* Returns true on success, false on denial. */
++extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
++/* Returns true on success, false on denial. */
++extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
+
+ static inline int ptrace_reparented(struct task_struct *child)
+ {
+@@ -197,9 +202,10 @@ static inline void ptrace_event(int event, unsigned long message)
+ if (unlikely(ptrace_event_enabled(current, event))) {
+ current->ptrace_message = message;
+ ptrace_notify((event << 8) | SIGTRAP);
+- } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
++ } else if (event == PTRACE_EVENT_EXEC) {
+ /* legacy EXEC report via SIGTRAP */
+- send_sig(SIGTRAP, current, 0);
++ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
++ send_sig(SIGTRAP, current, 0);
+ }
+ }
+
+diff --git a/include/linux/random.h b/include/linux/random.h
+index f5e1311..d51eec7 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -41,19 +41,27 @@ struct rand_pool_info {
+ };
+
+ struct rnd_state {
+- __u32 s1, s2, s3;
++ __u32 s1, s2, s3, s4;
+ };
+
+ /* Exported functions */
+
+ #ifdef __KERNEL__
+
+-extern void rand_initialize_irq(int irq);
+-
+ extern void add_device_randomness(const void *, unsigned int);
++
++static inline void add_latent_entropy(void)
++{
++
++#ifdef LATENT_ENTROPY_PLUGIN
++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++#endif
++
++}
++
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+- unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++ unsigned int value) __latent_entropy;
++extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+@@ -67,10 +75,25 @@ extern const struct file_operations random_fops, urandom_fops;
+ unsigned int get_random_int(void);
+ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+
+-u32 random32(void);
+-void srandom32(u32 seed);
++u32 prandom_u32(void) __intentional_overflow(-1);
++void prandom_bytes(void *buf, int nbytes);
++void prandom_seed(u32 seed);
++void prandom_reseed_late(void);
+
+-u32 prandom32(struct rnd_state *);
++/*
++ * These macros are preserved for backward compatibility and should be
++ * removed as soon as a transition is finished.
++ */
++#define random32() prandom_u32()
++#define srandom32(seed) prandom_seed(seed)
++
++u32 prandom_u32_state(struct rnd_state *state) __intentional_overflow(-1);
++void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
++
++static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
++{
++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
++}
+
+ /*
+ * Handle minimum values for seeds
+@@ -81,17 +104,18 @@ static inline u32 __seed(u32 x, u32 m)
+ }
+
+ /**
+- * prandom32_seed - set seed for prandom32().
++ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+-static inline void prandom32_seed(struct rnd_state *state, u64 seed)
++static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+ {
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+- state->s1 = __seed(i, 2);
+- state->s2 = __seed(i, 8);
+- state->s3 = __seed(i, 16);
++ state->s1 = __seed(i, 2U);
++ state->s2 = __seed(i, 8U);
++ state->s3 = __seed(i, 16U);
++ state->s4 = __seed(i, 128U);
+ }
+
+ #ifdef CONFIG_ARCH_RANDOM
+@@ -107,6 +131,11 @@ static inline int arch_get_random_int(unsigned int *v)
+ }
+ #endif
+
++static inline u32 next_pseudo_random32(u32 seed)
++{
++ return seed * 1664525 + 1013904223;
++}
++
+ #endif /* __KERNEL___ */
+
+ #endif /* _LINUX_RANDOM_H */
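The prandom32 to prandom_u32_state rename (plus the widened four-word rnd_state) keeps old callers working through the compatibility macros while the seeded API stays deterministic. A quick sketch of the seeded stream, in kernel context:

    /* Sketch: a reproducible pseudo-random stream from a fixed seed. */
    static void demo_prandom(void)
    {
            struct rnd_state st;
            u32 a, b;

            prandom_seed_state(&st, 0x0123456789abcdefULL);
            a = prandom_u32_state(&st);
            b = prandom_u32_state(&st);
            /* Re-seeding with the same value reproduces a, b exactly;
             * this is for tests/self-checks, never for cryptography. */
            (void)a; (void)b;
    }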
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index 3863352..4ec4bfb 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -39,6 +39,9 @@ static inline void __list_add_rcu(struct list_head *new,
+ next->prev = new;
+ }
+
++extern void __pax_list_add_rcu(struct list_head *new,
++ struct list_head *prev, struct list_head *next);
++
+ /**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+@@ -60,6 +63,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+ __list_add_rcu(new, head, head->next);
+ }
+
++static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
++{
++ __pax_list_add_rcu(new, head, head->next);
++}
++
+ /**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+@@ -82,6 +90,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
+ __list_add_rcu(new, head->prev, head);
+ }
+
++static inline void pax_list_add_tail_rcu(struct list_head *new,
++ struct list_head *head)
++{
++ __pax_list_add_rcu(new, head->prev, head);
++}
++
+ /**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+@@ -112,6 +126,8 @@ static inline void list_del_rcu(struct list_head *entry)
+ entry->prev = LIST_POISON2;
+ }
+
++extern void pax_list_del_rcu(struct list_head *entry);
++
+ /**
+ * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+diff --git a/include/linux/reboot.h b/include/linux/reboot.h
+index e0879a7..a12f962 100644
+--- a/include/linux/reboot.h
++++ b/include/linux/reboot.h
+@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
+ * Architecture-specific implementations of sys_reboot commands.
+ */
+
+-extern void machine_restart(char *cmd);
+-extern void machine_halt(void);
+-extern void machine_power_off(void);
++extern void machine_restart(char *cmd) __noreturn;
++extern void machine_halt(void) __noreturn;
++extern void machine_power_off(void) __noreturn;
+
+ extern void machine_shutdown(void);
+ struct pt_regs;
+@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
+ */
+
+ extern void kernel_restart_prepare(char *cmd);
+-extern void kernel_restart(char *cmd);
+-extern void kernel_halt(void);
+-extern void kernel_power_off(void);
++extern void kernel_restart(char *cmd) __noreturn;
++extern void kernel_halt(void) __noreturn;
++extern void kernel_power_off(void) __noreturn;
+
+ extern int C_A_D; /* for sysctl */
+ void ctrl_alt_del(void);
+@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
+ * Emergency restart, callable from an interrupt handler.
+ */
+
+-extern void emergency_restart(void);
++extern void emergency_restart(void) __noreturn;
+ #include <asm/emergency-restart.h>
+
+ #endif
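Marking the halt/restart entry points __noreturn above is purely an optimizer and diagnostics aid: code after the call is provably dead. Sketch of the effect on a hypothetical caller:

    /* Hypothetical caller: with __noreturn the compiler knows the
     * branch never falls through, so it can drop the dead tail and
     * stop warning about a missing return on that path. */
    static int do_emergency(int really)
    {
            if (really)
                    emergency_restart();   /* never returns */
            return 0;
    }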
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 686f373..6ade19e 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -160,7 +160,8 @@ struct user_regset {
+ unsigned int align;
+ unsigned int bias;
+ unsigned int core_note_type;
+-};
++} __do_const;
++typedef struct user_regset __no_const user_regset_no_const;
+
+ /**
+ * struct user_regset_view - available regsets
+diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
+index 96d465f..b084e05 100644
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
+ #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+
+ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
+-#define get_generation(s) atomic_read (&fs_generation(s))
++#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
+ #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
+ #define __fs_changed(gen,s) (gen != get_generation (s))
+ #define fs_changed(gen,s) \
+diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
+index 52c83b6..18ed7eb 100644
+--- a/include/linux/reiserfs_fs_sb.h
++++ b/include/linux/reiserfs_fs_sb.h
+@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
+ /* Comment? -Hans */
+ wait_queue_head_t s_wait;
+ /* To be obsoleted soon by per buffer seals.. -Hans */
+- atomic_t s_generation_counter; // increased by one every time the
++ atomic_unchecked_t s_generation_counter; // increased by one every time the
+ // tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
+diff --git a/include/linux/relay.h b/include/linux/relay.h
+index 14a86bc..17d0700 100644
+--- a/include/linux/relay.h
++++ b/include/linux/relay.h
+@@ -159,7 +159,7 @@ struct rchan_callbacks
+ * The callback should return 0 if successful, negative if not.
+ */
+ int (*remove_buf_file)(struct dentry *dentry);
+-};
++} __no_const;
+
+ /*
+ * CONFIG_RELAY kernel API, kernel/relay.c
+diff --git a/include/linux/rio.h b/include/linux/rio.h
+index 4d50611..c6858a2 100644
+--- a/include/linux/rio.h
++++ b/include/linux/rio.h
+@@ -315,7 +315,7 @@ struct rio_ops {
+ int mbox, void *buffer, size_t len);
+ int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
+ void *(*get_inb_message)(struct rio_mport *mport, int mbox);
+-};
++} __no_const;
+
+ #define RIO_RESOURCE_MEM 0x00000100
+ #define RIO_RESOURCE_DOORBELL 0x00000200
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 2148b12..519b820 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+ void anon_vma_init(void); /* create anon_vma_cachep */
+ int anon_vma_prepare(struct vm_area_struct *);
+ void unlink_anon_vmas(struct vm_area_struct *);
+-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
+ void __anon_vma_link(struct vm_area_struct *);
+
+ static inline void anon_vma_merge(struct vm_area_struct *vma,
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 312d047..dbf4637 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -101,6 +101,7 @@ struct bio_list;
+ struct fs_struct;
+ struct perf_event_context;
+ struct blk_plug;
++struct linux_binprm;
+
+ /*
+ * List of flags we want to share for kernel threads,
+@@ -355,7 +356,7 @@ extern char __sched_text_start[], __sched_text_end[];
+ extern int in_sched_functions(unsigned long addr);
+
+ #define MAX_SCHEDULE_TIMEOUT LONG_MAX
+-extern signed long schedule_timeout(signed long timeout);
++extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
+ extern signed long schedule_timeout_interruptible(signed long timeout);
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+@@ -381,10 +382,23 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
+ #ifdef CONFIG_MMU
++
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++ return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+@@ -403,6 +417,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+ extern void set_dumpable(struct mm_struct *mm, int value);
+ extern int get_dumpable(struct mm_struct *mm);
+
++/* get/set_dumpable() values */
++#define SUID_DUMPABLE_DISABLED 0
++#define SUID_DUMPABLE_ENABLED 1
++#define SUID_DUMPABLE_SAFE 2
++
+ #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+ #define SUID_DUMP_USER 1 /* Dump as user of process */
+ #define SUID_DUMP_ROOT 2 /* Dump as root */
+@@ -634,6 +653,17 @@ struct signal_struct {
+ #ifdef CONFIG_TASKSTATS
+ struct taskstats *stats;
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++ u32 curr_ip;
++ u32 saved_ip;
++ u32 gr_saddr;
++ u32 gr_daddr;
++ u16 gr_sport;
++ u16 gr_dport;
++ u8 used_accept:1;
++#endif
++
+ #ifdef CONFIG_AUDIT
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+@@ -715,6 +745,14 @@ struct user_struct {
+ struct key *session_keyring; /* UID's default session keyring */
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
++ unsigned char kernel_banned;
++#endif
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ unsigned char suid_banned;
++ unsigned long suid_ban_expires;
++#endif
++
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ uid_t uid;
+@@ -1129,7 +1167,7 @@ struct sched_class {
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ void (*task_move_group) (struct task_struct *p, int on_rq);
+ #endif
+-};
++} __do_const;
+
+ struct load_weight {
+ unsigned long weight, inv_weight;
+@@ -1305,6 +1343,8 @@ struct task_struct {
+ * execve */
+ unsigned in_iowait:1;
+
++ /* task may not gain privileges */
++ unsigned no_new_privs:1;
+
+ /* Revert to default priority/policy when forking */
+ unsigned sched_reset_on_fork:1;
+@@ -1345,8 +1385,8 @@ struct task_struct {
+ struct list_head thread_group;
+
+ struct completion *vfork_done; /* for vfork() */
+- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+@@ -1362,13 +1402,6 @@ struct task_struct {
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+-/* process credentials */
+- const struct cred __rcu *real_cred; /* objective and real subjective task
+- * credentials (COW) */
+- const struct cred __rcu *cred; /* effective (overridable) subjective task
+- * credentials (COW) */
+- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
+-
+ char comm[TASK_COMM_LEN]; /* executable name excluding path
+ - access with [gs]et_task_comm (which lock
+ it with task_lock())
+@@ -1385,8 +1418,16 @@ struct task_struct {
+ #endif
+ /* CPU-specific state of this task */
+ struct thread_struct thread;
++/* thread_info moved to task_struct */
++#ifdef CONFIG_X86
++ struct thread_info tinfo;
++#endif
+ /* filesystem information */
+ struct fs_struct *fs;
++
++ const struct cred __rcu *cred; /* effective (overridable) subjective task
++ * credentials (COW) */
++
+ /* open file information */
+ struct files_struct *files;
+ /* namespaces */
+@@ -1409,7 +1450,7 @@ struct task_struct {
+ uid_t loginuid;
+ unsigned int sessionid;
+ #endif
+- seccomp_t seccomp;
++ struct seccomp seccomp;
+
+ /* Thread group tracking */
+ u32 parent_exec_id;
+@@ -1433,6 +1474,11 @@ struct task_struct {
+ struct rt_mutex_waiter *pi_blocked_on;
+ #endif
+
++/* process credentials */
++ const struct cred __rcu *real_cred; /* objective and real subjective task
++ * credentials (COW) */
++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
++
+ #ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+@@ -1548,6 +1594,30 @@ struct task_struct {
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
++
++#ifdef CONFIG_GRKERNSEC
++ /* grsecurity */
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ u64 exec_id;
++#endif
++#ifdef CONFIG_GRKERNSEC_SETXID
++ const struct cred *delayed_cred;
++#endif
++ struct dentry *gr_chroot_dentry;
++ struct acl_subject_label *acl;
++ struct acl_subject_label *tmpacl;
++ struct acl_role_label *role;
++ struct file *exec_file;
++ unsigned long brute_expires;
++ u16 acl_role_id;
++ u8 inherited;
++ /* is this the task that authenticated to the special role */
++ u8 acl_sp_role;
++ u8 is_writable;
++ u8 brute;
++ u8 gr_is_chrooted;
++#endif
++
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+@@ -1582,6 +1652,52 @@ struct task_struct {
+ #endif
+ };
+
++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++extern int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++/* if tsk != current then task_lock must be held on it */
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline unsigned long pax_get_flags(struct task_struct *tsk)
++{
++ if (likely(tsk->mm))
++ return tsk->mm->pax_flags;
++ else
++ return 0UL;
++}
++
++/* if tsk != current then task_lock must be held on it */
++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
++{
++ if (likely(tsk->mm)) {
++ tsk->mm->pax_flags = flags;
++ return 0;
++ }
++ return -EINVAL;
++}
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_initial_flags(struct linux_binprm *bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
++#endif
++
++struct path;
++extern char *pax_get_path(const struct path *path, char *buf, int buflen);
++extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
++extern void pax_report_refcount_overflow(struct pt_regs *regs);
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
+@@ -2097,7 +2213,9 @@ void yield(void);
+ extern struct exec_domain default_exec_domain;
+
+ union thread_union {
++#ifndef CONFIG_X86
+ struct thread_info thread_info;
++#endif
+ unsigned long stack[THREAD_SIZE/sizeof(long)];
+ };
+
+@@ -2130,6 +2248,7 @@ extern struct pid_namespace init_pid_ns;
+ */
+
+ extern struct task_struct *find_task_by_vpid(pid_t nr);
++extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
+ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+ struct pid_namespace *ns);
+
+@@ -2251,6 +2370,12 @@ static inline void mmdrop(struct mm_struct * mm)
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+ extern struct mm_struct *get_task_mm(struct task_struct *task);
++/*
++ * Grab a reference to a task's mm, if it is not already going away
++ * and if ptrace_may_access() on the task with the given mode
++ * succeeds.
++ */
++extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
+ /* Remove the current task's stale references to the old mm_struct */
+ extern void mm_release(struct task_struct *, struct mm_struct *);
+ /* Allocate a new mm structure and copy contents from tsk->mm */
+@@ -2267,9 +2392,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
+ extern void exit_itimers(struct signal_struct *);
+ extern void flush_itimer_signals(void);
+
+-extern NORET_TYPE void do_group_exit(int);
++extern __noreturn void do_group_exit(int);
+
+-extern void daemonize(const char *, ...);
+ extern int allow_signal(int);
+ extern int disallow_signal(int);
+
+@@ -2432,9 +2556,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+
+ #endif
+
+-static inline int object_is_on_stack(void *obj)
++static inline int object_starts_on_stack(void *obj)
+ {
+- void *stack = task_stack_page(current);
++ const void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+ }
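+
One rename in the sched.h hunk above is easy to miss: object_is_on_stack() becomes object_starts_on_stack(), because the test only proves that the object's first byte lies within the current thread's stack, not that the whole object fits. A userspace sketch of the same range check, with a static buffer standing in for task_stack_page() and an illustrative THREAD_SIZE:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192	/* illustrative, not the kernel's value */

static char fake_stack[THREAD_SIZE];	/* stands in for task_stack_page() */

static int object_starts_on_stack(const void *obj)
{
	uintptr_t o = (uintptr_t)obj;
	uintptr_t s = (uintptr_t)fake_stack;

	/* only the start address is tested; a large object can still
	 * run past the end of the stack even when this returns 1 */
	return o >= s && o < s + THREAD_SIZE;
}

int main(void)
{
	static char elsewhere[16];

	printf("inside:  %d\n", object_starts_on_stack(&fake_stack[100]));
	printf("outside: %d\n", object_starts_on_stack(elsewhere));
	return 0;
}
+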
+diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
+index 899fbb4..1cb4138 100644
+--- a/include/linux/screen_info.h
++++ b/include/linux/screen_info.h
+@@ -43,7 +43,8 @@ struct screen_info {
+ __u16 pages; /* 0x32 */
+ __u16 vesa_attributes; /* 0x34 */
+ __u32 capabilities; /* 0x36 */
+- __u8 _reserved[6]; /* 0x3a */
++ __u16 vesapm_size; /* 0x3a */
++ __u8 _reserved[4]; /* 0x3c */
+ } __attribute__((packed));
+
+ #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
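+
The screen_info change above splits a 16-bit vesapm_size field out of the reserved area, which is only safe because it leaves every offset and the overall size untouched. That invariant can be checked mechanically; a sketch modeling just the tail of the packed structure (field names match the hunk, the struct names are made up for the test):

#include <stdio.h>

struct old_tail {
	unsigned int  capabilities;	/* 0x36 */
	unsigned char _reserved[6];	/* 0x3a */
} __attribute__((packed));

struct new_tail {
	unsigned int   capabilities;	/* 0x36 */
	unsigned short vesapm_size;	/* 0x3a */
	unsigned char  _reserved[4];	/* 0x3c */
} __attribute__((packed));

int main(void)
{
	/* both print 10: u16 + 4 reserved bytes == 6 reserved bytes */
	printf("old tail: %zu, new tail: %zu\n",
	       sizeof(struct old_tail), sizeof(struct new_tail));
	return 0;
}
+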
+diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
+index cc7a4e9..306733e 100644
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -1,25 +1,89 @@
+ #ifndef _LINUX_SECCOMP_H
+ #define _LINUX_SECCOMP_H
+
++#include <linux/compiler.h>
++#include <linux/types.h>
+
++
++/* Valid values for seccomp.mode and prctl(PR_SET_SECCOMP, <mode>) */
++#define SECCOMP_MODE_DISABLED 0 /* seccomp is not in use. */
++#define SECCOMP_MODE_STRICT 1 /* uses hard-coded filter. */
++#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
++
++/*
++ * All BPF programs must return a 32-bit value.
++ * The bottom 16-bits are for optional return data.
++ * The upper 16-bits are ordered from least permissive values to most.
++ *
++ * The ordering ensures that a min_t() over composed return values always
++ * selects the least permissive choice.
++ */
++#define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
++#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
++#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
++#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
++#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
++
++/* Masks for the return value sections. */
++#define SECCOMP_RET_ACTION 0xffff0000U
++#define SECCOMP_RET_DATA 0x0000ffffU
++
++/**
++ * struct seccomp_data - the format the BPF program executes over.
++ * @nr: the system call number
++ * @arch: indicates system call convention as an AUDIT_ARCH_* value
++ * as defined in <linux/audit.h>.
++ * @instruction_pointer: at the time of the system call.
++ * @args: up to 6 system call arguments always stored as 64-bit values
++ * regardless of the architecture.
++ */
++struct seccomp_data {
++ int nr;
++ __u32 arch;
++ __u64 instruction_pointer;
++ __u64 args[6];
++};
++
++#ifdef __KERNEL__
+ #ifdef CONFIG_SECCOMP
+
+ #include <linux/thread_info.h>
+ #include <asm/seccomp.h>
+
+-typedef struct { int mode; } seccomp_t;
++struct seccomp_filter;
++/**
++ * struct seccomp - the state of a seccomp'ed process
++ *
++ * @mode: indicates one of the valid values above for controlled
++ * system calls available to a process.
++ * @filter: The metadata and ruleset for determining what system calls
++ * are allowed for a task.
++ *
++ * @filter must only be accessed from the context of current as there
++ * is no locking.
++ */
++struct seccomp {
++ int mode;
++ struct seccomp_filter *filter;
++};
+
+-extern void __secure_computing(int);
+-static inline void secure_computing(int this_syscall)
++/*
++ * Direct callers to __secure_computing should be updated as
++ * CONFIG_HAVE_ARCH_SECCOMP_FILTER propagates.
++ */
++extern void __secure_computing(int) __deprecated;
++extern int __secure_computing_int(int);
++static inline int secure_computing(int this_syscall)
+ {
+ if (unlikely(test_thread_flag(TIF_SECCOMP)))
+- __secure_computing(this_syscall);
++ return __secure_computing_int(this_syscall);
++ return 0;
+ }
+
+ extern long prctl_get_seccomp(void);
+-extern long prctl_set_seccomp(unsigned long);
++extern long prctl_set_seccomp(unsigned long, char __user *);
+
+-static inline int seccomp_mode(seccomp_t *s)
++static inline int seccomp_mode(struct seccomp *s)
+ {
+ return s->mode;
+ }
+@@ -28,25 +92,40 @@ static inline int seccomp_mode(seccomp_t *s)
+
+ #include <linux/errno.h>
+
+-typedef struct { } seccomp_t;
++struct seccomp { };
++struct seccomp_filter { };
+
+-#define secure_computing(x) do { } while (0)
++#define secure_computing(x) 0
+
+ static inline long prctl_get_seccomp(void)
+ {
+ return -EINVAL;
+ }
+
+-static inline long prctl_set_seccomp(unsigned long arg2)
++static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
+ {
+ return -EINVAL;
+ }
+
+-static inline int seccomp_mode(seccomp_t *s)
++static inline int seccomp_mode(struct seccomp *s)
+ {
+ return 0;
+ }
+-
+ #endif /* CONFIG_SECCOMP */
+
++#ifdef CONFIG_SECCOMP_FILTER
++extern void put_seccomp_filter(struct task_struct *tsk);
++extern void get_seccomp_filter(struct task_struct *tsk);
++extern u32 seccomp_bpf_load(int off);
++#else /* CONFIG_SECCOMP_FILTER */
++static inline void put_seccomp_filter(struct task_struct *tsk)
++{
++ return;
++}
++static inline void get_seccomp_filter(struct task_struct *tsk)
++{
++ return;
++}
++#endif /* CONFIG_SECCOMP_FILTER */
++#endif /* __KERNEL__ */
+ #endif /* _LINUX_SECCOMP_H */
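+
The seccomp.h rewrite above backports the mode-2 (filter) API: a process supplies a classic-BPF program that inspects struct seccomp_data for each syscall and returns one of the SECCOMP_RET_* actions, the ordering of which guarantees that a min_t() over composed return values picks the least permissive verdict. A minimal userspace sketch of installing such a filter, denying getpid(2) with EPERM and allowing everything else; it assumes kernel headers that ship these definitions and a kernel carrying this backport (or >= 3.5):

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* A = seccomp_data.nr, the syscall number */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* if (nr == __NR_getpid) fall through, else skip one */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		/* deny: the errno value rides in the SECCOMP_RET_DATA bits */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | 1 /* EPERM */),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len    = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	/* unprivileged filter installation requires no_new_privs,
	 * the task flag added to sched.h earlier in this patch */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("prctl");
		return 1;
	}
	printf("getpid() -> %ld\n", (long)syscall(SYS_getpid)); /* -1, EPERM */
	return 0;
}
+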
+diff --git a/include/linux/security.h b/include/linux/security.h
+index e8c619d..99d0f1f 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -37,6 +37,7 @@
+ #include <linux/xfrm.h>
+ #include <linux/slab.h>
+ #include <linux/xattr.h>
++#include <linux/grsecurity.h>
+ #include <net/flow.h>
+
+ /* Maximum number of letters for an LSM name string */
+@@ -98,8 +99,6 @@ struct seq_file;
+ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
+ extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+
+-void reset_security_ops(void);
+-
+ #ifdef CONFIG_MMU
+ extern unsigned long mmap_min_addr;
+ extern unsigned long dac_mmap_min_addr;
+@@ -130,6 +129,7 @@ struct request_sock;
+ #define LSM_UNSAFE_SHARE 1
+ #define LSM_UNSAFE_PTRACE 2
+ #define LSM_UNSAFE_PTRACE_CAP 4
++#define LSM_UNSAFE_NO_NEW_PRIVS 8
+
+ #ifdef CONFIG_MMU
+ /*
+@@ -1676,6 +1676,8 @@ int security_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *permitted);
+ int security_capable(struct user_namespace *ns, const struct cred *cred,
+ int cap);
++int security_capable_noaudit(struct user_namespace *ns, const struct cred *cred,
++ int cap);
+ int security_real_capable(struct task_struct *tsk, struct user_namespace *ns,
+ int cap);
+ int security_real_capable_noaudit(struct task_struct *tsk,
+@@ -1880,6 +1882,12 @@ static inline int security_capable(struct user_namespace *ns,
+ return cap_capable(current, cred, ns, cap, SECURITY_CAP_AUDIT);
+ }
+
++static inline int security_capable_noaudit(struct user_namespace *ns,
++ const struct cred *cred, int cap)
++{
++ return cap_capable(current, cred, ns, cap, SECURITY_CAP_NOAUDIT);
++}
++
+ static inline int security_real_capable(struct task_struct *tsk, struct user_namespace *ns, int cap)
+ {
+ int ret;
+diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
+index dc368b8..e895209 100644
+--- a/include/linux/semaphore.h
++++ b/include/linux/semaphore.h
+@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
+ }
+
+ extern void down(struct semaphore *sem);
+-extern int __must_check down_interruptible(struct semaphore *sem);
++extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
+ extern int __must_check down_killable(struct semaphore *sem);
+ extern int __must_check down_trylock(struct semaphore *sem);
+ extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
+diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
+index 0b69a46..b2ffa4c 100644
+--- a/include/linux/seq_file.h
++++ b/include/linux/seq_file.h
+@@ -24,6 +24,9 @@ struct seq_file {
+ struct mutex lock;
+ const struct seq_operations *op;
+ int poll_event;
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ u64 exec_id;
++#endif
+ void *private;
+ };
+
+@@ -33,6 +36,7 @@ struct seq_operations {
+ void * (*next) (struct seq_file *m, void *v, loff_t *pos);
+ int (*show) (struct seq_file *m, void *v);
+ };
++typedef struct seq_operations __no_const seq_operations_no_const;
+
+ #define SEQ_SKIP 1
+
+diff --git a/include/linux/shm.h b/include/linux/shm.h
+index 92808b8..c28cac4 100644
+--- a/include/linux/shm.h
++++ b/include/linux/shm.h
+@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
+
+ /* The task created the shm object. NULL if the task is dead. */
+ struct task_struct *shm_creator;
++#ifdef CONFIG_GRKERNSEC
++ time_t shm_createtime;
++ pid_t shm_lapid;
++#endif
+ };
+
+ /* shm_mode upper byte flags */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 85180bf..cc75886 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -538,7 +538,7 @@ extern void consume_skb(struct sk_buff *skb);
+ extern void __kfree_skb(struct sk_buff *skb);
+ extern struct sk_buff *__alloc_skb(unsigned int size,
+ gfp_t priority, int fclone, int node);
+-static inline struct sk_buff *alloc_skb(unsigned int size,
++static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
+ gfp_t priority)
+ {
+ return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
+@@ -640,7 +640,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
+ */
+ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ {
+- return list->next == (struct sk_buff *)list;
++ return list->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -653,7 +653,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
+ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->next == (struct sk_buff *)list;
++ return skb->next == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -666,7 +666,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+ {
+- return skb->prev == (struct sk_buff *)list;
++ return skb->prev == (const struct sk_buff *)list;
+ }
+
+ /**
+@@ -1561,7 +1561,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+ #ifndef NET_SKB_PAD
+-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
++#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
+ #endif
+
+ extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+@@ -2100,7 +2100,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+ int noblock, int *err);
+ extern unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
++extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
+ int offset, struct iovec *to,
+ int size);
+ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
+@@ -2380,6 +2380,9 @@ static inline void nf_reset(struct sk_buff *skb)
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+ #endif
++#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
++ skb->nf_trace = 0;
++#endif
+ }
+
+ static inline void nf_reset_trace(struct sk_buff *skb)
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index a595dce..e710d26 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -11,14 +11,29 @@
+
+ #include <linux/gfp.h>
+ #include <linux/types.h>
++#include <linux/err.h>
+
+ /*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
+ */
+ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
++#else
++#define SLAB_USERCOPY 0x00000000UL
++#endif
++
+ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
+ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
++#else
++#define SLAB_NO_SANITIZE 0x00000000UL
++#endif
++
+ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
+ #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
+ #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
+@@ -87,10 +102,22 @@
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+-#define ZERO_SIZE_PTR ((void *)16)
++#define ZERO_SIZE_PTR \
++({ \
++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
++ (void *)(-MAX_ERRNO-1L); \
++})
+
+-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+- (unsigned long)ZERO_SIZE_PTR)
++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++#ifdef CONFIG_X86_64
++#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
++#else
++#define PAX_MEMORY_SANITIZE_VALUE '\xff'
++#endif
++extern bool pax_sanitize_slab;
++#endif
+
+ /*
+ * struct kmem_cache related prototypes
+@@ -161,6 +188,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
+ void kfree(const void *);
+ void kzfree(const void *);
+ size_t ksize(const void *);
++const char *check_heap_object(const void *ptr, unsigned long n);
++bool is_usercopy_object(const void *ptr);
+
+ /*
+ * Allocator specific definitions. These are mainly used to establish optimized
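+
Two things change in ZERO_SIZE_PTR above: the cookie moves from address 16 to just below the [-MAX_ERRNO, -1] errno-pointer range, and ZERO_OR_NULL_PTR() becomes a single unsigned comparison that catches NULL (whose decrement wraps to ULONG_MAX), the cookie itself, and ERR_PTR-shaped values in one go. The arithmetic is plain C and can be verified standalone; a sketch with the kernel's MAX_ERRNO value inlined:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int object;
	void *err_ptr = (void *)-12L;	/* shaped like ERR_PTR(-ENOMEM) */

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));	 /* 1 */
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));/* 1 */
	printf("ERR_PTR value -> %d\n", ZERO_OR_NULL_PTR(err_ptr));	 /* 1 */
	printf("real object   -> %d\n", ZERO_OR_NULL_PTR(&object));	 /* 0 */
	return 0;
}
+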
+diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
+index d00e0ba..e075bd20 100644
+--- a/include/linux/slab_def.h
++++ b/include/linux/slab_def.h
+@@ -68,10 +68,14 @@ struct kmem_cache {
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+- atomic_t allochit;
+- atomic_t allocmiss;
+- atomic_t freehit;
+- atomic_t freemiss;
++ atomic_unchecked_t allochit;
++ atomic_unchecked_t allocmiss;
++ atomic_unchecked_t freehit;
++ atomic_unchecked_t freemiss;
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ atomic_unchecked_t sanitized;
++ atomic_unchecked_t not_sanitized;
++#endif
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+@@ -105,6 +109,11 @@ struct cache_sizes {
+ #ifdef CONFIG_ZONE_DMA
+ struct kmem_cache *cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ struct kmem_cache *cs_usercopycachep;
++#endif
++
+ };
+ extern struct cache_sizes malloc_sizes[];
+
+@@ -152,6 +161,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_trace(size, cachep, flags);
+@@ -181,6 +197,7 @@ kmem_cache_alloc_node_trace(size_t size,
+ }
+ #endif
+
++static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
+ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+ {
+ struct kmem_cache *cachep;
+@@ -205,6 +222,13 @@ found:
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & GFP_USERCOPY)
++ cachep = malloc_sizes[i].cs_usercopycachep;
++ else
++#endif
++
+ cachep = malloc_sizes[i].cs_cachep;
+
+ return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
+index a32bcfd..f0246c3 100644
+--- a/include/linux/slub_def.h
++++ b/include/linux/slub_def.h
+@@ -89,7 +89,7 @@ struct kmem_cache {
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+- int refcount; /* Refcount for slab cache destroy */
++ atomic_t refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *);
+ int inuse; /* Offset to metadata */
+ int align; /* Alignment */
+@@ -150,7 +150,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
+ * Sorry that the following has to be that ugly but some versions of GCC
+ * have trouble with constant propagation and loops.
+ */
+-static __always_inline int kmalloc_index(size_t size)
++static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
+ {
+ if (!size)
+ return 0;
+@@ -215,9 +215,9 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
+ }
+
+ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+-void *__kmalloc(size_t size, gfp_t flags);
++void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
+
+-static __always_inline void *
++static __always_inline __size_overflow(1) void *
+ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+ {
+ void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+diff --git a/include/linux/sonet.h b/include/linux/sonet.h
+index de8832d..0147b46 100644
+--- a/include/linux/sonet.h
++++ b/include/linux/sonet.h
+@@ -61,7 +61,7 @@ struct sonet_stats {
+ #include <linux/atomic.h>
+
+ struct k_sonet_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff --git a/include/linux/stddef.h b/include/linux/stddef.h
+index 6a40c76..1747b67 100644
+--- a/include/linux/stddef.h
++++ b/include/linux/stddef.h
+@@ -3,14 +3,10 @@
+
+ #include <linux/compiler.h>
+
++#ifdef __KERNEL__
++
+ #undef NULL
+-#if defined(__cplusplus)
+-#define NULL 0
+-#else
+ #define NULL ((void *)0)
+-#endif
+-
+-#ifdef __KERNEL__
+
+ enum {
+ false = 0,
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 3d8f9c4..349a695 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -98,7 +98,7 @@ struct rpc_procinfo {
+ unsigned int p_timer; /* Which RTT timer to use */
+ u32 p_statidx; /* Which procedure to account */
+ char * p_name; /* name of procedure */
+-};
++} __do_const;
+
+ #ifdef __KERNEL__
+
+@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+ {
+ switch (sap->sa_family) {
+ case AF_INET:
+- return ntohs(((struct sockaddr_in *)sap)->sin_port);
++ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
++ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+ }
+@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+ {
+- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
++ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
++ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
+ }
+
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 35b37b1..c39eab4 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -408,7 +408,7 @@ struct svc_procedure {
+ unsigned int pc_count; /* call count */
+ unsigned int pc_cachetype; /* cache info (NFS) */
+ unsigned int pc_xdrressize; /* maximum size of XDR reply */
+-};
++} __do_const;
+
+ /*
+ * Function prototypes.
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index c14fe86..d04f36c 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
+ extern unsigned int svcrdma_max_requests;
+ extern unsigned int svcrdma_max_req_size;
+
+-extern atomic_t rdma_stat_recv;
+-extern atomic_t rdma_stat_read;
+-extern atomic_t rdma_stat_write;
+-extern atomic_t rdma_stat_sq_starve;
+-extern atomic_t rdma_stat_rq_starve;
+-extern atomic_t rdma_stat_rq_poll;
+-extern atomic_t rdma_stat_rq_prod;
+-extern atomic_t rdma_stat_sq_poll;
+-extern atomic_t rdma_stat_sq_prod;
++extern atomic_unchecked_t rdma_stat_recv;
++extern atomic_unchecked_t rdma_stat_read;
++extern atomic_unchecked_t rdma_stat_write;
++extern atomic_unchecked_t rdma_stat_sq_starve;
++extern atomic_unchecked_t rdma_stat_rq_starve;
++extern atomic_unchecked_t rdma_stat_rq_poll;
++extern atomic_unchecked_t rdma_stat_rq_prod;
++extern atomic_unchecked_t rdma_stat_sq_poll;
++extern atomic_unchecked_t rdma_stat_sq_prod;
+
+ #define RPCRDMA_VERSION 1
+
+@@ -292,7 +292,7 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+ if (wr_ary) {
+ rp_ary = (struct rpcrdma_write_array *)
+ &wr_ary->
+- wc_array[wr_ary->wc_nchunks].wc_target.rs_length;
++ wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
+
+ goto found_it;
+ }
+diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
+index 25d333c..a722ca0 100644
+--- a/include/linux/sunrpc/svcauth.h
++++ b/include/linux/sunrpc/svcauth.h
+@@ -100,7 +100,7 @@ struct auth_ops {
+ int (*release)(struct svc_rqst *rq);
+ void (*domain_release)(struct auth_domain *);
+ int (*set_client)(struct svc_rqst *rq);
+-};
++} __do_const;
+
+ #define SVC_GARBAGE 1
+ #define SVC_SYSERR 2
+diff --git a/include/linux/swab.h b/include/linux/swab.h
+index ea0c02f..0eed39d 100644
+--- a/include/linux/swab.h
++++ b/include/linux/swab.h
+@@ -43,7 +43,7 @@
+ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+ */
+
+-static inline __attribute_const__ __u16 __fswab16(__u16 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
+ {
+ #ifdef __arch_swab16
+ return __arch_swab16(val);
+@@ -52,7 +52,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
+ #endif
+ }
+
+-static inline __attribute_const__ __u32 __fswab32(__u32 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
+ {
+ #ifdef __arch_swab32
+ return __arch_swab32(val);
+@@ -61,7 +61,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
+ #endif
+ }
+
+-static inline __attribute_const__ __u64 __fswab64(__u64 val)
++static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
+ {
+ #ifdef __arch_swab64
+ return __arch_swab64(val);
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 86a24b1..e6974f1 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -83,12 +83,20 @@ struct file_handle;
+ #define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__)
+ #define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__)
+
+-#define __SC_LONG1(t1, a1) long a1
+-#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__)
+-#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__)
+-#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__)
+-#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__)
+-#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__)
++#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
++#define __SC_TYPE(t, a) __typeof( \
++ __builtin_choose_expr( \
++ sizeof(t) > sizeof(int), \
++ (t) 0, \
++ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
++ )) a
++
++#define __SC_LONG1(t1, a1) __SC_TYPE(t1, a1)
++#define __SC_LONG2(t2, a2, ...) __SC_TYPE(t2, a2), __SC_LONG1(__VA_ARGS__)
++#define __SC_LONG3(t3, a3, ...) __SC_TYPE(t3, a3), __SC_LONG2(__VA_ARGS__)
++#define __SC_LONG4(t4, a4, ...) __SC_TYPE(t4, a4), __SC_LONG3(__VA_ARGS__)
++#define __SC_LONG5(t5, a5, ...) __SC_TYPE(t5, a5), __SC_LONG4(__VA_ARGS__)
++#define __SC_LONG6(t6, a6, ...) __SC_TYPE(t6, a6), __SC_LONG5(__VA_ARGS__)
+
+ #define __SC_CAST1(t1, a1) (t1) a1
+ #define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__)
+@@ -392,11 +400,11 @@ asmlinkage long sys_sync(void);
+ asmlinkage long sys_fsync(unsigned int fd);
+ asmlinkage long sys_fdatasync(unsigned int fd);
+ asmlinkage long sys_bdflush(int func, long data);
+-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
+- char __user *type, unsigned long flags,
++asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
++ const char __user *type, unsigned long flags,
+ void __user *data);
+-asmlinkage long sys_umount(char __user *name, int flags);
+-asmlinkage long sys_oldumount(char __user *name);
++asmlinkage long sys_umount(const char __user *name, int flags);
++asmlinkage long sys_oldumount(const char __user *name);
+ asmlinkage long sys_truncate(const char __user *path, long length);
+ asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+ asmlinkage long sys_stat(const char __user *filename,
+@@ -608,7 +616,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
+ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
+ asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
+- struct sockaddr __user *, int);
++ struct sockaddr __user *, int) __intentional_overflow(0);
+ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+ asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags);
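+
The __SC_LONG rewrite above feeds the size_overflow instrumentation: instead of flattening every syscall argument to a bare long, __SC_TYPE() keeps anything wider than int as its own type and widens narrower arguments to long or unsigned long according to their real signedness, so truncation and sign mix-ups stay visible to the plugin. The selection machinery is ordinary GNU C and can be exercised in userspace; a sketch (one-argument form, macro bodies copied from the hunk):

#include <stdio.h>

#define __same_type(a, b) \
	__builtin_types_compatible_p(__typeof__(a), __typeof__(b))

#define __TYPE_IS_U(t) \
	(__same_type((t)0, 0UL) || __same_type((t)0, 0U) || \
	 __same_type((t)0, (unsigned short)0) || \
	 __same_type((t)0, (unsigned char)0))

#define __SC_TYPE(t) __typeof__(			\
	__builtin_choose_expr(				\
		sizeof(t) > sizeof(int),		\
		(t)0,					\
		__builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L)))

int main(void)
{
	__SC_TYPE(int) a = 0;		/* narrow, signed   */
	__SC_TYPE(unsigned int) b = 0;	/* narrow, unsigned */
	__SC_TYPE(long long) c = 0;	/* already wide     */

	printf("int          -> long?          %d\n", __same_type(a, 0L));
	printf("unsigned int -> unsigned long? %d\n", __same_type(b, 0UL));
	printf("long long    -> long long?     %d\n", __same_type(c, 0LL));
	return 0;	/* all three print 1 */
}
+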
+diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
+index 27b3b0b..e093dd9 100644
+--- a/include/linux/syscore_ops.h
++++ b/include/linux/syscore_ops.h
+@@ -16,7 +16,7 @@ struct syscore_ops {
+ int (*suspend)(void);
+ void (*resume)(void);
+ void (*shutdown)(void);
+-};
++} __do_const;
+
+ extern void register_syscore_ops(struct syscore_ops *ops);
+ extern void unregister_syscore_ops(struct syscore_ops *ops);
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 703cfa33..dff53c0 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -155,7 +155,11 @@ enum
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ };
+
+-
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+@@ -961,13 +965,13 @@ extern void sysctl_head_finish(struct ctl_table_header *prev);
+ extern int sysctl_perm(struct ctl_table_root *root,
+ struct ctl_table *table, int op);
+
+-typedef struct ctl_table ctl_table;
+-
+ typedef int proc_handler (struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+ extern int proc_dostring(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
++extern int proc_dostring_modpriv(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_minmax(struct ctl_table *, int,
+@@ -1045,7 +1049,9 @@ struct ctl_table
+ struct ctl_table_poll *poll;
+ void *extra1;
+ void *extra2;
+-};
++} __do_const;
++typedef struct ctl_table __no_const ctl_table_no_const;
++typedef struct ctl_table ctl_table;
+
+ struct ctl_table_root {
+ struct list_head root_list;
+diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
+index 20f63d3..fdd3cbb 100644
+--- a/include/linux/sysdev.h
++++ b/include/linux/sysdev.h
+@@ -98,7 +98,7 @@ struct sysdev_attribute {
+ ssize_t (*store)(struct sys_device *, struct sysdev_attribute *,
+ const char *, size_t);
+ };
+-
++typedef struct sysdev_attribute __no_const sysdev_attribute_no_const;
+
+ #define _SYSDEV_ATTR(_name, _mode, _show, _store) \
+ { \
+diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
+index dac0859..4ea79a9 100644
+--- a/include/linux/sysfs.h
++++ b/include/linux/sysfs.h
+@@ -30,7 +30,8 @@ struct attribute {
+ struct lock_class_key *key;
+ struct lock_class_key skey;
+ #endif
+-};
++} __do_const;
++typedef struct attribute __no_const attribute_no_const;
+
+ /**
+ * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
+@@ -58,8 +59,8 @@ struct attribute_group {
+ mode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ struct attribute **attrs;
+-};
+-
++} __do_const;
++typedef struct attribute_group __no_const attribute_group_no_const;
+
+
+ /**
+@@ -95,7 +96,8 @@ struct bin_attribute {
+ char *, loff_t, size_t);
+ int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+ struct vm_area_struct *vma);
+-};
++} __do_const;
++typedef struct bin_attribute __no_const bin_attribute_no_const;
+
+ /**
+ * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
+diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
+index 7faf933..9b85a0c 100644
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -16,6 +16,7 @@
+
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/compiler.h>
+
+ /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
+ #define SYSRQ_DEFAULT_ENABLE 1
+@@ -36,7 +37,7 @@ struct sysrq_key_op {
+ char *help_msg;
+ char *action_msg;
+ int enable_mask;
+-};
++} __do_const;
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 8d03f07..e723aa8 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -123,6 +123,13 @@ static inline void set_restore_sigmask(void)
+ }
+ #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
+
++extern void __check_object_size(const void *ptr, unsigned long n, bool to);
++static inline void check_object_size(const void *ptr, unsigned long n, bool to)
++{
++ if (!__builtin_constant_p(n))
++ __check_object_size(ptr, n, to);
++}
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _LINUX_THREAD_INFO_H */
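+
check_object_size() above is the PAX_USERCOPY entry point: __builtin_constant_p() lets copies whose length is a compile-time constant skip the runtime bounds walk entirely, so only variable-length copies to or from userland pay for __check_object_size(). The gating can be sketched in userspace, with the caveat that __builtin_constant_p() only sees through the wrapper once it is inlined, so build with optimization (-O2) to observe the skip:

#include <stdio.h>

/* stand-in for the kernel's heap/stack bounds walk */
static void __check_object_size(const void *ptr, unsigned long n)
{
	printf("runtime check: %p, %lu bytes\n", (void *)ptr, n);
}

static inline void check_object_size(const void *ptr, unsigned long n)
{
	if (!__builtin_constant_p(n))	/* constant sizes skip the walk */
		__check_object_size(ptr, n);
}

int main(int argc, char **argv)
{
	char buf[64];

	(void)argv;
	check_object_size(buf, sizeof(buf));	     /* constant: silent at -O2 */
	check_object_size(buf, (unsigned long)argc); /* runtime value: checked  */
	return 0;
}
+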
+diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
+index a71a292..51bd91d 100644
+--- a/include/linux/tracehook.h
++++ b/include/linux/tracehook.h
+@@ -54,12 +54,12 @@ struct linux_binprm;
+ /*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+-static inline void ptrace_report_syscall(struct pt_regs *regs)
++static inline int ptrace_report_syscall(struct pt_regs *regs)
+ {
+ int ptrace = current->ptrace;
+
+ if (!(ptrace & PT_PTRACED))
+- return;
++ return 0;
+
+ ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
++
++ return fatal_signal_pending(current);
+ }
+
+ /**
+@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
+ static inline __must_check int tracehook_report_syscall_entry(
+ struct pt_regs *regs)
+ {
+- ptrace_report_syscall(regs);
+- return 0;
++ return ptrace_report_syscall(regs);
+ }
+
+ /**
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index ecdaeb9..8d27e20 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -286,7 +286,7 @@ struct tty_operations {
+ void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+ #endif
+ const struct file_operations *proc_fops;
+-};
++} __do_const;
+
+ struct tty_driver {
+ int magic; /* magic number for this structure */
+diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
+index ff7dc08..893e1bd 100644
+--- a/include/linux/tty_ldisc.h
++++ b/include/linux/tty_ldisc.h
+@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
+
+ struct module *owner;
+
+- int refcount;
++ atomic_t refcount;
+ };
+
+ struct tty_ldisc {
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 57a9723..dbe234a 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -213,10 +213,26 @@ typedef struct {
+ int counter;
+ } atomic_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ int counter;
++} atomic_unchecked_t;
++#else
++typedef atomic_t atomic_unchecked_t;
++#endif
++
+ #ifdef CONFIG_64BIT
+ typedef struct {
+ long counter;
+ } atomic64_t;
++
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
+ #endif
+
+ struct list_head {
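+
The atomic_unchecked_t twins above are the type-level split behind PAX_REFCOUNT: plain atomic_t operations gain overflow detection (a wrapping reference count is a use-after-free in the making), while counters for which wraparound is benign, the statistics and IDs converted throughout this patch, move to the _unchecked variants and keep raw, trap-free arithmetic. A userspace sketch of the split, using __builtin_add_overflow in place of the arch-specific trap and eliding actual atomicity:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;		/* checked   */
typedef struct { int counter; } atomic_unchecked_t;	/* unchecked */

static void atomic_add(int i, atomic_t *v)
{
	int sum;

	/* a wrapping refcount precedes a use-after-free: trap instead */
	if (__builtin_add_overflow(v->counter, i, &sum)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	v->counter = sum;
}

static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	/* statistics counter: wraparound is harmless, keep it cheap */
	v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
	atomic_unchecked_t stat = { INT_MAX };
	atomic_t ref = { INT_MAX };

	atomic_add_unchecked(1, &stat);		/* silently wraps */
	printf("stat wrapped to %d\n", stat.counter);
	atomic_add(1, &ref);			/* aborts */
	return 0;
}
+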
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 5ca0951..ab496a5 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+- set_fs(KERNEL_DS); \
+ pagefault_disable(); \
+- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ set_fs(KERNEL_DS); \
++ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
+ set_fs(old_fs); \
++ pagefault_enable(); \
+ ret; \
+ })
+
+diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
+index 99c1b4d..562e6f3 100644
+--- a/include/linux/unaligned/access_ok.h
++++ b/include/linux/unaligned/access_ok.h
+@@ -4,34 +4,34 @@
+ #include <linux/kernel.h>
+ #include <asm/byteorder.h>
+
+-static inline u16 get_unaligned_le16(const void *p)
++static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
+ {
+- return le16_to_cpup((__le16 *)p);
++ return le16_to_cpup((const __le16 *)p);
+ }
+
+-static inline u32 get_unaligned_le32(const void *p)
++static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
+ {
+- return le32_to_cpup((__le32 *)p);
++ return le32_to_cpup((const __le32 *)p);
+ }
+
+-static inline u64 get_unaligned_le64(const void *p)
++static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
+ {
+- return le64_to_cpup((__le64 *)p);
++ return le64_to_cpup((const __le64 *)p);
+ }
+
+-static inline u16 get_unaligned_be16(const void *p)
++static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
+ {
+- return be16_to_cpup((__be16 *)p);
++ return be16_to_cpup((const __be16 *)p);
+ }
+
+-static inline u32 get_unaligned_be32(const void *p)
++static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
+ {
+- return be32_to_cpup((__be32 *)p);
++ return be32_to_cpup((const __be32 *)p);
+ }
+
+-static inline u64 get_unaligned_be64(const void *p)
++static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
+ {
+- return be64_to_cpup((__be64 *)p);
++ return be64_to_cpup((const __be64 *)p);
+ }
+
+ static inline void put_unaligned_le16(u16 val, void *p)
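+
This header is the variant used on architectures that can dereference unaligned pointers directly, so the hunk only tightens constness and marks the byte-swapping results as intentionally wrapping for the size_overflow plugin. For contrast, a portable userspace version of one accessor that never performs an unaligned load, assembling the value byte by byte:

#include <stdio.h>
#include <stdint.h>

static uint32_t get_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	/* byte-wise assembly: legal at any alignment, any endianness */
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* 0x44332211 stored little-endian at an odd address */
	uint8_t buf[8] = { 0xff, 0x11, 0x22, 0x33, 0x44 };

	printf("0x%08x\n", get_unaligned_le32(buf + 1));	/* 0x44332211 */
	return 0;
}
+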
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 93629fc..be16802 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -497,7 +497,7 @@ struct usb_device {
+ struct usb_device *children[USB_MAXCHILDREN];
+
+ u32 quirks;
+- atomic_t urbnum;
++ atomic_unchecked_t urbnum;
+
+ unsigned long active_duration;
+
+@@ -1442,7 +1442,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+
+ extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+- void *data, __u16 size, int timeout);
++ void *data, __u16 size, int timeout) __intentional_overflow(-1);
+ extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
+ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
+index e5a40c3..d75f068 100644
+--- a/include/linux/usb/renesas_usbhs.h
++++ b/include/linux/usb/renesas_usbhs.h
+@@ -39,7 +39,7 @@ enum {
+ */
+ struct renesas_usbhs_driver_callback {
+ int (*notify_hotplug)(struct platform_device *pdev);
+-};
++} __no_const;
+
+ /*
+ * callback functions for platform
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 76f4396..3e0a4a8 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -33,6 +33,7 @@ struct usbnet {
+ wait_queue_head_t *wait;
+ struct mutex phy_mutex;
+ unsigned char suspend_count;
++ unsigned char pkt_cnt, pkt_err;
+
+ /* i/o info: pipes etc */
+ unsigned in, out;
+@@ -69,6 +70,8 @@ struct usbnet {
+ # define EVENT_DEV_WAKING 6
+ # define EVENT_DEV_ASLEEP 7
+ # define EVENT_DEV_OPEN 8
++# define EVENT_NO_RUNTIME_PM 9
++# define EVENT_RX_KILL 10
+ };
+
+ static inline struct usb_driver *driver_of(struct usb_interface *intf)
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index 6f8fbcf..8259001 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -25,9 +25,35 @@
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+
++#ifdef CONFIG_PAX_REFCOUNT
++#define MODULE_PAX_REFCOUNT "REFCOUNT "
++#else
++#define MODULE_PAX_REFCOUNT ""
++#endif
++
++#ifdef CONSTIFY_PLUGIN
++#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
++#else
++#define MODULE_CONSTIFY_PLUGIN ""
++#endif
++
++#ifdef STACKLEAK_PLUGIN
++#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
++#else
++#define MODULE_STACKLEAK_PLUGIN ""
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSEC "
++#else
++#define MODULE_GRSEC ""
++#endif
++
+ #define VERMAGIC_STRING \
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
+- MODULE_ARCH_VERMAGIC
++ MODULE_ARCH_VERMAGIC \
++ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
++ MODULE_GRSEC
+
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index 4bde182..943f335 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
+ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
+ #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
++#endif
++
+ /* bits [20..32] reserved for arch specific ioremap internals */
+
+ /*
+@@ -124,7 +129,7 @@ extern void free_vm_area(struct vm_struct *area);
+
+ /* for /dev/kmem */
+ extern long vread(char *buf, char *addr, unsigned long count);
+-extern long vwrite(char *buf, char *addr, unsigned long count);
++extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
+
+ /*
+ * Internals. Don't use..
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 65efb92..a90154f 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
+ /*
+ * Zone based page accounting with per cpu differentials.
+ */
+-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+ static inline void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+ {
+- atomic_long_add(x, &zone->vm_stat[item]);
+- atomic_long_add(x, &vm_stat[item]);
++ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
++ atomic_long_add_unchecked(x, &vm_stat[item]);
+ }
+
+-static inline unsigned long global_page_state(enum zone_stat_item item)
++static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&vm_stat[item]);
++ long x = atomic_long_read_unchecked(&vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -106,10 +106,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
+ return x;
+ }
+
+-static inline unsigned long zone_page_state(struct zone *zone,
++static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+ #ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
+ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+ {
+- long x = atomic_long_read(&zone->vm_stat[item]);
++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
+
+ #ifdef CONFIG_SMP
+ int cpu;
+@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
+
+ static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_inc(&zone->vm_stat[item]);
+- atomic_long_inc(&vm_stat[item]);
++ atomic_long_inc_unchecked(&zone->vm_stat[item]);
++ atomic_long_inc_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __inc_zone_page_state(struct page *page,
+@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
+
+ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ {
+- atomic_long_dec(&zone->vm_stat[item]);
+- atomic_long_dec(&vm_stat[item]);
++ atomic_long_dec_unchecked(&zone->vm_stat[item]);
++ atomic_long_dec_unchecked(&vm_stat[item]);
+ }
+
+ static inline void __dec_zone_page_state(struct page *page,
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index e5d1220..5a87d07 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -57,6 +57,11 @@
+ #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
++
+ #ifdef __KERNEL__
+
+ #include <linux/types.h>
+@@ -73,7 +78,7 @@ struct xattr_handler {
+ size_t size, int handler_flags);
+ int (*set)(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags);
+-};
++} __do_const;
+
+ struct xattr {
+ char *name;
+@@ -82,6 +87,9 @@ struct xattr {
+ };
+
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ssize_t pax_getxattr(struct dentry *, void *, size_t);
++#endif
+ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
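+
The user.pax.flags name defined above (together with CONFIG_PAX_XATTR_PAX_FLAGS and pax_getxattr() in the same hunk) gives PaX a per-file flag store that travels with the binary as an ordinary user xattr, reachable through the standard xattr syscalls. A sketch of setting it and reading it back; the flag-string value ("m" here) is purely illustrative, the real format being defined by the PaX userland tooling:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "./a.out";
	const char *flags = "m";	/* illustrative flag string */
	char readback[16];
	ssize_t n;

	if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0)) {
		perror("setxattr");
		return 1;
	}
	n = getxattr(path, "user.pax.flags", readback, sizeof(readback) - 1);
	if (n < 0) {
		perror("getxattr");
		return 1;
	}
	readback[n] = '\0';
	printf("user.pax.flags = \"%s\"\n", readback);
	return 0;
}
+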
+diff --git a/include/linux/yam.h b/include/linux/yam.h
+index 7fe2822..512cdc2 100644
+--- a/include/linux/yam.h
++++ b/include/linux/yam.h
+@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
+
+ struct yamdrv_ioctl_mcs {
+ int cmd;
+- int bitrate;
++ unsigned int bitrate;
+ unsigned char bits[YAM_FPGA_SIZE];
+ };
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+index 9c5a6b4..09c9438 100644
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -31,6 +31,7 @@
+ #define _ZLIB_H
+
+ #include <linux/zconf.h>
++#include <linux/compiler.h>
+
+ /* zlib deflate based on ZLIB_VERSION "1.1.3" */
+ /* zlib inflate based on ZLIB_VERSION "1.2.3" */
+@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
+
+ /* basic functions */
+
+-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
++extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
+ /*
+ Returns the number of bytes that needs to be allocated for a per-
+ stream workspace with the specified parameters. A pointer to this
+diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
+index c7c40f1..5c31482 100644
+--- a/include/media/v4l2-dev.h
++++ b/include/media/v4l2-dev.h
+@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
+
+
+ struct v4l2_file_operations {
+- struct module *owner;
++ struct module * const owner;
+ ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
+ unsigned int (*poll) (struct file *, struct poll_table_struct *);
+diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
+index d61febf..f0094f6 100644
+--- a/include/media/v4l2-device.h
++++ b/include/media/v4l2-device.h
+@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
+ this function returns 0. If the name ends with a digit (e.g. cx18),
+ then the name will be set to cx18-0 since cx180 looks really odd. */
+ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+- atomic_t *instance);
++ atomic_unchecked_t *instance);
+
+ /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
+ Since the parent disappears this ensures that v4l2_dev doesn't have an
+diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
+index 4d1c74a..8e58054 100644
+--- a/include/media/v4l2-ioctl.h
++++ b/include/media/v4l2-ioctl.h
+@@ -275,7 +275,6 @@ struct v4l2_ioctl_ops {
+ bool valid_prio, int cmd, void *arg);
+ };
+
+-
+ /* v4l debugging and diagnostics */
+
+ /* Debug bitmask flags to be used on V4L2 */
+diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
+index adcbb20..62c2559 100644
+--- a/include/net/9p/transport.h
++++ b/include/net/9p/transport.h
+@@ -57,7 +57,7 @@ struct p9_trans_module {
+ int (*cancel) (struct p9_client *, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *,
+ char *, char *, int , int, int, int);
+-};
++} __do_const;
+
+ void v9fs_register_trans(struct p9_trans_module *m);
+ void v9fs_unregister_trans(struct p9_trans_module *m);
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 6cc18f3..b0df15a 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -387,7 +387,7 @@ struct l2cap_ops {
+ int (*recv) (void *data, struct sk_buff *skb);
+ void (*close) (void *data);
+ void (*state_change) (void *data, int state);
+-};
++} __do_const;
+
+ struct l2cap_conn {
+ struct hci_conn *hcon;
+diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
+index 9e5425b..8136ffc 100644
+--- a/include/net/caif/cfctrl.h
++++ b/include/net/caif/cfctrl.h
+@@ -52,7 +52,7 @@ struct cfctrl_rsp {
+ void (*radioset_rsp)(void);
+ void (*reject_rsp)(struct cflayer *layer, u8 linkid,
+ struct cflayer *client_layer);
+-};
++} __no_const;
+
+ /* Link Setup Parameters for CAIF-Links. */
+ struct cfctrl_link_param {
+@@ -101,8 +101,8 @@ struct cfctrl_request_info {
+ struct cfctrl {
+ struct cfsrvl serv;
+ struct cfctrl_rsp res;
+- atomic_t req_seq_no;
+- atomic_t rsp_seq_no;
++ atomic_unchecked_t req_seq_no;
++ atomic_unchecked_t rsp_seq_no;
+ struct list_head list;
+ /* Protects from simultaneous access to first_req list */
+ spinlock_t info_list_lock;
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 2a7eefd..3250f3b 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
+
+ extern void flow_cache_flush(void);
+ extern void flow_cache_flush_deferred(void);
+-extern atomic_t flow_cache_genid;
++extern atomic_unchecked_t flow_cache_genid;
+
+ #endif
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index 82d8d09..d1e04ff 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -116,7 +116,7 @@ struct genl_ops {
+ struct netlink_callback *cb);
+ int (*done)(struct netlink_callback *cb);
+ struct list_head ops_list;
+-};
++} __do_const;
+
+ extern int genl_register_family(struct genl_family *family);
+ extern int genl_register_family_with_ops(struct genl_family *family,
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index ca2755f..85ec88c 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -61,7 +61,7 @@ struct inet_connection_sock_af_ops {
+ void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+ int (*bind_conflict)(const struct sock *sk,
+ const struct inet_bind_bucket *tb);
+-};
++} __do_const;
+
+ /** inet_connection_sock - INET connection oriented sock
+ *
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 34b06da..03b1d34 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -52,8 +52,8 @@ struct inet_peer {
+ */
+ union {
+ struct {
+- atomic_t rid; /* Frag reception counter */
+- atomic_t ip_id_count; /* IP ID for the next packet */
++ atomic_unchecked_t rid; /* Frag reception counter */
++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+@@ -115,16 +115,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
+ /* can be called with or without local BH being disabled */
+ static inline int inet_getid(struct inet_peer *p, int more)
+ {
+- int old, new;
++ int id;
+ more++;
+ inet_peer_refcheck(p);
+- do {
+- old = atomic_read(&p->ip_id_count);
+- new = old + more;
+- if (!new)
+- new = 1;
+- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
+- return new;
++ id = atomic_add_return_unchecked(more, &p->ip_id_count);
++ if (!id)
++ id = atomic_inc_return_unchecked(&p->ip_id_count);
++ return id;
+ }
+
+ #endif /* _NET_INETPEER_H */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index b935e6c..511250f 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -214,7 +214,7 @@ extern struct local_ports {
+ } sysctl_local_ports;
+ extern void inet_get_local_port_range(int *low, int *high);
+
+-extern unsigned long *sysctl_local_reserved_ports;
++extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
+ static inline int inet_is_reserved_local_port(int port)
+ {
+ return test_bit(port, sysctl_local_reserved_ports);
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 2124004..3713897 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -144,7 +144,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+
+ #define FIB_RES_SADDR(net, res) \
+ ((FIB_RES_NH(res).nh_saddr_genid == \
+- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
++ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
+ FIB_RES_NH(res).nh_saddr : \
+ fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
+ #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 416dcb0..e203877 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -509,7 +509,7 @@ struct ip_vs_conn {
+ struct ip_vs_conn *control; /* Master control connection */
+ atomic_t n_control; /* Number of controlled ones */
+ struct ip_vs_dest *dest; /* real server */
+- atomic_t in_pkts; /* incoming packet counter */
++ atomic_unchecked_t in_pkts; /* incoming packet counter */
+
+ /* packet transmitter for different forwarding methods. If it
+ mangles the packet, it must return NF_DROP or better NF_STOLEN,
+@@ -647,7 +647,7 @@ struct ip_vs_dest {
+ __be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
+ volatile unsigned flags; /* dest status flags */
+- atomic_t conn_flags; /* flags to copy to conn */
++ atomic_unchecked_t conn_flags; /* flags to copy to conn */
+ atomic_t weight; /* server weight */
+
+ atomic_t refcnt; /* reference counter */
+@@ -878,11 +878,11 @@ struct netns_ipvs {
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+- struct ctl_table *lblc_ctl_table;
++ ctl_table_no_const *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+- struct ctl_table *lblcr_ctl_table;
++ ctl_table_no_const *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
+index 59ba38bc..d515662 100644
+--- a/include/net/irda/ircomm_tty.h
++++ b/include/net/irda/ircomm_tty.h
+@@ -35,6 +35,7 @@
+ #include <linux/termios.h>
+ #include <linux/timer.h>
+ #include <linux/tty.h> /* struct tty_struct */
++#include <asm/local.h>
+
+ #include <net/irda/irias_object.h>
+ #include <net/irda/ircomm_core.h>
+@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+- int open_count;
+- int blocked_open; /* # of blocked opens */
++ local_t open_count;
++ local_t blocked_open; /* # of blocked opens */
+
+ /* Protect concurent access to :
+ * o self->open_count
+diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
+index f2419cf..473679f 100644
+--- a/include/net/iucv/af_iucv.h
++++ b/include/net/iucv/af_iucv.h
+@@ -139,7 +139,7 @@ struct iucv_sock {
+ struct iucv_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+- atomic_t autobind_name;
++ atomic_unchecked_t autobind_name;
+ };
+
+ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
+index df83f69..9b640b8 100644
+--- a/include/net/llc_c_ac.h
++++ b/include/net/llc_c_ac.h
+@@ -87,7 +87,7 @@
+ #define LLC_CONN_AC_STOP_SENDACK_TMR 70
+ #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
+
+-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+
+ extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
+index 23a4093..6d106df 100644
+--- a/include/net/llc_c_ev.h
++++ b/include/net/llc_c_ev.h
+@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
+ return (struct llc_conn_state_ev *)skb->cb;
+ }
+
+-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
++typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+
+ extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+ extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
+index 0e79cfb..f46db31 100644
+--- a/include/net/llc_c_st.h
++++ b/include/net/llc_c_st.h
+@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
+ u8 next_state;
+ llc_conn_ev_qfyr_t *ev_qualifiers;
+ llc_conn_action_t *ev_actions;
+-};
++} __do_const;
+
+ struct llc_conn_state {
+ u8 current_state;
+diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
+index 37a3bbd..55a4241 100644
+--- a/include/net/llc_s_ac.h
++++ b/include/net/llc_s_ac.h
+@@ -23,7 +23,7 @@
+ #define SAP_ACT_TEST_IND 9
+
+ /* All action functions must look like this */
+-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
++typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+
+ extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
+ struct sk_buff *skb);
+diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
+index 567c681..cd73ac0 100644
+--- a/include/net/llc_s_st.h
++++ b/include/net/llc_s_st.h
+@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
+ llc_sap_ev_t ev;
+ u8 next_state;
+ llc_sap_action_t *ev_actions;
+-};
++} __do_const;
+
+ struct llc_sap_state {
+ u8 curr_state;
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 1a6201a..66d9531 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -3529,7 +3529,7 @@ struct rate_control_ops {
+ void (*add_sta_debugfs)(void *priv, void *priv_sta,
+ struct dentry *dir);
+ void (*remove_sta_debugfs)(void *priv, void *priv_sta);
+-};
++} __do_const;
+
+ static inline int rate_supported(struct ieee80211_sta *sta,
+ enum ieee80211_band band,
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 2720884..3aa5c25 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -122,7 +122,7 @@ struct neigh_ops {
+ void (*error_report)(struct neighbour *, struct sk_buff *);
+ int (*output)(struct neighbour *, struct sk_buff *);
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
+-};
++} __do_const;
+
+ struct pneigh_entry {
+ struct pneigh_entry *next;
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 3bb6fa0..3af7393 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -240,10 +240,16 @@ static inline struct net *read_pnet(struct net * const *pnet)
+ #define __net_init
+ #define __net_exit
+ #define __net_initdata
++#define __net_initconst
+ #else
+ #define __net_init __init
+ #define __net_exit __exit_refok
+ #define __net_initdata __initdata
++#ifdef CONSTIFY_PLUGIN
++#define __net_initconst __initconst
++#else
++#define __net_initconst __initdata
++#endif
+ #endif
+
+ struct pernet_operations {
+@@ -253,7 +259,7 @@ struct pernet_operations {
+ void (*exit_batch)(struct list_head *net_exit_list);
+ int *id;
+ size_t size;
+-};
++} __do_const;
+
+ /*
+ * Use these carefully. If you implement a network device and it
+diff --git a/include/net/netdma.h b/include/net/netdma.h
+index 8ba8ce2..99b7fff 100644
+--- a/include/net/netdma.h
++++ b/include/net/netdma.h
+@@ -24,7 +24,7 @@
+ #include <linux/dmaengine.h>
+ #include <linux/skbuff.h>
+
+-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
++int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
+ struct sk_buff *skb, int offset, struct iovec *to,
+ size_t len, struct dma_pinned_list *pinned_list);
+
+diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
+index 252fd10..aa1421f 100644
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -22,7 +22,7 @@ struct nf_queue_handler {
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ char *name;
+-};
++} __do_const;
+
+ extern int nf_register_queue_handler(u_int8_t pf,
+ const struct nf_queue_handler *qh);
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index cb1f350..3279d2c 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
+ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
+ {
+ if (mark)
+- skb_trim(skb, (unsigned char *) mark - skb->data);
++ skb_trim(skb, (const unsigned char *) mark - skb->data);
+ }
+
+ /**
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index d786b4f..4c3dd41 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -56,8 +56,8 @@ struct netns_ipv4 {
+
+ unsigned int sysctl_ping_group_range[2];
+
+- atomic_t rt_genid;
+- atomic_t dev_addr_genid;
++ atomic_unchecked_t rt_genid;
++ atomic_unchecked_t dev_addr_genid;
+
+ #ifdef CONFIG_IP_MROUTE
+ #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 6f7eb80..f9838be 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -44,7 +44,7 @@ struct net_protocol {
+ int (*gro_complete)(struct sk_buff *skb);
+ unsigned int no_policy:1,
+ netns_ok:1;
+-};
++} __do_const;
+
+ #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ struct inet6_protocol {
+@@ -63,7 +63,7 @@ struct inet6_protocol {
+ int (*gro_complete)(struct sk_buff *skb);
+
+ unsigned int flags; /* INET6_PROTO_xxx */
+-};
++} __do_const;
+
+ #define INET6_PROTO_NOPOLICY 0x1
+ #define INET6_PROTO_FINAL 0x2
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 3702939..cf9e78e 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -78,7 +78,7 @@ struct rtnl_link_ops {
+ int (*get_tx_queues)(struct net *net, struct nlattr *tb[],
+ unsigned int *tx_queues,
+ unsigned int *real_tx_queues);
+-};
++} __do_const;
+
+ extern int __rtnl_link_register(struct rtnl_link_ops *ops);
+ extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index ad03988..0c5a964 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -318,9 +318,9 @@ do { \
+
+ #else /* SCTP_DEBUG */
+
+-#define SCTP_DEBUG_PRINTK(whatever...)
+-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
+-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
+ #define SCTP_ENABLE_DEBUG
+ #define SCTP_DISABLE_DEBUG
+ #define SCTP_ASSERT(expr, str, func)
+diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
+index 9148632..be3c5ac 100644
+--- a/include/net/sctp/sm.h
++++ b/include/net/sctp/sm.h
+@@ -86,7 +86,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
+ typedef struct {
+ sctp_state_fn_t *fn;
+ const char *name;
+-} sctp_sm_table_entry_t;
++} __do_const sctp_sm_table_entry_t;
+
+ /* A naming convention of "sctp_sf_xxx" applies to all the state functions
+ * currently in use.
+@@ -295,7 +295,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
+ __u32 sctp_generate_tsn(const struct sctp_endpoint *);
+
+ /* Extern declarations for major data structures. */
+-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
++extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+
+
+ /* Get the size of a DATA chunk payload. */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index a15432da..9459dcc 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -644,7 +644,7 @@ struct sctp_pf {
+ struct sctp_association *asoc);
+ void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
+ struct sctp_af *af;
+-};
++} __do_const;
+
+
+ /* Structure to track chunk fragments that have been acked, but peer
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index c2e542b..6ca975b 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/types.h>
+
++extern void net_secret_init(void);
+ extern __u32 secure_ip_id(__be32 daddr);
+ extern __u32 secure_ipv6_id(const __be32 daddr[4]);
+ extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index e6454b6..cda5eaf 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -278,7 +278,7 @@ struct sock {
+ #ifdef CONFIG_RPS
+ __u32 sk_rxhash;
+ #endif
+- atomic_t sk_drops;
++ atomic_unchecked_t sk_drops;
+ int sk_rcvbuf;
+
+ struct sk_filter __rcu *sk_filter;
+@@ -1416,7 +1416,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
+ }
+
+ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+- char __user *from, char *to,
++ char __user *from, unsigned char *to,
+ int copy, int offset)
+ {
+ if (skb->ip_summed == CHECKSUM_NONE) {
+@@ -1678,7 +1678,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+ }
+ }
+
+-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
++struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+
+ static inline struct page *sk_stream_alloc_page(struct sock *sk)
+ {
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index fe46019..1422c5a 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -470,7 +470,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
+ extern void tcp_xmit_retransmit_queue(struct sock *);
+ extern void tcp_simple_retransmit(struct sock *);
+ extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
++extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+
+ extern void tcp_send_probe0(struct sock *);
+ extern void tcp_send_partial(struct sock *);
+@@ -633,8 +633,8 @@ struct tcp_skb_cb {
+ struct inet6_skb_parm h6;
+ #endif
+ } header; /* For incoming frames */
+- __u32 seq; /* Starting sequence number */
+- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
++ __u32 seq __intentional_overflow(0); /* Starting sequence number */
++ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
+ __u32 when; /* used to compute rtt's */
+ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+ __u8 sacked; /* State flags for SACK/FACK. */
+@@ -647,7 +647,7 @@ struct tcp_skb_cb {
+ #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
+ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
+
+- __u32 ack_seq; /* Sequence number ACK'd */
++ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
+ };
+
+ #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 921f627..3744fe8 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -282,7 +282,6 @@ struct xfrm_dst;
+ struct xfrm_policy_afinfo {
+ unsigned short family;
+ struct dst_ops *dst_ops;
+- void (*garbage_collect)(struct net *net);
+ struct dst_entry *(*dst_lookup)(struct net *net, int tos,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr);
+@@ -298,7 +297,7 @@ struct xfrm_policy_afinfo {
+ struct net_device *dev,
+ const struct flowi *fl);
+ struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
+-};
++} __do_const;
+
+ extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+ extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+@@ -334,7 +333,7 @@ struct xfrm_state_afinfo {
+ struct sk_buff *skb);
+ int (*transport_finish)(struct sk_buff *skb,
+ int async);
+-};
++} __do_const;
+
+ extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+ extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+@@ -417,7 +416,7 @@ struct xfrm_mode {
+ struct module *owner;
+ unsigned int encap;
+ int flags;
+-};
++} __do_const;
+
+ /* Flags for xfrm_mode. */
+ enum {
+@@ -508,7 +507,7 @@ struct xfrm_policy {
+ struct timer_list timer;
+
+ struct flow_cache_object flo;
+- atomic_t genid;
++ atomic_unchecked_t genid;
+ u32 priority;
+ u32 index;
+ struct xfrm_mark mark;
+@@ -1141,6 +1140,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
+ }
+ }
+
++extern void xfrm_garbage_collect_deferred(struct net *net);
++
+ #else
+
+ static inline void xfrm_sk_free_policy(struct sock *sk) {}
+@@ -1175,6 +1176,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
+ {
+ return 1;
+ }
++static inline void xfrm_garbage_collect_deferred(struct net *net)
++{
++}
+ #endif
+
+ static __inline__
+diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
+index 1a046b1..ee0bef0 100644
+--- a/include/rdma/iw_cm.h
++++ b/include/rdma/iw_cm.h
+@@ -122,7 +122,7 @@ struct iw_cm_verbs {
+ int backlog);
+
+ int (*destroy_listen)(struct iw_cm_id *cm_id);
+-};
++} __no_const;
+
+ /**
+ * iw_create_cm_id - Create an IW CM identifier.
+diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
+index 5d1a758..1dbf795 100644
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -748,6 +748,7 @@ struct libfc_function_template {
+ */
+ void (*disc_stop_final) (struct fc_lport *);
+ };
++typedef struct libfc_function_template __no_const libfc_function_template_no_const;
+
+ /**
+ * struct fc_disc - Discovery context
+@@ -851,7 +852,7 @@ struct fc_lport {
+ struct fc_vport *vport;
+
+ /* Operational Information */
+- struct libfc_function_template tt;
++ libfc_function_template_no_const tt;
+ u8 link_up;
+ u8 qfull;
+ enum fc_lport_state state;
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 5591ed5..13eb457 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -161,9 +161,9 @@ struct scsi_device {
+ unsigned int max_device_blocked; /* what device_blocked counts down from */
+ #define SCSI_DEFAULT_DEVICE_BLOCKED 3
+
+- atomic_t iorequest_cnt;
+- atomic_t iodone_cnt;
+- atomic_t ioerr_cnt;
++ atomic_unchecked_t iorequest_cnt;
++ atomic_unchecked_t iodone_cnt;
++ atomic_unchecked_t ioerr_cnt;
+
+ struct device sdev_gendev,
+ sdev_dev;
+diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
+index 2a65167..91e01f8 100644
+--- a/include/scsi/scsi_transport_fc.h
++++ b/include/scsi/scsi_transport_fc.h
+@@ -711,7 +711,7 @@ struct fc_function_template {
+ unsigned long show_host_system_hostname:1;
+
+ unsigned long disable_target_scan:1;
+-};
++} __do_const;
+
+
+ /**
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 11cfb59..808afef 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -641,7 +641,7 @@ struct snd_soc_codec_driver {
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+-};
++} __do_const;
+
+ /* SoC platform interface */
+ struct snd_soc_platform_driver {
+@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
+ /* platform IO - used for platform DAPM */
+ unsigned int (*read)(struct snd_soc_platform *, unsigned int);
+ int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
+-};
++} __do_const;
+
+ struct snd_soc_platform {
+ const char *name;
+diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
+index 444cd6b..3327cc5 100644
+--- a/include/sound/ymfpci.h
++++ b/include/sound/ymfpci.h
+@@ -358,7 +358,7 @@ struct snd_ymfpci {
+ spinlock_t reg_lock;
+ spinlock_t voice_lock;
+ wait_queue_head_t interrupt_sleep;
+- atomic_t interrupt_sleep_count;
++ atomic_unchecked_t interrupt_sleep_count;
+ struct snd_info_entry *proc_entry;
+ const struct firmware *dsp_microcode;
+ const struct firmware *controller_microcode;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 6ee550e..ebec4cc 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -466,8 +466,8 @@ struct se_cmd {
+ atomic_t t_se_count;
+ atomic_t t_task_cdbs_left;
+ atomic_t t_task_cdbs_ex_left;
+- atomic_t t_task_cdbs_sent;
+- atomic_t t_transport_aborted;
++ atomic_unchecked_t t_task_cdbs_sent;
++ atomic_unchecked_t t_transport_aborted;
+ atomic_t t_transport_active;
+ atomic_t t_transport_complete;
+ atomic_t t_transport_queue_active;
+@@ -706,7 +706,7 @@ struct se_device {
+ /* Active commands on this virtual SE device */
+ atomic_t simple_cmds;
+ atomic_t depth_left;
+- atomic_t dev_ordered_id;
++ atomic_unchecked_t dev_ordered_id;
+ atomic_t execute_tasks;
+ atomic_t dev_ordered_sync;
+ atomic_t dev_qf_count;
+diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
+new file mode 100644
+index 0000000..2efe49d
+--- /dev/null
++++ b/include/trace/events/fs.h
+@@ -0,0 +1,53 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM fs
++
++#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_FS_H
++
++#include <linux/fs.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(do_sys_open,
++
++ TP_PROTO(char *filename, int flags, int mode),
++
++ TP_ARGS(filename, flags, mode),
++
++ TP_STRUCT__entry(
++ __string( filename, filename )
++ __field( int, flags )
++ __field( int, mode )
++ ),
++
++ TP_fast_assign(
++ __assign_str(filename, filename);
++ __entry->flags = flags;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("\"%s\" %x %o",
++ __get_str(filename), __entry->flags, __entry->mode)
++);
++
++TRACE_EVENT(open_exec,
++
++ TP_PROTO(const char *filename),
++
++ TP_ARGS(filename),
++
++ TP_STRUCT__entry(
++ __string( filename, filename )
++ ),
++
++ TP_fast_assign(
++ __assign_str(filename, filename);
++ ),
++
++ TP_printk("\"%s\"",
++ __get_str(filename))
++);
++
++#endif /* _TRACE_FS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
+index 1c09820..7f5ec79 100644
+--- a/include/trace/events/irq.h
++++ b/include/trace/events/irq.h
+@@ -36,7 +36,7 @@ struct softirq_action;
+ */
+ TRACE_EVENT(irq_handler_entry,
+
+- TP_PROTO(int irq, struct irqaction *action),
++ TP_PROTO(int irq, const struct irqaction *action),
+
+ TP_ARGS(irq, action),
+
+@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
+ */
+ TRACE_EVENT(irq_handler_exit,
+
+- TP_PROTO(int irq, struct irqaction *action, int ret),
++ TP_PROTO(int irq, const struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+diff --git a/include/trace/events/random.h b/include/trace/events/random.h
+new file mode 100644
+index 0000000..805af6d
+--- /dev/null
++++ b/include/trace/events/random.h
+@@ -0,0 +1,315 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM random
++
++#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RANDOM_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(add_device_randomness,
++ TP_PROTO(int bytes, unsigned long IP),
++
++ TP_ARGS(bytes, IP),
++
++ TP_STRUCT__entry(
++ __field( int, bytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->bytes = bytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("bytes %d caller %pF",
++ __entry->bytes, (void *)__entry->IP)
++);
++
++DECLARE_EVENT_CLASS(random__mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bytes = bytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bytes %d caller %pF",
++ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++TRACE_EVENT(credit_entropy_bits,
++ TP_PROTO(const char *pool_name, int bits, int entropy_count,
++ int entropy_total, unsigned long IP),
++
++ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bits )
++ __field( int, entropy_count )
++ __field( int, entropy_total )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bits = bits;
++ __entry->entropy_count = entropy_count;
++ __entry->entropy_total = entropy_total;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
++ "caller %pF", __entry->pool_name, __entry->bits,
++ __entry->entropy_count, __entry->entropy_total,
++ (void *)__entry->IP)
++);
++
++TRACE_EVENT(push_to_pool,
++ TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
++
++ TP_ARGS(pool_name, pool_bits, input_bits),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, pool_bits )
++ __field( int, input_bits )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->pool_bits = pool_bits;
++ __entry->input_bits = input_bits;
++ ),
++
++ TP_printk("%s: pool_bits %d input_pool_bits %d",
++ __entry->pool_name, __entry->pool_bits,
++ __entry->input_bits)
++);
++
++TRACE_EVENT(debit_entropy,
++ TP_PROTO(const char *pool_name, int debit_bits),
++
++ TP_ARGS(pool_name, debit_bits),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, debit_bits )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->debit_bits = debit_bits;
++ ),
++
++ TP_printk("%s: debit_bits %d", __entry->pool_name,
++ __entry->debit_bits)
++);
++
++TRACE_EVENT(add_input_randomness,
++ TP_PROTO(int input_bits),
++
++ TP_ARGS(input_bits),
++
++ TP_STRUCT__entry(
++ __field( int, input_bits )
++ ),
++
++ TP_fast_assign(
++ __entry->input_bits = input_bits;
++ ),
++
++ TP_printk("input_pool_bits %d", __entry->input_bits)
++);
++
++TRACE_EVENT(add_disk_randomness,
++ TP_PROTO(dev_t dev, int input_bits),
++
++ TP_ARGS(dev, input_bits),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, input_bits )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dev;
++ __entry->input_bits = input_bits;
++ ),
++
++ TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
++ MINOR(__entry->dev), __entry->input_bits)
++);
++
++TRACE_EVENT(xfer_secondary_pool,
++ TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
++ int pool_entropy, int input_entropy),
++
++ TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
++ input_entropy),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, xfer_bits )
++ __field( int, request_bits )
++ __field( int, pool_entropy )
++ __field( int, input_entropy )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->xfer_bits = xfer_bits;
++ __entry->request_bits = request_bits;
++ __entry->pool_entropy = pool_entropy;
++ __entry->input_entropy = input_entropy;
++ ),
++
++ TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
++ "input_entropy %d", __entry->pool_name, __entry->xfer_bits,
++ __entry->request_bits, __entry->pool_entropy,
++ __entry->input_entropy)
++);
++
++DECLARE_EVENT_CLASS(random__get_random_bytes,
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP),
++
++ TP_STRUCT__entry(
++ __field( int, nbytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->nbytes = nbytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
++);
++
++DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP)
++);
++
++DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP)
++);
++
++DECLARE_EVENT_CLASS(random__extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, nbytes )
++ __field( int, entropy_count )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->nbytes = nbytes;
++ __entry->entropy_count = entropy_count;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
++ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
++ (void *)__entry->IP)
++);
++
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++TRACE_EVENT(random_read,
++ TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
++
++ TP_ARGS(got_bits, need_bits, pool_left, input_left),
++
++ TP_STRUCT__entry(
++ __field( int, got_bits )
++ __field( int, need_bits )
++ __field( int, pool_left )
++ __field( int, input_left )
++ ),
++
++ TP_fast_assign(
++ __entry->got_bits = got_bits;
++ __entry->need_bits = need_bits;
++ __entry->pool_left = pool_left;
++ __entry->input_left = input_left;
++ ),
++
++ TP_printk("got_bits %d still_needed_bits %d "
++ "blocking_pool_entropy_left %d input_entropy_left %d",
++		  __entry->got_bits, __entry->need_bits, __entry->pool_left,
++ __entry->input_left)
++);
++
++TRACE_EVENT(urandom_read,
++ TP_PROTO(int got_bits, int pool_left, int input_left),
++
++ TP_ARGS(got_bits, pool_left, input_left),
++
++ TP_STRUCT__entry(
++ __field( int, got_bits )
++ __field( int, pool_left )
++ __field( int, input_left )
++ ),
++
++ TP_fast_assign(
++ __entry->got_bits = got_bits;
++ __entry->pool_left = pool_left;
++ __entry->input_left = input_left;
++ ),
++
++ TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
++ "input_entropy_left %d", __entry->got_bits,
++ __entry->pool_left, __entry->input_left)
++);
++
++#endif /* _TRACE_RANDOM_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/include/video/udlfb.h b/include/video/udlfb.h
+index c41f308..6918de3 100644
+--- a/include/video/udlfb.h
++++ b/include/video/udlfb.h
+@@ -52,10 +52,10 @@ struct dlfb_data {
+ u32 pseudo_palette[256];
+ int blank_mode; /*one of FB_BLANK_ */
+ /* blit-only rendering path metrics, exposed through sysfs */
+- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+- atomic_t bytes_sent; /* to usb, after compression including overhead */
+- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
++ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
++ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
+ };
+
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
+index 0993a22..32ba2fe 100644
+--- a/include/video/uvesafb.h
++++ b/include/video/uvesafb.h
+@@ -177,6 +177,7 @@ struct uvesafb_par {
+ u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
+ u8 pmi_setpal; /* PMI for palette changes */
+ u16 *pmi_base; /* protected mode interface location */
++ u8 *pmi_code; /* protected mode code location */
+ void *pmi_start;
+ void *pmi_pal;
+ u8 *vbe_state_orig; /*
+diff --git a/init/Kconfig b/init/Kconfig
+index 43298f9..7e4816c 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
+
+ config COMPAT_BRK
+ bool "Disable heap randomization"
+- default y
++ default n
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+@@ -1397,7 +1397,7 @@ config INIT_ALL_POSSIBLE
+ config STOP_MACHINE
+ bool
+ default y
+- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
++ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
+ help
+ Need stop_machine() primitive.
+
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index d6c229f..4746631 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
+
+ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
+ {
+- int err = sys_mount(name, "/root", fs, flags, data);
++ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
+ if (err)
+ return err;
+
+- sys_chdir((const char __user __force *)"/root");
++	sys_chdir((const char __force_user *)"/root");
+ ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
+ printk(KERN_INFO
+ "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
+@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
+ va_start(args, fmt);
+ vsprintf(buf, fmt, args);
+ va_end(args);
+- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ sys_close(fd);
+ }
+ printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
+- fd = sys_open("/dev/console", O_RDWR, 0);
++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, TCGETS, (long)&termios);
+ termios.c_lflag &= ~ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+- sys_read(fd, &c, 1);
++ sys_read(fd, (char __user *)&c, 1);
+ termios.c_lflag |= ICANON;
+ sys_ioctl(fd, TCSETSF, (long)&termios);
+ sys_close(fd);
+@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
+ mount_root();
+ out:
+ devtmpfs_mount("dev");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot((const char __user __force *)".");
++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((const char __force_user *)".");
+ }
+diff --git a/init/do_mounts.h b/init/do_mounts.h
+index f5b978a..69dbfe8 100644
+--- a/init/do_mounts.h
++++ b/init/do_mounts.h
+@@ -15,15 +15,15 @@ extern int root_mountflags;
+
+ static inline int create_dev(char *name, dev_t dev)
+ {
+- sys_unlink(name);
+- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
++ sys_unlink((char __force_user *)name);
++ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
+ }
+
+ #if BITS_PER_LONG == 32
+ static inline u32 bstat(char *name)
+ {
+ struct stat64 stat;
+- if (sys_stat64(name, &stat) != 0)
++ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
+ static inline u32 bstat(char *name)
+ {
+ struct stat stat;
+- if (sys_newstat(name, &stat) != 0)
++ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
+ return 0;
+ if (!S_ISBLK(stat.st_mode))
+ return 0;
+diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
+index 3098a38..253064e 100644
+--- a/init/do_mounts_initrd.c
++++ b/init/do_mounts_initrd.c
+@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
+ create_dev("/dev/root.old", Root_RAM0);
+ /* mount initrd on rootfs' /root */
+ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
+- sys_mkdir("/old", 0700);
+- root_fd = sys_open("/", 0, 0);
+- old_fd = sys_open("/old", 0, 0);
++ sys_mkdir((const char __force_user *)"/old", 0700);
++ root_fd = sys_open((const char __force_user *)"/", 0, 0);
++ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
+ /* move initrd over / and chdir/chroot in initrd root */
+- sys_chdir("/root");
+- sys_mount(".", "/", NULL, MS_MOVE, NULL);
+- sys_chroot(".");
++ sys_chdir((const char __force_user *)"/root");
++ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
++ sys_chroot((const char __force_user *)".");
+
+ /*
+ * In case that a resume from disk is carried out by linuxrc or one of
+@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
+
+ /* move initrd to rootfs' /old */
+ sys_fchdir(old_fd);
+- sys_mount("/", ".", NULL, MS_MOVE, NULL);
++ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
+ /* switch root and cwd back to / of rootfs */
+ sys_fchdir(root_fd);
+- sys_chroot(".");
++ sys_chroot((const char __force_user *)".");
+ sys_close(old_fd);
+ sys_close(root_fd);
+
+ if (new_decode_dev(real_root_dev) == Root_RAM0) {
+- sys_chdir("/old");
++ sys_chdir((const char __force_user *)"/old");
+ return;
+ }
+
+@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
+ mount_root();
+
+ printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
+- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
++ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
+ if (!error)
+ printk("okay\n");
+ else {
+- int fd = sys_open("/dev/root.old", O_RDWR, 0);
++ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
+ if (error == -ENOENT)
+ printk("/initrd does not exist. Ignored.\n");
+ else
+ printk("failed\n");
+ printk(KERN_NOTICE "Unmounting old root\n");
+- sys_umount("/old", MNT_DETACH);
++ sys_umount((char __force_user *)"/old", MNT_DETACH);
+ printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
+ if (fd < 0) {
+ error = fd;
+@@ -116,11 +116,11 @@ int __init initrd_load(void)
+ * mounted in the normal path.
+ */
+ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+- sys_unlink("/initrd.image");
++ sys_unlink((const char __force_user *)"/initrd.image");
+ handle_initrd();
+ return 1;
+ }
+ }
+- sys_unlink("/initrd.image");
++ sys_unlink((const char __force_user *)"/initrd.image");
+ return 0;
+ }
+diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
+index 32c4799..c27ee74 100644
+--- a/init/do_mounts_md.c
++++ b/init/do_mounts_md.c
+@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
+ partitioned ? "_d" : "", minor,
+ md_setup_args[ent].device_names);
+
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((char __force_user *)name, 0, 0);
+ if (fd < 0) {
+ printk(KERN_ERR "md: open failed - cannot start "
+ "array %s\n", name);
+@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
+ * array without it
+ */
+ sys_close(fd);
+- fd = sys_open(name, 0, 0);
++ fd = sys_open((char __force_user *)name, 0, 0);
+ sys_ioctl(fd, BLKRRPART, 0);
+ }
+ sys_close(fd);
+@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
+
+ wait_for_device_probe();
+
+- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
++ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
+ sys_close(fd);
+diff --git a/init/initramfs.c b/init/initramfs.c
+index 2531811..4f036c4 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -74,7 +74,7 @@ static void __init free_hash(void)
+ }
+ }
+
+-static long __init do_utime(char __user *filename, time_t mtime)
++static long __init do_utime(__force char __user *filename, time_t mtime)
+ {
+ struct timespec t[2];
+
+@@ -109,7 +109,7 @@ static void __init dir_utime(void)
+ struct dir_entry *de, *tmp;
+ list_for_each_entry_safe(de, tmp, &dir_list, list) {
+ list_del(&de->list);
+- do_utime(de->name, de->mtime);
++ do_utime((char __force_user *)de->name, de->mtime);
+ kfree(de->name);
+ kfree(de);
+ }
+@@ -271,7 +271,7 @@ static int __init maybe_link(void)
+ if (nlink >= 2) {
+ char *old = find_link(major, minor, ino, mode, collected);
+ if (old)
+- return (sys_link(old, collected) < 0) ? -1 : 1;
++ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
+ }
+ return 0;
+ }
+@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
+ {
+ struct stat st;
+
+- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
++ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
+ if (S_ISDIR(st.st_mode))
+- sys_rmdir(path);
++ sys_rmdir((char __force_user *)path);
+ else
+- sys_unlink(path);
++ sys_unlink((char __force_user *)path);
+ }
+ }
+
+@@ -305,7 +305,7 @@ static int __init do_name(void)
+ int openflags = O_WRONLY|O_CREAT;
+ if (ml != 1)
+ openflags |= O_TRUNC;
+- wfd = sys_open(collected, openflags, mode);
++ wfd = sys_open((char __force_user *)collected, openflags, mode);
+
+ if (wfd >= 0) {
+ sys_fchown(wfd, uid, gid);
+@@ -317,17 +317,17 @@ static int __init do_name(void)
+ }
+ }
+ } else if (S_ISDIR(mode)) {
+- sys_mkdir(collected, mode);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
++ sys_mkdir((char __force_user *)collected, mode);
++ sys_chown((char __force_user *)collected, uid, gid);
++ sys_chmod((char __force_user *)collected, mode);
+ dir_add(collected, mtime);
+ } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ if (maybe_link() == 0) {
+- sys_mknod(collected, mode, rdev);
+- sys_chown(collected, uid, gid);
+- sys_chmod(collected, mode);
+- do_utime(collected, mtime);
++ sys_mknod((char __force_user *)collected, mode, rdev);
++ sys_chown((char __force_user *)collected, uid, gid);
++ sys_chmod((char __force_user *)collected, mode);
++ do_utime((char __force_user *)collected, mtime);
+ }
+ }
+ return 0;
+@@ -336,15 +336,15 @@ static int __init do_name(void)
+ static int __init do_copy(void)
+ {
+ if (count >= body_len) {
+- sys_write(wfd, victim, body_len);
++ sys_write(wfd, (char __force_user *)victim, body_len);
+ sys_close(wfd);
+- do_utime(vcollected, mtime);
++ do_utime((char __force_user *)vcollected, mtime);
+ kfree(vcollected);
+ eat(body_len);
+ state = SkipIt;
+ return 0;
+ } else {
+- sys_write(wfd, victim, count);
++ sys_write(wfd, (char __force_user *)victim, count);
+ body_len -= count;
+ eat(count);
+ return 1;
+@@ -355,9 +355,9 @@ static int __init do_symlink(void)
+ {
+ collected[N_ALIGN(name_len) + body_len] = '\0';
+ clean_path(collected, 0);
+- sys_symlink(collected + N_ALIGN(name_len), collected);
+- sys_lchown(collected, uid, gid);
+- do_utime(collected, mtime);
++ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
++ sys_lchown((char __force_user *)collected, uid, gid);
++ do_utime((char __force_user *)collected, mtime);
+ state = SkipIt;
+ next_state = Reset;
+ return 0;
+@@ -573,7 +573,7 @@ static int __init populate_rootfs(void)
+ {
+ char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+ if (err)
+- panic(err); /* Failed to decompress INTERNAL initramfs */
++ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
+ if (initrd_start) {
+ #ifdef CONFIG_BLK_DEV_RAM
+ int fd;
+diff --git a/init/main.c b/init/main.c
+index 7474450..caef7e7 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -97,6 +97,8 @@ static inline void mark_rodata_ro(void) { }
+ extern void tc_init(void);
+ #endif
+
++extern void grsecurity_init(void);
++
+ /*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQ disabled. This means
+@@ -150,6 +152,64 @@ static int __init set_reset_devices(char *str)
+
+ __setup("reset_devices", set_reset_devices);
+
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
++static int __init setup_grsec_proc_gid(char *str)
++{
++ grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
++ return 1;
++}
++__setup("grsec_proc_gid=", setup_grsec_proc_gid);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
++EXPORT_SYMBOL(pax_user_shadow_base);
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++extern pgdval_t clone_pgd_mask;
++#endif
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++ unsigned int cpu;
++ struct desc_struct *gdt;
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++ }
++ loadsegment(ds, __KERNEL_DS);
++ loadsegment(es, __KERNEL_DS);
++ loadsegment(ss, __KERNEL_DS);
++#else
++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++ clone_pgd_mask = ~(pgdval_t)0UL;
++ pax_user_shadow_base = 0UL;
++#endif
++
++ return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++int pax_softmode;
++
++static int __init setup_pax_softmode(char *str)
++{
++ get_option(&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+ const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+ static const char *panic_later, *panic_param;
+@@ -679,6 +739,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
+ {
+ int count = preempt_count();
+ int ret;
++ const char *msg1 = "", *msg2 = "";
+
+ if (initcall_debug)
+ ret = do_one_initcall_debug(fn);
+@@ -691,17 +752,18 @@ int __init_or_module do_one_initcall(initcall_t fn)
+ sprintf(msgbuf, "error code %d ", ret);
+
+ if (preempt_count() != count) {
+- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
++ msg1 = " preemption imbalance";
+ preempt_count() = count;
+ }
+ if (irqs_disabled()) {
+- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
++ msg2 = " disabled interrupts";
+ local_irq_enable();
+ }
+- if (msgbuf[0]) {
+- printk("initcall %pF returned with %s\n", fn, msgbuf);
++ if (msgbuf[0] || *msg1 || *msg2) {
++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
+ }
+
++ add_latent_entropy();
+ return ret;
+ }
+
+@@ -750,6 +812,10 @@ static void run_init_process(const char *init_filename)
+ kernel_execve(init_filename, argv_init, envp_init);
+ }
+
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++extern int gr_init_ran;
++#endif
++
+ /* This is a non __init function. Force it to be noinline otherwise gcc
+ * makes it inline to init() and it becomes part of init.text section
+ */
+@@ -771,6 +837,11 @@ static noinline int init_post(void)
+ ramdisk_execute_command);
+ }
+
++#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
++ /* if no initrd was used, be extra sure we enforce chroot restrictions */
++ gr_init_ran = 1;
++#endif
++
+ /*
+ * We try each of these until one succeeds.
+ *
+@@ -823,7 +894,7 @@ static int __init kernel_init(void * unused)
+ do_basic_setup();
+
+ /* Open the /dev/console on the rootfs, this should never fail */
+- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
++ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
+ printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+ (void) sys_dup(0);
+@@ -836,11 +907,13 @@ static int __init kernel_init(void * unused)
+ if (!ramdisk_execute_command)
+ ramdisk_execute_command = "/init";
+
+- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
++ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
+ prepare_namespace();
+ }
+
++ grsecurity_init();
++
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
+index 00fba2b..9afd545 100644
+--- a/ipc/ipc_sysctl.c
++++ b/ipc/ipc_sysctl.c
+@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
+ static int proc_ipc_dointvec(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table ipc_table;
++ ctl_table_no_const ipc_table;
+
+ memcpy(&ipc_table, table, sizeof(ipc_table));
+ ipc_table.data = get_ipc(table);
+@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
+ static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table ipc_table;
++ ctl_table_no_const ipc_table;
+
+ memcpy(&ipc_table, table, sizeof(ipc_table));
+ ipc_table.data = get_ipc(table);
+@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
+ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table ipc_table;
++ ctl_table_no_const ipc_table;
+ size_t lenp_bef = *lenp;
+ int rc;
+
+@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table ipc_table;
++ ctl_table_no_const ipc_table;
+ memcpy(&ipc_table, table, sizeof(ipc_table));
+ ipc_table.data = get_ipc(table);
+
+@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
+ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table ipc_table;
++ ctl_table_no_const ipc_table;
+ size_t lenp_bef = *lenp;
+ int oldval;
+ int rc;
+diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
+index 0c09366..c81a8ec 100644
+--- a/ipc/mq_sysctl.c
++++ b/ipc/mq_sysctl.c
+@@ -34,7 +34,7 @@ static void *get_mq(ctl_table *table)
+ static int proc_mq_dointvec(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table mq_table;
++ ctl_table_no_const mq_table;
+ memcpy(&mq_table, table, sizeof(mq_table));
+ mq_table.data = get_mq(table);
+
+@@ -44,7 +44,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
+ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table mq_table;
++ ctl_table_no_const mq_table;
+ memcpy(&mq_table, table, sizeof(mq_table));
+ mq_table.data = get_mq(table);
+
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 5b4293d..f179875 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
+ mq_bytes = (mq_msg_tblsz +
+ (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+
++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
+ spin_lock(&mq_lock);
+ if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+ u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 7385de2..a8180e08 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
+ return security_msg_queue_associate(msq, msgflg);
+ }
+
++static struct ipc_ops msg_ops = {
++ .getnew = newque,
++ .associate = msg_security,
++ .more_checks = NULL
++};
++
+ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops msg_ops;
+ struct ipc_params msg_params;
+
+ ns = current->nsproxy->ipc_ns;
+
+- msg_ops.getnew = newque;
+- msg_ops.associate = msg_security;
+- msg_ops.more_checks = NULL;
+-
+ msg_params.key = key;
+ msg_params.flg = msgflg;
+
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 5215a81..cfc0cac 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+ return 0;
+ }
+
++static struct ipc_ops sem_ops = {
++ .getnew = newary,
++ .associate = sem_security,
++ .more_checks = sem_more_checks
++};
++
+ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops sem_ops;
+ struct ipc_params sem_params;
+
+ ns = current->nsproxy->ipc_ns;
+@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+ if (nsems < 0 || nsems > ns->sc_semmsl)
+ return -EINVAL;
+
+- sem_ops.getnew = newary;
+- sem_ops.associate = sem_security;
+- sem_ops.more_checks = sem_more_checks;
+-
+ sem_params.key = key;
+ sem_params.flg = semflg;
+ sem_params.u.nsems = nsems;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 326a20b..62e6b7e 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
+ static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid,
++ const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime);
++#endif
++
+ void shm_init_ns(struct ipc_namespace *ns)
+ {
+ ns->shm_ctlmax = SHMMAX;
+@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+ shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++ {
++ struct timespec timeval;
++ do_posix_clock_monotonic_gettime(&timeval);
++
++ shp->shm_createtime = timeval.tv_sec;
++ }
++#endif
+ shp->shm_segsz = size;
+ shp->shm_nattch = 0;
+ shp->shm_file = file;
+@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
+ return 0;
+ }
+
++static struct ipc_ops shm_ops = {
++ .getnew = newseg,
++ .associate = shm_security,
++ .more_checks = shm_more_checks
++};
++
+ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
+ {
+ struct ipc_namespace *ns;
+- struct ipc_ops shm_ops;
+ struct ipc_params shm_params;
+
+ ns = current->nsproxy->ipc_ns;
+
+- shm_ops.getnew = newseg;
+- shm_ops.associate = shm_security;
+- shm_ops.more_checks = shm_more_checks;
+-
+ shm_params.key = key;
+ shm_params.flg = shmflg;
+ shm_params.u.size = size;
+@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
+ f_mode = FMODE_READ | FMODE_WRITE;
+ }
+ if (shmflg & SHM_EXEC) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ goto out;
++#endif
++
+ prot |= PROT_EXEC;
+ acc_mode |= S_IXUGO;
+ }
+@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
+ if (err)
+ goto out_unlock;
+
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++ shp->shm_perm.cuid, shmid) ||
++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++ err = -EACCES;
++ goto out_unlock;
++ }
++#endif
++
+ path = shp->shm_file->f_path;
+ path_get(&path);
+ shp->shm_nattch++;
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_lapid = current->pid;
++#endif
+ size = i_size_read(path.dentry->d_inode);
+ shm_unlock(shp);
+
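
The shm.c changes stamp each new segment with a creation time taken from the monotonic clock, which later feeds the gr_handle_shmat()/gr_chroot_shmat() checks; a monotonic source matters here because wall-clock time can jump backwards. A userspace sketch of the same idea, assuming only POSIX clock_gettime():

    #include <stdio.h>
    #include <time.h>

    struct segment { time_t createtime; };

    static void seg_init(struct segment *s)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);   /* immune to clock jumps */
        s->createtime = ts.tv_sec;
    }

    int main(void)
    {
        struct segment s;
        seg_init(&s);
        printf("created at monotonic %lds\n", (long)s.createtime);
        return 0;
    }
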
+diff --git a/kernel/acct.c b/kernel/acct.c
+index fa7eb3d..7faf116 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
+ */
+ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+- file->f_op->write(file, (char *)&ac,
++ file->f_op->write(file, (char __force_user *)&ac,
+ sizeof(acct_t), &file->f_pos);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
+ set_fs(fs);
+diff --git a/kernel/audit.c b/kernel/audit.c
+index e14bc74..bdf7f6c 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
+ 3) suppressed due to audit_rate_limit
+ 4) suppressed due to audit_backlog_limit
+ */
+-static atomic_t audit_lost = ATOMIC_INIT(0);
++static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
+
+ /* The netlink socket. */
+ static struct sock *audit_sock;
+@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
+ unsigned long now;
+ int print;
+
+- atomic_inc(&audit_lost);
++ atomic_inc_unchecked(&audit_lost);
+
+ print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
+
+@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
+ printk(KERN_WARNING
+ "audit: audit_lost=%d audit_rate_limit=%d "
+ "audit_backlog_limit=%d\n",
+- atomic_read(&audit_lost),
++ atomic_read_unchecked(&audit_lost),
+ audit_rate_limit,
+ audit_backlog_limit);
+ audit_panic(message);
+@@ -690,7 +690,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ status_set.pid = audit_pid;
+ status_set.rate_limit = audit_rate_limit;
+ status_set.backlog_limit = audit_backlog_limit;
+- status_set.lost = atomic_read(&audit_lost);
++ status_set.lost = atomic_read_unchecked(&audit_lost);
+ status_set.backlog = skb_queue_len(&audit_skb_queue);
+ audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
+ &status_set, sizeof(status_set));
+@@ -1261,12 +1261,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
+ avail = audit_expand(ab,
+ max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
+ if (!avail)
+- goto out;
++ goto out_va_end;
+ len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
+ }
+- va_end(args2);
+ if (len > 0)
+ skb_put(skb, len);
++out_va_end:
++ va_end(args2);
+ out:
+ return;
+ }
+@@ -1307,7 +1308,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
+ int i, avail, new_len;
+ unsigned char *ptr;
+ struct sk_buff *skb;
+- static const unsigned char *hex = "0123456789ABCDEF";
++ static const unsigned char hex[] = "0123456789ABCDEF";
+
+ if (!ab)
+ return;
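
Besides the atomic_unchecked_t conversions (the usual PaX idiom for exempting counters like audit_lost, whose overflow is harmless, from REFCOUNT detection), the audit.c hunk fixes a real leak: the early goto out skipped va_end() on the copied argument list. A standalone sketch of the corrected control flow:

    /* Every va_copy() must be paired with va_end() on every exit
     * path, so the early goto is retargeted to a label that runs
     * va_end() first. */
    #include <stdarg.h>
    #include <stdio.h>

    static void log_vformat(char *buf, size_t avail, const char *fmt, ...)
    {
        va_list args, args2;
        va_start(args, fmt);
        va_copy(args2, args);

        if (vsnprintf(buf, avail, fmt, args) < 0)
            goto out_va_end;          /* was: goto out -- leaked args2 */
        /* second formatting pass consumes the copy */
        vsnprintf(buf, avail, fmt, args2);

    out_va_end:
        va_end(args2);                /* now runs on every exit path */
        va_end(args);
    }

    int main(void)
    {
        char buf[64];
        log_vformat(buf, sizeof(buf), "lost=%d", 3);
        puts(buf);
        return 0;
    }
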
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 47b7fc1..b8e1e47 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -67,6 +67,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/capability.h>
+ #include <linux/fs_struct.h>
++#include <linux/compat.h>
+
+ #include "audit.h"
+
+@@ -1166,8 +1167,8 @@ static void audit_log_execve_info(struct audit_context *context,
+ struct audit_buffer **ab,
+ struct audit_aux_data_execve *axi)
+ {
+- int i;
+- size_t len, len_sent = 0;
++ int i, len;
++ size_t len_sent = 0;
+ const char __user *p;
+ char *buf;
+
+@@ -2118,7 +2119,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
+ }
+
+ /* global counter which is incremented every time something logs in */
+-static atomic_t session_id = ATOMIC_INIT(0);
++static atomic_unchecked_t session_id = ATOMIC_INIT(0);
+
+ /**
+ * audit_set_loginuid - set a task's audit_context loginuid
+@@ -2129,9 +2130,9 @@ static atomic_t session_id = ATOMIC_INIT(0);
+ *
+ * Called (set) from fs/proc/base.c::proc_loginuid_write().
+ */
+-int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
++int __intentional_overflow(-1) audit_set_loginuid(struct task_struct *task, uid_t loginuid)
+ {
+- unsigned int sessionid = atomic_inc_return(&session_id);
++ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
+ struct audit_context *context = task->audit_context;
+
+ if (context && context->in_syscall) {
+@@ -2499,46 +2500,59 @@ void __audit_mmap_fd(int fd, int flags)
+ context->type = AUDIT_MMAP;
+ }
+
+-/**
+- * audit_core_dumps - record information about processes that end abnormally
+- * @signr: signal value
+- *
+- * If a process ends with a core dump, something fishy is going on and we
+- * should record the event for investigation.
+- */
+-void audit_core_dumps(long signr)
++static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+ {
+- struct audit_buffer *ab;
+- u32 sid;
+- uid_t auid = audit_get_loginuid(current), uid;
++ uid_t auid, uid;
+ gid_t gid;
+- unsigned int sessionid = audit_get_sessionid(current);
++ unsigned int sessionid;
+
+- if (!audit_enabled)
+- return;
+-
+- if (signr == SIGQUIT) /* don't care for those */
+- return;
+-
+- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
++ auid = audit_get_loginuid(current);
++ sessionid = audit_get_sessionid(current);
+ current_uid_gid(&uid, &gid);
++
+ audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
+ auid, uid, gid, sessionid);
+- security_task_getsecid(current, &sid);
+- if (sid) {
+- char *ctx = NULL;
+- u32 len;
+-
+- if (security_secid_to_secctx(sid, &ctx, &len))
+- audit_log_format(ab, " ssid=%u", sid);
+- else {
+- audit_log_format(ab, " subj=%s", ctx);
+- security_release_secctx(ctx, len);
+- }
+- }
++ audit_log_task_context(ab);
+ audit_log_format(ab, " pid=%d comm=", current->pid);
+ audit_log_untrustedstring(ab, current->comm);
++ audit_log_format(ab, " reason=");
++ audit_log_string(ab, reason);
+ audit_log_format(ab, " sig=%ld", signr);
++}
++/**
++ * audit_core_dumps - record information about processes that end abnormally
++ * @signr: signal value
++ *
++ * If a process ends with a core dump, something fishy is going on and we
++ * should record the event for investigation.
++ */
++void audit_core_dumps(long signr)
++{
++ struct audit_buffer *ab;
++
++ if (!audit_enabled)
++ return;
++
++ if (signr == SIGQUIT) /* don't care for those */
++ return;
++
++ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
++ audit_log_abend(ab, "memory violation", signr);
++ audit_log_end(ab);
++}
++
++void __audit_seccomp(unsigned long syscall, long signr, int code)
++{
++ struct audit_buffer *ab;
++
++ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
++ audit_log_abend(ab, "seccomp", signr);
++ audit_log_format(ab, " syscall=%ld", syscall);
++#ifdef CONFIG_COMPAT
++ audit_log_format(ab, " compat=%d", is_compat_task());
++#endif
++ audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
++ audit_log_format(ab, " code=0x%x", code);
+ audit_log_end(ab);
+ }
+
+diff --git a/kernel/capability.c b/kernel/capability.c
+index b463871..59495fd 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
+ * before modification is attempted and the application
+ * fails.
+ */
++ if (tocopy > ARRAY_SIZE(kdata))
++ return -EFAULT;
++
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
+ BUG();
+ }
+
+- if (security_capable(ns, current_cred(), cap) == 0) {
++ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return true;
+ }
+@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
+ }
+ EXPORT_SYMBOL(ns_capable);
+
++bool ns_capable_nolog(struct user_namespace *ns, int cap)
++{
++ if (unlikely(!cap_valid(cap))) {
++ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
++ BUG();
++ }
++
++ if (security_capable_noaudit(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
++ current->flags |= PF_SUPERPRIV;
++ return true;
++ }
++ return false;
++}
++EXPORT_SYMBOL(ns_capable_nolog);
++
++bool capable_nolog(int cap)
++{
++ return ns_capable_nolog(&init_user_ns, cap);
++}
++EXPORT_SYMBOL(capable_nolog);
++
+ /**
+ * task_ns_capable - Determine whether current task has a superior
+ * capability targeted at a specific task's user namespace.
+@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
+ }
+ EXPORT_SYMBOL(task_ns_capable);
+
++bool task_ns_capable_nolog(struct task_struct *t, int cap)
++{
++ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
++}
++EXPORT_SYMBOL(task_ns_capable_nolog);
++
+ /**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
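
The capability.c hunk adds a bounds check on tocopy before the copy_to_user() writes kdata out, and introduces ns_capable_nolog() variants that consult gr_is_capable_nolog() without generating audit noise. A userspace sketch of the bounds-check pattern, with illustrative names:

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static int copy_caps(unsigned *dst, size_t dst_elems,
                         const unsigned *kdata, size_t tocopy)
    {
        if (tocopy > dst_elems)       /* reject before any copy */
            return -1;                /* -EFAULT in the kernel hunk */
        memcpy(dst, kdata, tocopy * sizeof(*kdata));
        return 0;
    }

    int main(void)
    {
        unsigned kdata[2] = { 1, 2 }, user[2];
        printf("%d\n", copy_caps(user, ARRAY_SIZE(user), kdata, 2)); /* 0  */
        printf("%d\n", copy_caps(user, ARRAY_SIZE(user), kdata, 9)); /* -1 */
        return 0;
    }
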
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 2a1ffb7..b99a595 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5164,7 +5164,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
+ struct css_set *cg = link->cg;
+ struct task_struct *task;
+ int count = 0;
+- seq_printf(seq, "css_set %p\n", cg);
++ seq_printf(seq, "css_set %pK\n", cg);
+ list_for_each_entry(task, &cg->tasks, cg_list) {
+ if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
+ seq_puts(seq, " ...\n");
+diff --git a/kernel/compat.c b/kernel/compat.c
+index a6d0649..f44fb27 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/linkage.h>
+ #include <linux/compat.h>
++#include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/time.h>
+ #include <linux/signal.h>
+@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
+ mm_segment_t oldfs;
+ long ret;
+
+- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
++ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = hrtimer_nanosleep_restart(restart);
+@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = hrtimer_nanosleep(&tu,
+- rmtp ? (struct timespec __user *)&rmt : NULL,
++ rmtp ? (struct timespec __force_user *)&rmt : NULL,
+ HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ set_fs(oldfs);
+
+@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sigpending((old_sigset_t __user *) &s);
++ ret = sys_sigpending((old_sigset_t __force_user *) &s);
+ set_fs(old_fs);
+ if (ret == 0)
+ ret = put_user(s, set);
+@@ -399,7 +400,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_old_getrlimit(resource, &r);
++ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
+ set_fs(old_fs);
+
+ if (!ret) {
+@@ -471,7 +472,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_getrusage(who, (struct rusage __user *) &r);
++ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
+ set_fs(old_fs);
+
+ if (ret)
+@@ -498,8 +499,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
+ set_fs (KERNEL_DS);
+ ret = sys_wait4(pid,
+ (stat_addr ?
+- (unsigned int __user *) &status : NULL),
+- options, (struct rusage __user *) &r);
++ (unsigned int __force_user *) &status : NULL),
++ options, (struct rusage __force_user *) &r);
+ set_fs (old_fs);
+
+ if (ret > 0) {
+@@ -524,8 +525,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
+ memset(&info, 0, sizeof(info));
+
+ set_fs(KERNEL_DS);
+- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
+- uru ? (struct rusage __user *)&ru : NULL);
++ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
++ uru ? (struct rusage __force_user *)&ru : NULL);
+ set_fs(old_fs);
+
+ if ((ret < 0) || (info.si_signo == 0))
+@@ -655,8 +656,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_settime(timer_id, flags,
+- (struct itimerspec __user *) &newts,
+- (struct itimerspec __user *) &oldts);
++ (struct itimerspec __force_user *) &newts,
++ (struct itimerspec __force_user *) &oldts);
+ set_fs(oldfs);
+ if (!err && old && put_compat_itimerspec(old, &oldts))
+ return -EFAULT;
+@@ -673,7 +674,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_gettime(timer_id,
+- (struct itimerspec __user *) &ts);
++ (struct itimerspec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && put_compat_itimerspec(setting, &ts))
+ return -EFAULT;
+@@ -692,7 +693,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_settime(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ return err;
+ }
+@@ -707,7 +708,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_gettime(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && put_compat_timespec(&ts, tp))
+ return -EFAULT;
+@@ -727,7 +728,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
++ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
+ set_fs(oldfs);
+
+ err = compat_put_timex(utp, &txc);
+@@ -747,7 +748,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_getres(which_clock,
+- (struct timespec __user *) &ts);
++ (struct timespec __force_user *) &ts);
+ set_fs(oldfs);
+ if (!err && tp && put_compat_timespec(&ts, tp))
+ return -EFAULT;
+@@ -759,9 +760,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
+ long err;
+ mm_segment_t oldfs;
+ struct timespec tu;
+- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
++ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
+
+- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
++ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = clock_nanosleep_restart(restart);
+@@ -793,8 +794,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_clock_nanosleep(which_clock, flags,
+- (struct timespec __user *) &in,
+- (struct timespec __user *) &out);
++ (struct timespec __force_user *) &in,
++ (struct timespec __force_user *) &out);
+ set_fs(oldfs);
+
+ if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
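
Nearly every compat.c change is the same annotation: these helpers call the native syscalls on kernel stack buffers under a temporary set_fs(KERNEL_DS), so the casts become __force_user to tell sparse the user-pointer rule is being overridden deliberately. A rough userspace analogue of the runtime half of that bracket (the trusted flag stands in for the address limit; none of this is kernel API):

    #include <stdio.h>

    static int trusted;                  /* stands in for the addr_limit */

    static int checked_write(const char *src)
    {
        if (!trusted)                    /* the access_ok() stand-in */
            return -1;
        printf("wrote: %s\n", src);
        return 0;
    }

    int main(void)
    {
        int old = trusted;               /* oldfs = get_fs();  */
        trusted = 1;                     /* set_fs(KERNEL_DS); */
        checked_write("kernel stack buffer");
        trusted = old;                   /* set_fs(oldfs);     */
        return 0;
    }
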
+diff --git a/kernel/configs.c b/kernel/configs.c
+index 42e8fa0..9e7406b 100644
+--- a/kernel/configs.c
++++ b/kernel/configs.c
+@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
+ struct proc_dir_entry *entry;
+
+ /* create the current config file */
++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
++ &ikconfig_file_ops);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
++ &ikconfig_file_ops);
++#endif
++#else
+ entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+ &ikconfig_file_ops);
++#endif
++
+ if (!entry)
+ return -ENOMEM;
+
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 48c6fd3..8398912 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
+ validate_creds(cred);
+ put_cred(cred);
+ }
++
++#ifdef CONFIG_GRKERNSEC_SETXID
++ cred = (struct cred *) tsk->delayed_cred;
++ if (cred) {
++ tsk->delayed_cred = NULL;
++ validate_creds(cred);
++ put_cred(cred);
++ }
++#endif
+ }
+
+ /**
+@@ -472,7 +481,7 @@ error_put:
+ * Always returns 0 thus allowing this function to be tail-called at the end
+ * of, say, sys_setgid().
+ */
+-int commit_creds(struct cred *new)
++static int __commit_creds(struct cred *new)
+ {
+ struct task_struct *task = current;
+ const struct cred *old = task->real_cred;
+@@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
+
+ get_cred(new); /* we will require a ref for the subj creds too */
+
++ gr_set_role_label(task, new->uid, new->gid);
++
+ /* dumpability changes */
+ if (old->euid != new->euid ||
+ old->egid != new->egid ||
+@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
+ put_cred(old);
+ return 0;
+ }
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern int set_user(struct cred *new);
++
++void gr_delayed_cred_worker(void)
++{
++ const struct cred *new = current->delayed_cred;
++ struct cred *ncred;
++
++ current->delayed_cred = NULL;
++
++ if (current_uid() && new != NULL) {
++ // drop the reference taken by get_cred() when this was queued
++ put_cred(new);
++ return;
++ } else if (new == NULL)
++ return;
++
++ ncred = prepare_creds();
++ if (!ncred)
++ goto die;
++ // uids
++ ncred->uid = new->uid;
++ ncred->euid = new->euid;
++ ncred->suid = new->suid;
++ ncred->fsuid = new->fsuid;
++ // gids
++ ncred->gid = new->gid;
++ ncred->egid = new->egid;
++ ncred->sgid = new->sgid;
++ ncred->fsgid = new->fsgid;
++ // groups
++ if (set_groups(ncred, new->group_info) < 0) {
++ abort_creds(ncred);
++ goto die;
++ }
++ // caps
++ ncred->securebits = new->securebits;
++ ncred->cap_inheritable = new->cap_inheritable;
++ ncred->cap_permitted = new->cap_permitted;
++ ncred->cap_effective = new->cap_effective;
++ ncred->cap_bset = new->cap_bset;
++
++ if (set_user(ncred)) {
++ abort_creds(ncred);
++ goto die;
++ }
++
++ // drop the reference taken by get_cred() when this was queued
++ put_cred(new);
++
++ __commit_creds(ncred);
++ return;
++die:
++ // drop the reference taken by get_cred() when this was queued
++ put_cred(new);
++ do_group_exit(SIGKILL);
++}
++#endif
++
++int commit_creds(struct cred *new)
++{
++#ifdef CONFIG_GRKERNSEC_SETXID
++ int ret;
++ int schedule_it = 0;
++ struct task_struct *t;
++
++ /* we won't get called with tasklist_lock held for writing
++ and interrupts disabled as the cred struct in that case is
++ init_cred
++ */
++ if (grsec_enable_setxid && !current_is_single_threaded() &&
++ !current_uid() && new->uid) {
++ schedule_it = 1;
++ }
++ ret = __commit_creds(new);
++ if (schedule_it) {
++ rcu_read_lock();
++ read_lock(&tasklist_lock);
++ for (t = next_thread(current); t != current;
++ t = next_thread(t)) {
++ if (t->delayed_cred == NULL) {
++ t->delayed_cred = get_cred(new);
++ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
++ set_tsk_need_resched(t);
++ }
++ }
++ read_unlock(&tasklist_lock);
++ rcu_read_unlock();
++ }
++ return ret;
++#else
++ return __commit_creds(new);
++#endif
++}
++
+ EXPORT_SYMBOL(commit_creds);
+
+ /**
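
The cred.c rework splits commit_creds() so that, under GRKERNSEC_SETXID, a root-to-non-root transition is also queued to every sibling thread: each gets a delayed_cred reference plus TIF_GRSEC_SETXID and a forced reschedule, and applies it in gr_delayed_cred_worker() at its next safe point. A minimal pthread sketch of that propagate-via-flag scheme, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdatomic.h>
    #include <unistd.h>

    #define NTHREADS 3

    static _Atomic(long) delayed_uid[NTHREADS] = { -1, -1, -1 };

    static void *worker(void *arg)
    {
        int id = (int)(long)arg;
        for (;;) {                       /* the thread's normal loop */
            long uid = atomic_exchange(&delayed_uid[id], -1);
            if (uid >= 0) {              /* safe point: apply pending cred */
                printf("thread %d now uid %ld\n", id, uid);
                return NULL;
            }
            usleep(1000);
        }
    }

    int main(void)
    {
        pthread_t t[NTHREADS];
        for (long i = 0; i < NTHREADS; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)   /* "commit_creds(new)" */
            atomic_store(&delayed_uid[i], 1000);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
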
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 7fda904..59f620c 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+ */
+ static atomic_t masters_in_kgdb;
+ static atomic_t slaves_in_kgdb;
+-static atomic_t kgdb_break_tasklet_var;
++static atomic_unchecked_t kgdb_break_tasklet_var;
+ atomic_t kgdb_setting_breakpoint;
+
+ struct task_struct *kgdb_usethread;
+@@ -129,7 +129,7 @@ int kgdb_single_step;
+ static pid_t kgdb_sstep_pid;
+
+ /* to keep track of the CPU which is doing the single stepping*/
+-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
++atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+ /*
+ * If you are debugging a problem where roundup (the collection of
+@@ -537,7 +537,7 @@ return_normal:
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
+ */
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+ atomic_set(&kgdb_active, -1);
+@@ -631,8 +631,8 @@ cpu_master_loop:
+ }
+
+ kgdb_restore:
+- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
++ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
++ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+@@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
+ static void kgdb_tasklet_bpt(unsigned long ing)
+ {
+ kgdb_breakpoint();
+- atomic_set(&kgdb_break_tasklet_var, 0);
++ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
+ }
+
+ static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+ void kgdb_schedule_breakpoint(void)
+ {
+- if (atomic_read(&kgdb_break_tasklet_var) ||
++ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
+ atomic_read(&kgdb_active) != -1 ||
+ atomic_read(&kgdb_setting_breakpoint))
+ return;
+- atomic_inc(&kgdb_break_tasklet_var);
++ atomic_inc_unchecked(&kgdb_break_tasklet_var);
+ tasklet_schedule(&kgdb_tasklet_breakpoint);
+ }
+ EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 63786e7..0780cac 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
+ list_for_each_entry(mod, kdb_modules, list) {
+
+ kdb_printf("%-20s%8u 0x%p ", mod->name,
+- mod->core_size, (void *)mod);
++ mod->core_size_rx + mod->core_size_rw, (void *)mod);
+ #ifdef CONFIG_MODULE_UNLOAD
+ kdb_printf("%4d ", module_refcount(mod));
+ #endif
+@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
+ kdb_printf(" (Loading)");
+ else
+ kdb_printf(" (Live)");
+- kdb_printf(" 0x%p", mod->module_core);
++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
+
+ #ifdef CONFIG_MODULE_UNLOAD
+ {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 83d5621..8c6738d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -145,8 +145,15 @@ static struct srcu_struct pmus_srcu;
+ * 0 - disallow raw tracepoint access for unpriv
+ * 1 - disallow cpu events for unpriv
+ * 2 - disallow kernel profiling for unpriv
++ * 3 - disallow all unpriv perf event use
+ */
+-int sysctl_perf_event_paranoid __read_mostly = 1;
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
++#elif defined(CONFIG_GRKERNSEC_HIDESYM)
++int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
++#else
++int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
++#endif
+
+ /* Minimum for 512 kiB + 1 user control page */
+ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
+@@ -173,7 +180,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+ return 0;
+ }
+
+-static atomic64_t perf_event_id;
++static atomic64_unchecked_t perf_event_id;
+
+ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
+@@ -2575,7 +2582,7 @@ static void __perf_event_read(void *info)
+
+ static inline u64 perf_event_count(struct perf_event *event)
+ {
+- return local64_read(&event->count) + atomic64_read(&event->child_count);
++ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
+ }
+
+ static u64 perf_event_read(struct perf_event *event)
+@@ -3121,9 +3128,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+ mutex_lock(&event->child_mutex);
+ total += perf_event_read(event);
+ *enabled += event->total_time_enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ *running += event->total_time_running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ list_for_each_entry(child, &event->child_list, child_list) {
+ total += perf_event_read(child);
+@@ -3515,10 +3522,10 @@ void perf_event_update_userpage(struct perf_event *event)
+ userpg->offset -= local64_read(&event->hw.prev_count);
+
+ userpg->time_enabled = enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+
+ userpg->time_running = running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+
+ barrier();
+ ++userpg->lock;
+@@ -4026,11 +4033,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+ values[n++] = perf_event_count(event);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ values[n++] = enabled +
+- atomic64_read(&event->child_total_time_enabled);
++ atomic64_read_unchecked(&event->child_total_time_enabled);
+ }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ values[n++] = running +
+- atomic64_read(&event->child_total_time_running);
++ atomic64_read_unchecked(&event->child_total_time_running);
+ }
+ if (read_format & PERF_FORMAT_ID)
+ values[n++] = primary_event_id(event);
+@@ -4681,12 +4688,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ * need to add enough zero bytes after the string to handle
+ * the 64bit alignment we do later.
+ */
+- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
++ buf = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (!buf) {
+ name = strncpy(tmp, "//enomem", sizeof(tmp));
+ goto got_name;
+ }
+- name = d_path(&file->f_path, buf, PATH_MAX);
++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
+ if (IS_ERR(name)) {
+ name = strncpy(tmp, "//toolong", sizeof(tmp));
+ goto got_name;
+@@ -6043,7 +6050,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ event->parent = parent_event;
+
+ event->ns = get_pid_ns(current->nsproxy->pid_ns);
+- event->id = atomic64_inc_return(&perf_event_id);
++ event->id = atomic64_inc_return_unchecked(&perf_event_id);
+
+ event->state = PERF_EVENT_STATE_INACTIVE;
+
+@@ -6289,6 +6296,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ if (flags & ~PERF_FLAG_ALL)
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
++ return -EACCES;
++#endif
++
+ err = perf_copy_attr(attr_uptr, &attr);
+ if (err)
+ return err;
+@@ -6584,10 +6596,10 @@ static void sync_child_event(struct perf_event *child_event,
+ /*
+ * Add back the child's count to the parent's count:
+ */
+- atomic64_add(child_val, &parent_event->child_count);
+- atomic64_add(child_event->total_time_enabled,
++ atomic64_add_unchecked(child_val, &parent_event->child_count);
++ atomic64_add_unchecked(child_event->total_time_enabled,
+ &parent_event->child_total_time_enabled);
+- atomic64_add(child_event->total_time_running,
++ atomic64_add_unchecked(child_event->total_time_running,
+ &parent_event->child_total_time_running);
+
+ /*
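
Among the perf changes, the mmap-event hunk keeps the name buffer at exactly PATH_MAX and carves the later 64-bit zero padding out of it by shrinking the length passed to d_path(), instead of over-allocating by sizeof(u64). A self-contained sketch of sizing a format call around a later fixed-size append:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define BUFSZ 32                     /* PATH_MAX stand-in */

    static void emit_name(char *buf, size_t bufsz, const char *path)
    {
        /* format into bufsz minus the reserve, like d_path() above */
        snprintf(buf, bufsz - sizeof(uint64_t), "%s", path);
        size_t len = strlen(buf) + 1;
        memset(buf + len, 0, sizeof(uint64_t));  /* the later padding */
    }

    int main(void)
    {
        char buf[BUFSZ];
        emit_name(buf, sizeof(buf), "/usr/bin/some/deliberately/long/path");
        printf("%s\n", buf);             /* truncated, never overrun */
        return 0;
    }
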
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index a2101bb..f2e0354 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -78,7 +78,7 @@ static unsigned long perf_data_size(struct ring_buffer *rb)
+
+ static inline void
+ __output_copy(struct perf_output_handle *handle,
+- const void *buf, unsigned int len)
++ const void *buf, unsigned long len)
+ {
+ do {
+ unsigned long size = min_t(unsigned long, handle->size, len);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 234e152..4c61aa3 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -168,6 +168,10 @@ void release_task(struct task_struct * p)
+ struct task_struct *leader;
+ int zap_leader;
+ repeat:
++#ifdef CONFIG_NET
++ gr_del_task_from_ip_table(p);
++#endif
++
+ /* don't need to get the RCU readlock here - the process is dead and
+ * can't be modifying its own credentials. But shut RCU-lockdep up */
+ rcu_read_lock();
+@@ -380,7 +384,7 @@ int allow_signal(int sig)
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return 0;
+@@ -402,56 +406,6 @@ int disallow_signal(int sig)
+
+ EXPORT_SYMBOL(disallow_signal);
+
+-/*
+- * Put all the gunge required to become a kernel thread without
+- * attached user resources in one place where it belongs.
+- */
+-
+-void daemonize(const char *name, ...)
+-{
+- va_list args;
+- sigset_t blocked;
+-
+- va_start(args, name);
+- vsnprintf(current->comm, sizeof(current->comm), name, args);
+- va_end(args);
+-
+- /*
+- * If we were started as result of loading a module, close all of the
+- * user space pages. We don't need them, and if we didn't close them
+- * they would be locked into memory.
+- */
+- exit_mm(current);
+- /*
+- * We don't want to have TIF_FREEZE set if the system-wide hibernation
+- * or suspend transition begins right now.
+- */
+- current->flags |= (PF_NOFREEZE | PF_KTHREAD);
+-
+- if (current->nsproxy != &init_nsproxy) {
+- get_nsproxy(&init_nsproxy);
+- switch_task_namespaces(current, &init_nsproxy);
+- }
+- set_special_pids(&init_struct_pid);
+- proc_clear_tty(current);
+-
+- /* Block and flush all signals */
+- sigfillset(&blocked);
+- sigprocmask(SIG_BLOCK, &blocked, NULL);
+- flush_signals(current);
+-
+- /* Become as one with the init task */
+-
+- daemonize_fs_struct();
+- exit_files(current);
+- current->files = init_task.files;
+- atomic_inc(&current->files->count);
+-
+- reparent_to_kthreadd();
+-}
+-
+-EXPORT_SYMBOL(daemonize);
+-
+ static void close_files(struct files_struct * files)
+ {
+ int i, j;
+@@ -874,6 +828,8 @@ NORET_TYPE void do_exit(long code)
+ struct task_struct *tsk = current;
+ int group_dead;
+
++ set_fs(USER_DS);
++
+ profile_task_exit(tsk);
+
+ WARN_ON(blk_needs_flush_plug(tsk));
+@@ -890,7 +846,6 @@ NORET_TYPE void do_exit(long code)
+ * mm_release()->clear_child_tid() from writing to a user-controlled
+ * kernel address.
+ */
+- set_fs(USER_DS);
+
+ ptrace_event(PTRACE_EVENT_EXIT, code);
+
+@@ -952,6 +907,9 @@ NORET_TYPE void do_exit(long code)
+ tsk->exit_code = code;
+ taskstats_exit(tsk, group_dead);
+
++ gr_acl_handle_psacct(tsk, code);
++ gr_acl_handle_exit();
++
+ exit_mm(tsk);
+
+ if (group_dead)
+@@ -1065,7 +1023,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
+ * Take down every thread in the group. This is called by fatal signals
+ * as well as by sys_exit_group (below).
+ */
+-NORET_TYPE void
++__noreturn void
+ do_group_exit(int exit_code)
+ {
+ struct signal_struct *sig = current->signal;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ce0c182..c6ec99a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -34,6 +34,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/security.h>
+ #include <linux/hugetlb.h>
++#include <linux/seccomp.h>
+ #include <linux/swap.h>
+ #include <linux/syscalls.h>
+ #include <linux/jiffies.h>
+@@ -168,6 +169,7 @@ void free_task(struct task_struct *tsk)
+ free_thread_info(tsk->stack);
+ rt_mutex_debug_task_free(tsk);
+ ftrace_graph_exit_task(tsk);
++ put_seccomp_filter(tsk);
+ free_task_struct(tsk);
+ }
+ EXPORT_SYMBOL(free_task);
+@@ -270,19 +272,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ }
+
+ err = arch_dup_task_struct(tsk, orig);
+- if (err)
+- goto out;
+
++ /*
++ * We defer looking at err, because we will need this setup
++ * for the clean up path to work correctly.
++ */
+ tsk->stack = ti;
+-
+ setup_thread_stack(tsk, orig);
++
++ if (err)
++ goto out;
++
+ clear_user_return_notifier(tsk);
+ clear_tsk_need_resched(tsk);
+ stackend = end_of_stack(tsk);
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+- tsk->stack_canary = get_random_int();
++ tsk->stack_canary = pax_get_random_long();
+ #endif
+
+ /*
+@@ -306,13 +313,78 @@ out:
+ }
+
+ #ifdef CONFIG_MMU
+-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
+ {
+- struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
+- struct rb_node **rb_link, *rb_parent;
+- int retval;
++ struct vm_area_struct *tmp;
+ unsigned long charge;
+ struct mempolicy *pol;
++ struct file *file;
++
++ charge = 0;
++ if (mpnt->vm_flags & VM_ACCOUNT) {
++ unsigned long len;
++ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++ if (security_vm_enough_memory(len))
++ goto fail_nomem;
++ charge = len;
++ }
++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!tmp)
++ goto fail_nomem;
++ *tmp = *mpnt;
++ tmp->vm_mm = mm;
++ INIT_LIST_HEAD(&tmp->anon_vma_chain);
++ pol = mpol_dup(vma_policy(mpnt));
++ if (IS_ERR(pol))
++ goto fail_nomem_policy;
++ vma_set_policy(tmp, pol);
++ if (anon_vma_fork(tmp, mpnt))
++ goto fail_nomem_anon_vma_fork;
++ tmp->vm_flags &= ~VM_LOCKED;
++ tmp->vm_next = tmp->vm_prev = NULL;
++ tmp->vm_mirror = NULL;
++ file = tmp->vm_file;
++ if (file) {
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct address_space *mapping = file->f_mapping;
++
++ get_file(file);
++ if (tmp->vm_flags & VM_DENYWRITE)
++ atomic_dec(&inode->i_writecount);
++ mutex_lock(&mapping->i_mmap_mutex);
++ if (tmp->vm_flags & VM_SHARED)
++ mapping->i_mmap_writable++;
++ flush_dcache_mmap_lock(mapping);
++ /* insert tmp into the share list, just after mpnt */
++ vma_prio_tree_add(tmp, mpnt);
++ flush_dcache_mmap_unlock(mapping);
++ mutex_unlock(&mapping->i_mmap_mutex);
++ }
++
++ /*
++ * Clear hugetlb-related page reserves for children. This only
++ * affects MAP_PRIVATE mappings. Faults generated by the child
++ * are not guaranteed to succeed, even if read-only
++ */
++ if (is_vm_hugetlb_page(tmp))
++ reset_vma_resv_huge_pages(tmp);
++
++ return tmp;
++
++fail_nomem_anon_vma_fork:
++ mpol_put(pol);
++fail_nomem_policy:
++ kmem_cache_free(vm_area_cachep, tmp);
++fail_nomem:
++ vm_unacct_memory(charge);
++ return NULL;
++}
++
++static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
++{
++ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
++ struct rb_node **rb_link, *rb_parent;
++ int retval;
+
+ down_write(&oldmm->mmap_sem);
+ flush_cache_dup_mm(oldmm);
+@@ -324,8 +396,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = oldmm->mmap_base;
+- mm->cached_hole_size = ~0UL;
++ mm->free_area_cache = oldmm->free_area_cache;
++ mm->cached_hole_size = oldmm->cached_hole_size;
+ mm->map_count = 0;
+ cpumask_clear(mm_cpumask(mm));
+ mm->mm_rb = RB_ROOT;
+@@ -341,63 +413,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+
+ prev = NULL;
+ for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+- struct file *file;
+-
+ if (mpnt->vm_flags & VM_DONTCOPY) {
+- long pages = vma_pages(mpnt);
+- mm->total_vm -= pages;
+ vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
+- -pages);
++ -vma_pages(mpnt));
+ continue;
+ }
+- charge = 0;
+- if (mpnt->vm_flags & VM_ACCOUNT) {
+- unsigned long len;
+- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+- if (security_vm_enough_memory(len))
+- goto fail_nomem;
+- charge = len;
++ tmp = dup_vma(mm, mpnt);
++ if (!tmp) {
++ retval = -ENOMEM;
++ goto out;
+ }
+- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+- if (!tmp)
+- goto fail_nomem;
+- *tmp = *mpnt;
+- INIT_LIST_HEAD(&tmp->anon_vma_chain);
+- pol = mpol_dup(vma_policy(mpnt));
+- retval = PTR_ERR(pol);
+- if (IS_ERR(pol))
+- goto fail_nomem_policy;
+- vma_set_policy(tmp, pol);
+- tmp->vm_mm = mm;
+- if (anon_vma_fork(tmp, mpnt))
+- goto fail_nomem_anon_vma_fork;
+- tmp->vm_flags &= ~VM_LOCKED;
+- tmp->vm_next = tmp->vm_prev = NULL;
+- file = tmp->vm_file;
+- if (file) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- struct address_space *mapping = file->f_mapping;
+-
+- get_file(file);
+- if (tmp->vm_flags & VM_DENYWRITE)
+- atomic_dec(&inode->i_writecount);
+- mutex_lock(&mapping->i_mmap_mutex);
+- if (tmp->vm_flags & VM_SHARED)
+- mapping->i_mmap_writable++;
+- flush_dcache_mmap_lock(mapping);
+- /* insert tmp into the share list, just after mpnt */
+- vma_prio_tree_add(tmp, mpnt);
+- flush_dcache_mmap_unlock(mapping);
+- mutex_unlock(&mapping->i_mmap_mutex);
+- }
+-
+- /*
+- * Clear hugetlb-related page reserves for children. This only
+- * affects MAP_PRIVATE mappings. Faults generated by the child
+- * are not guaranteed to succeed, even if read-only
+- */
+- if (is_vm_hugetlb_page(tmp))
+- reset_vma_resv_huge_pages(tmp);
+
+ /*
+ * Link in the new vma and copy the page table entries.
+@@ -420,6 +445,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ if (retval)
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
++ struct vm_area_struct *mpnt_m;
++
++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
++
++ if (!mpnt->vm_mirror)
++ continue;
++
++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
++ mpnt->vm_mirror = mpnt_m;
++ } else {
++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
++ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
++ mpnt->vm_mirror->vm_mirror = mpnt;
++ }
++ }
++ BUG_ON(mpnt_m);
++ }
++#endif
++
+ /* a new mm has just been created */
+ arch_dup_mmap(oldmm, mm);
+ retval = 0;
+@@ -428,14 +478,6 @@ out:
+ flush_tlb_mm(oldmm);
+ up_write(&oldmm->mmap_sem);
+ return retval;
+-fail_nomem_anon_vma_fork:
+- mpol_put(pol);
+-fail_nomem_policy:
+- kmem_cache_free(vm_area_cachep, tmp);
+-fail_nomem:
+- retval = -ENOMEM;
+- vm_unacct_memory(charge);
+- goto out;
+ }
+
+ static inline int mm_alloc_pgd(struct mm_struct *mm)
+@@ -647,6 +689,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
+ }
+ EXPORT_SYMBOL_GPL(get_task_mm);
+
++struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
++{
++ struct mm_struct *mm;
++ int err;
++
++ err = mutex_lock_killable(&task->signal->cred_guard_mutex);
++ if (err)
++ return ERR_PTR(err);
++
++ mm = get_task_mm(task);
++ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
++ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
++ mmput(mm);
++ mm = ERR_PTR(-EACCES);
++ }
++ mutex_unlock(&task->signal->cred_guard_mutex);
++
++ return mm;
++}
++
+ /* Please note the differences between mmput and mm_release.
+ * mmput is called whenever we stop holding onto a mm_struct,
+ * error success whatever.
+@@ -832,13 +894,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+ spin_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+- fs->users++;
++ atomic_inc(&fs->users);
+ spin_unlock(&fs->lock);
+ return 0;
+ }
+ tsk->fs = copy_fs_struct(fs);
+ if (!tsk->fs)
+ return -ENOMEM;
++ /* Carry through gr_chroot_dentry and is_chrooted instead
++ of recomputing them here; they were already copied when
++ the task struct was duplicated. This keeps pivot_root
++ from being treated as a chroot.
++ */
++ //gr_set_chroot_entries(tsk, &tsk->fs->root);
++
+ return 0;
+ }
+
+@@ -1047,7 +1116,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
+ * parts of the process environment (as per the clone
+ * flags). The actual kick-off is left to the caller.
+ */
+-static struct task_struct *copy_process(unsigned long clone_flags,
++static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size,
+@@ -1096,6 +1165,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto fork_out;
+
+ ftrace_graph_init_task(p);
++ get_seccomp_filter(p);
+
+ rt_mutex_init_task(p);
+
+@@ -1104,10 +1174,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
+ retval = -EAGAIN;
++
++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
++
+ if (atomic_read(&p->real_cred->user->processes) >=
+ task_rlimit(p, RLIMIT_NPROC)) {
+- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+- p->real_cred->user != INIT_USER)
++ if (p->real_cred->user != INIT_USER &&
++ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+ goto bad_fork_free;
+ }
+ current->flags &= ~PF_NPROC_EXCEEDED;
+@@ -1341,6 +1414,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto bad_fork_free_pid;
+ }
+
++ /* Synchronizes with gr_set_acls(); this must be called
++ past the point of no return for fork().
++ */
++ gr_copy_label(p);
++
+ if (clone_flags & CLONE_THREAD) {
+ current->signal->nr_threads++;
+ atomic_inc(&current->signal->live);
+@@ -1421,6 +1499,8 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+ free_task(p);
+ fork_out:
++ gr_log_forkfail(retval);
++
+ return ERR_PTR(retval);
+ }
+
+@@ -1507,6 +1587,7 @@ long do_fork(unsigned long clone_flags,
+
+ p = copy_process(clone_flags, stack_start, regs, stack_size,
+ child_tidptr, NULL, trace);
++ add_latent_entropy();
+ /*
+ * Do this prior waking up the new thread - the thread pointer
+ * might get invalid after that point, if the thread exits quickly.
+@@ -1521,6 +1602,8 @@ long do_fork(unsigned long clone_flags,
+ if (clone_flags & CLONE_PARENT_SETTID)
+ put_user(nr, parent_tidptr);
+
++ gr_handle_brute_check();
++
+ if (clone_flags & CLONE_VFORK) {
+ p->vfork_done = &vfork;
+ init_completion(&vfork);
+@@ -1591,7 +1674,7 @@ void __init proc_caches_init(void)
+ mm_cachep = kmem_cache_create("mm_struct",
+ sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
++ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
+ mmap_init();
+ nsproxy_cache_init();
+ }
+@@ -1630,7 +1713,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do useless copy */
+- if (fs->users == 1)
++ if (atomic_read(&fs->users) == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+@@ -1719,7 +1802,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+ fs = current->fs;
+ spin_lock(&fs->lock);
+ current->fs = new_fs;
+- if (--fs->users)
++ gr_set_chroot_entries(current, &current->fs->root);
++ if (atomic_dec_return(&fs->users))
+ new_fs = NULL;
+ else
+ new_fs = fs;
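
The large fork.c rework pulls the body of the dup_mmap() copy loop out into dup_vma(), which either returns a fully constructed VMA or unwinds its own partial state, leaving the loop with a single -ENOMEM branch; it also adds mm_access() so callers can take a ptrace-checked reference to another task's mm. A userspace sketch of the construct-or-unwind helper shape, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct vma { char *name; struct vma *next; };

    static struct vma *dup_vma(const struct vma *src)
    {
        struct vma *tmp = malloc(sizeof(*tmp));
        if (!tmp)
            goto fail;
        tmp->name = strdup(src->name);
        if (!tmp->name)
            goto fail_free;             /* unwind partial construction */
        tmp->next = NULL;
        return tmp;

    fail_free:
        free(tmp);
    fail:
        return NULL;
    }

    int main(void)
    {
        struct vma src = { "stack", NULL };
        struct vma *copy = dup_vma(&src);
        if (!copy)
            return 1;                   /* caller: retval = -ENOMEM */
        printf("%s\n", copy->name);
        free(copy->name);
        free(copy);
        return 0;
    }
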
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 8888815..9a6f6fb 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -54,6 +54,7 @@
+ #include <linux/mount.h>
+ #include <linux/pagemap.h>
+ #include <linux/syscalls.h>
++#include <linux/ptrace.h>
+ #include <linux/signal.h>
+ #include <linux/export.h>
+ #include <linux/magic.h>
+@@ -240,6 +241,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ struct page *page, *page_head;
+ int err, ro = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
++ return -EFAULT;
++#endif
++
+ /*
+ * The futex address must be "naturally" aligned.
+ */
+@@ -438,7 +444,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+
+ static int get_futex_value_locked(u32 *dest, u32 __user *from)
+ {
+- int ret;
++ unsigned long ret;
+
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+@@ -2744,6 +2750,7 @@ static int __init futex_init(void)
+ {
+ u32 curval;
+ int i;
++ mm_segment_t oldfs;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+@@ -2755,8 +2762,11 @@ static int __init futex_init(void)
+ * implementation, the non-functional ones will return
+ * -ENOSYS.
+ */
++ oldfs = get_fs();
++ set_fs(USER_DS);
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
++ set_fs(oldfs);
+
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ plist_head_init(&futex_queues[i].chain);
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index a9642d5..51eb98c 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+ return 0;
+ }
+
+-static void __user *futex_uaddr(struct robust_list __user *entry,
++static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
+ compat_long_t futex_offset)
+ {
+ compat_uptr_t base = ptr_to_compat(entry);
+diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
+index 9b22d03..6295b62 100644
+--- a/kernel/gcov/base.c
++++ b/kernel/gcov/base.c
+@@ -102,11 +102,6 @@ void gcov_enable_events(void)
+ }
+
+ #ifdef CONFIG_MODULES
+-static inline int within(void *addr, void *start, unsigned long size)
+-{
+- return ((addr >= start) && (addr < start + size));
+-}
+-
+ /* Update list and generate events when modules are unloaded. */
+ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
+ prev = NULL;
+ /* Remove entries located in module from linked list. */
+ for (info = gcov_info_head; info; info = info->next) {
+- if (within(info, mod->module_core, mod->core_size)) {
++ if (within_module_core_rw((unsigned long)info, mod)) {
+ if (prev)
+ prev->next = info->next;
+ else
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 60f7e32..d703ad4 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -1414,7 +1414,7 @@ void hrtimer_peek_ahead_timers(void)
+ local_irq_restore(flags);
+ }
+
+-static void run_hrtimer_softirq(struct softirq_action *h)
++static __latent_entropy void run_hrtimer_softirq(void)
+ {
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+@@ -1756,7 +1756,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata hrtimers_nb = {
++static struct notifier_block hrtimers_nb = {
+ .notifier_call = hrtimer_cpu_notify,
+ };
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 52bdd58..56002b5 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -900,22 +900,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ return -ENOSYS;
+ if (!try_module_get(desc->owner))
+ return -ENODEV;
+- /*
+- * Some drivers like serial.c use request_irq() heavily,
+- * so we have to be careful not to interfere with a
+- * running system.
+- */
+- if (new->flags & IRQF_SAMPLE_RANDOM) {
+- /*
+- * This function might sleep, we want to call it first,
+- * outside of the atomic block.
+- * Yes, this might clear the entropy pool if the wrong
+- * driver is attempted to be loaded, without actually
+- * installing a new handler, but is this really a problem,
+- * only the sysadmin is able to do this.
+- */
+- rand_initialize_irq(irq);
+- }
+
+ /*
+ * Check whether the interrupt nests into another interrupt
+@@ -1361,7 +1345,6 @@ EXPORT_SYMBOL(free_irq);
+ * Flags:
+ *
+ * IRQF_SHARED Interrupt is shared
+- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
+ * IRQF_TRIGGER_* Specify active edge(s) or level
+ *
+ */
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 66ff710..794bc5a 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -13,6 +13,7 @@
+ #include <linux/sort.h>
+ #include <linux/err.h>
+ #include <linux/jump_label.h>
++#include <linux/mm.h>
+
+ #ifdef HAVE_JUMP_LABEL
+
+@@ -55,7 +56,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
+
+ size = (((unsigned long)stop - (unsigned long)start)
+ / sizeof(struct jump_entry));
++ pax_open_kernel();
+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
++ pax_close_kernel();
+ }
+
+ static void jump_label_update(struct jump_label_key *key, int enable);
+@@ -303,10 +306,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
+ struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+ struct jump_entry *iter;
+
++ pax_open_kernel();
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ if (within_module_init(iter->code, mod))
+ iter->code = 0;
+ }
++ pax_close_kernel();
+ }
+
+ static int
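
The jump_label.c hunks wrap the entry-table writes in pax_open_kernel()/pax_close_kernel(), since under KERNEXEC those tables are otherwise read-only. The closest userspace analogue is flipping page protections around a patch with mprotect():

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);              /* steady state: RO */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        strcpy(p, "patched");                        /* sort()/fixup step  */
        mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

        printf("%s\n", p);
        munmap(p, pagesz);
        return 0;
    }
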
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 079f1d3..d712c9c 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -11,6 +11,9 @@
+ * Changed the compression method from stem compression to "table lookup"
+ * compression (see scripts/kallsyms.c for a more complete description)
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
+
+ static inline int is_kernel_inittext(unsigned long addr)
+ {
++ if (system_state != SYSTEM_BOOTING)
++ return 0;
++
+ if (addr >= (unsigned long)_sinittext
+ && addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+ }
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_MODULES
++static inline int is_module_text(unsigned long addr)
++{
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
++ return 1;
++
++ addr = ktla_ktva(addr);
++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
++}
++#else
++static inline int is_module_text(unsigned long addr)
++{
++ return 0;
++}
++#endif
++#endif
++
+ static inline int is_kernel_text(unsigned long addr)
+ {
+ if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
+@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
+
+ static inline int is_kernel(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_kernel_text(addr) || is_kernel_inittext(addr))
++ return 1;
++
++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
++#else
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
++#endif
++
+ return 1;
+ return in_gate_area_no_mm(addr);
+ }
+
+ static int is_ksym_addr(unsigned long addr)
+ {
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (is_module_text(addr))
++ return 0;
++#endif
++
+ if (all_var)
+ return is_kernel(addr);
+
+@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
+
+ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
+ {
+- iter->name[0] = '\0';
+ iter->nameoff = get_symbol_offset(new_pos);
+ iter->pos = new_pos;
+ }
+@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
+ {
+ struct kallsym_iter *iter = m->private;
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ if (current_uid())
++ return 0;
++#endif
++
+ /* Some debugging symbols have no name. Ignore them. */
+ if (!iter->name[0])
+ return 0;
+@@ -515,6 +558,7 @@ static int s_show(struct seq_file *m, void *p)
+ */
+ type = iter->exported ? toupper(iter->type) :
+ tolower(iter->type);
++
+ seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
+ type, iter->name, iter->module_name);
+ } else
+@@ -540,7 +584,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
+ struct kallsym_iter *iter;
+ int ret;
+
+- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
++ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ reset_iter(iter, 0);
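
In kallsyms.c, switching the iterator allocation from kmalloc() to kzalloc() is what lets the deleted iter->name[0] = '\0' in reset_iter() go away: the whole struct starts zeroed, so no stale heap bytes can reach the /proc/kallsyms reader. The same trade in userspace C:

    #include <stdio.h>
    #include <stdlib.h>

    struct iter { char name[128]; unsigned long pos; };

    static struct iter *iter_open(void)
    {
        return calloc(1, sizeof(struct iter));   /* kzalloc() analogue */
    }

    int main(void)
    {
        struct iter *it = iter_open();
        if (!it)
            return 1;
        printf("name empty: %d, pos: %lu\n", it->name[0] == '\0', it->pos);
        free(it);
        return 0;
    }
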
+diff --git a/kernel/kexec.c b/kernel/kexec.c
+index dc7bc08..4601964 100644
+--- a/kernel/kexec.c
++++ b/kernel/kexec.c
+@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
+ unsigned long flags)
+ {
+ struct compat_kexec_segment in;
+- struct kexec_segment out, __user *ksegments;
++ struct kexec_segment out;
++ struct kexec_segment __user *ksegments;
+ unsigned long i, result;
+
+ /* Don't allow clients that don't understand the native
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index a16dac1..3227c2c 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -64,7 +64,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
+ kfree(info->argv);
+ }
+
+-static int call_modprobe(char *module_name, int wait)
++static int call_modprobe(char *module_name, char *module_param, int wait)
+ {
+ static char *envp[] = {
+ "HOME=/",
+@@ -73,7 +73,7 @@ static int call_modprobe(char *module_name, int wait)
+ NULL
+ };
+
+- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
+ if (!argv)
+ goto out;
+
+@@ -85,7 +85,8 @@ static int call_modprobe(char *module_name, int wait)
+ argv[1] = "-q";
+ argv[2] = "--";
+ argv[3] = module_name; /* check free_modprobe_argv() */
+- argv[4] = NULL;
++ argv[4] = module_param;
++ argv[5] = NULL;
+
+ return call_usermodehelper_fns(modprobe_path, argv, envp,
+ wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
+@@ -110,9 +111,8 @@ out:
+ * If module auto-loading support is disabled then this function
+ * becomes a no-operation.
+ */
+-int __request_module(bool wait, const char *fmt, ...)
++static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
+ {
+- va_list args;
+ char module_name[MODULE_NAME_LEN];
+ unsigned int max_modprobes;
+ int ret;
+@@ -120,9 +120,7 @@ int __request_module(bool wait, const char *fmt, ...)
+ #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
+ static int kmod_loop_msg;
+
+- va_start(args, fmt);
+- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
+- va_end(args);
++ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
+ if (ret >= MODULE_NAME_LEN)
+ return -ENAMETOOLONG;
+
+@@ -130,6 +128,20 @@ int __request_module(bool wait, const char *fmt, ...)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (!current_uid()) {
++ /* hack to workaround consolekit/udisks stupidity */
++ read_lock(&tasklist_lock);
++ if (!strcmp(current->comm, "mount") &&
++ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
++ read_unlock(&tasklist_lock);
++ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
++ return -EPERM;
++ }
++ read_unlock(&tasklist_lock);
++ }
++#endif
++
+ /* If modprobe needs a service that is in a module, we get a recursive
+ * loop. Limit the number of running kmod threads to max_threads/2 or
+ * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
+@@ -158,11 +170,52 @@ int __request_module(bool wait, const char *fmt, ...)
+
+ trace_module_request(module_name, wait, _RET_IP_);
+
+- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
++ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+
+ atomic_dec(&kmod_concurrent);
+ return ret;
+ }
++
++int ___request_module(bool wait, char *module_param, const char *fmt, ...)
++{
++ va_list args;
++ int ret;
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, module_param, fmt, args);
++ va_end(args);
++
++ return ret;
++}
++
++int __request_module(bool wait, const char *fmt, ...)
++{
++ va_list args;
++ int ret;
++
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (current_uid()) {
++ char module_param[MODULE_NAME_LEN];
++
++ memset(module_param, 0, sizeof(module_param));
++
++ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, module_param, fmt, args);
++ va_end(args);
++
++ return ret;
++ }
++#endif
++
++ va_start(args, fmt);
++ ret = ____request_module(wait, NULL, fmt, args);
++ va_end(args);
++
++ return ret;
++}
++
+ EXPORT_SYMBOL(__request_module);
+ #endif /* CONFIG_MODULES */
+
+@@ -188,6 +241,19 @@ static int ____call_usermodehelper(void *data)
+ */
+ set_user_nice(current, 0);
+
++#ifdef CONFIG_GRKERNSEC
++ /* This is race-free as far as userland is concerned: we
++ copied out the path to be used prior to this point and
++ are now operating on that copy.
++ */
++ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
++ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7)) || strstr(sub_info->path, "..")) {
++ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
++ retval = -EPERM;
++ goto fail;
++ }
++#endif
++
+ retval = -ENOMEM;
+ new = prepare_kernel_cred(current);
+ if (!new)
+@@ -221,6 +287,10 @@ fail:
+
+ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ {
++#ifdef CONFIG_GRKERNSEC
++ kfree(info->path);
++ info->path = info->origpath;
++#endif
+ if (info->cleanup)
+ (*info->cleanup)(info);
+ kfree(info);
+@@ -265,7 +335,7 @@ static int wait_for_helper(void *data)
+ *
+ * Thus the __user pointer cast is valid here.
+ */
+- sys_wait4(pid, (int __user *)&ret, 0, NULL);
++ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
+
+ /*
+ * If ret is 0, either ____call_usermodehelper failed and the
+@@ -413,7 +483,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+ goto out;
+
+ INIT_WORK(&sub_info->work, __call_usermodehelper);
++#ifdef CONFIG_GRKERNSEC
++ sub_info->origpath = path;
++ sub_info->path = kstrdup(path, gfp_mask);
++#else
+ sub_info->path = path;
++#endif
+ sub_info->argv = argv;
+ sub_info->envp = envp;
+ out:
+@@ -512,7 +587,7 @@ EXPORT_SYMBOL(call_usermodehelper_exec);
+ static int proc_cap_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table t;
++ ctl_table_no_const t;
+ unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
+ kernel_cap_t new_cap;
+ int err, i;
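
A note on the kmod.c hunks above: widening argv from five to six slots is what lets MODHARDEN hand a marker argument such as "grsec_modharden_normal<uid>_" to modprobe, which load_module() later recognizes in mod->args and turns into a denial plus audit message. Below is a minimal userspace sketch of the vector layout; the modprobe path, module name, and uid are illustrative placeholders, not values taken from the patch.

    #include <stdio.h>

    int main(void)
    {
        /* Mirrors the six-slot vector built in the patched call_modprobe().
         * module_param is NULL for root-initiated loads; since execve()
         * stops at the first NULL, the vector then simply ends one slot
         * early and behavior matches the old five-slot code. */
        char *modprobe_path = "/sbin/modprobe";              /* placeholder */
        char *module_name   = "fuse";                        /* placeholder */
        char *module_param  = "grsec_modharden_normal1000_"; /* placeholder uid */
        char *argv[6] = { modprobe_path, "-q", "--",
                          module_name, module_param, NULL };

        for (int i = 0; argv[i]; i++)
            printf("argv[%d] = %s\n", i, argv[i]);
        return 0;
    }
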
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index bc90b87..32da385 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -31,6 +31,9 @@
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kprobes.h>
+ #include <linux/hash.h>
+ #include <linux/init.h>
+@@ -185,7 +188,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+ * kernel image and loaded module images reside. This is required
+ * so x86_64 can correctly handle the %rip-relative fixups.
+ */
+- kip->insns = module_alloc(PAGE_SIZE);
++ kip->insns = module_alloc_exec(PAGE_SIZE);
+ if (!kip->insns) {
+ kfree(kip);
+ return NULL;
+@@ -225,7 +228,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+ */
+ if (!list_is_singular(&kip->list)) {
+ list_del(&kip->list);
+- module_free(NULL, kip->insns);
++ module_free_exec(NULL, kip->insns);
+ kfree(kip);
+ }
+ return 1;
+@@ -1955,7 +1958,7 @@ static int __init init_kprobes(void)
+ {
+ int i, err = 0;
+ unsigned long offset = 0, size = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
+@@ -2040,11 +2043,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
+ kprobe_type = "k";
+
+ if (sym)
+- seq_printf(pi, "%p %s %s+0x%x %s ",
++ seq_printf(pi, "%pK %s %s+0x%x %s ",
+ p->addr, kprobe_type, sym, offset,
+ (modname ? modname : " "));
+ else
+- seq_printf(pi, "%p %s %p ",
++ seq_printf(pi, "%pK %s %pK ",
+ p->addr, kprobe_type, p->addr);
+
+ if (!pp)
+@@ -2081,7 +2084,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
+ const char *sym = NULL;
+ unsigned int i = *(loff_t *) v;
+ unsigned long offset = 0;
+- char *modname, namebuf[128];
++ char *modname, namebuf[KSYM_NAME_LEN];
+
+ head = &kprobe_table[i];
+ preempt_disable();
+@@ -2204,7 +2207,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+ {
+ char buf[32];
+- int buf_size;
++ size_t buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
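
The final kprobes hunk (int buf_size becoming size_t) is pure type hygiene: count arrives as a size_t, and holding the clamped length in a signed int invites sign and truncation surprises if the expression is ever reordered. A contrived but runnable illustration of the hazard being removed; the 1 << 33 count is hypothetical:

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        size_t count = (size_t)1 << 33;   /* contrived oversized count */

        /* Safe shape, as in the patched code: clamp in size_t space. */
        size_t buf_size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;

        /* Hazardous shape: the same count squeezed through an int first. */
        int truncated = (int)count;       /* 0 on LP64: high bits are lost */

        printf("clamped size_t: %zu, via int: %d\n", buf_size, truncated);
        return 0;
    }
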
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 4e316e1..a2879b6 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
+ {
+ if (count+1 > UEVENT_HELPER_PATH_LEN)
+ return -ENOENT;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+ memcpy(uevent_helper, buf, count);
+ uevent_helper[count] = '\0';
+ if (count && uevent_helper[count-1] == '\n')
+@@ -156,7 +158,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
+ return count;
+ }
+
+-static struct bin_attribute notes_attr = {
++static bin_attribute_no_const notes_attr __read_only = {
+ .attr = {
+ .name = "notes",
+ .mode = S_IRUGO,
+diff --git a/kernel/lockdep.c b/kernel/lockdep.c
+index b2e08c9..01d8049 100644
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -592,6 +592,10 @@ static int static_obj(void *obj)
+ end = (unsigned long) &_end,
+ addr = (unsigned long) obj;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ start = ktla_ktva(start);
++#endif
++
+ /*
+ * static variable?
+ */
+@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+ if (!static_obj(lock->key)) {
+ debug_locks_off();
+ printk("INFO: trying to register non-static key.\n");
++ printk("lock:%pS key:%pS.\n", lock, lock->key);
+ printk("the code is fine but needs lockdep annotation.\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ if (!class)
+ return 0;
+ }
+- atomic_inc((atomic_t *)&class->ops);
++ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
+ if (very_verbose(class)) {
+ printk("\nacquire class [%p] %s", class->key, class->name);
+ if (class->name_version > 1)
+diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
+index 91c32a0..7b88d63 100644
+--- a/kernel/lockdep_proc.c
++++ b/kernel/lockdep_proc.c
+@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
+
+ static void print_name(struct seq_file *m, struct lock_class *class)
+ {
+- char str[128];
++ char str[KSYM_NAME_LEN];
+ const char *name = class->name;
+
+ if (!name) {
+@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
+ return 0;
+ }
+
+- seq_printf(m, "%p", class->key);
++ seq_printf(m, "%pK", class->key);
+ #ifdef CONFIG_DEBUG_LOCKDEP
+ seq_printf(m, " OPS:%8ld", class->ops);
+ #endif
+@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
+
+ list_for_each_entry(entry, &class->locks_after, entry) {
+ if (entry->distance == 1) {
+- seq_printf(m, " -> [%p] ", entry->class->key);
++ seq_printf(m, " -> [%pK] ", entry->class->key);
+ print_name(m, entry->class);
+ seq_puts(m, "\n");
+ }
+@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
+ if (!class->key)
+ continue;
+
+- seq_printf(m, "[%p] ", class->key);
++ seq_printf(m, "[%pK] ", class->key);
+ print_name(m, class);
+ seq_puts(m, "\n");
+ }
+@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+ if (!i)
+ seq_line(m, '-', 40-namelen, namelen);
+
+- snprintf(ip, sizeof(ip), "[<%p>]",
++ snprintf(ip, sizeof(ip), "[<%pK>]",
+ (void *)class->contention_point[i]);
+ seq_printf(m, "%40s %14lu %29s %pS\n",
+ name, stats->contention_point[i],
+@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
+ if (!i)
+ seq_line(m, '-', 40-namelen, namelen);
+
+- snprintf(ip, sizeof(ip), "[<%p>]",
++ snprintf(ip, sizeof(ip), "[<%pK>]",
+ (void *)class->contending_point[i]);
+ seq_printf(m, "%40s %14lu %29s %pS\n",
+ name, stats->contending_point[i],
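
The %p to %pK conversions here and in the kprobes and module.c hunks all lean on the same mechanism: %pK consults the kptr_restrict sysctl and prints a zeroed value to readers lacking the required capability, so /proc and debugfs output stops handing out kernel addresses. A minimal module sketch showing both formats side by side on a 3.2-era kernel; the module name and build glue are assumed, not part of the patch:

    #include <linux/module.h>
    #include <linux/kernel.h>

    static int __init pk_demo_init(void)
    {
        static int object;

        pr_info("raw:   %p\n",  &object);  /* always prints the address */
        pr_info("gated: %pK\n", &object);  /* zeroed under kptr_restrict */
        return 0;
    }

    static void __exit pk_demo_exit(void)
    {
    }

    module_init(pk_demo_init);
    module_exit(pk_demo_exit);
    MODULE_LICENSE("GPL");
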
+diff --git a/kernel/module.c b/kernel/module.c
+index 65362d9..96ac6ba 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -58,6 +58,7 @@
+ #include <linux/jump_label.h>
+ #include <linux/pfn.h>
+ #include <linux/bsearch.h>
++#include <linux/grsecurity.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/module.h>
+@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
+
+ /* Bounds of module allocation, for speeding __module_address.
+ * Protected by module_mutex. */
+-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
+
+ int register_module_notifier(struct notifier_block * nb)
+ {
+@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+ return true;
+
+ list_for_each_entry_rcu(mod, &modules, list) {
+- struct symsearch arr[] = {
++ struct symsearch modarr[] = {
+ { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+ NOT_GPL_ONLY, false },
+ { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+ #endif
+ };
+
+- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
+ return true;
+ }
+ return false;
+@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
+ static int percpu_modalloc(struct module *mod,
+ unsigned long size, unsigned long align)
+ {
+- if (align > PAGE_SIZE) {
++ if (align-1 >= PAGE_SIZE) {
+ printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+ mod->name, align, PAGE_SIZE);
+ align = PAGE_SIZE;
+@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
+ */
+ #ifdef CONFIG_SYSFS
+
+-#ifdef CONFIG_KALLSYMS
++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
+ static inline bool sect_empty(const Elf_Shdr *sect)
+ {
+ return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+@@ -1323,7 +1325,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
+ {
+ unsigned int notes, loaded, i;
+ struct module_notes_attrs *notes_attrs;
+- struct bin_attribute *nattr;
++ bin_attribute_no_const *nattr;
+
+ /* failed to create section attributes, so can't create notes */
+ if (!mod->sect_attrs)
+@@ -1435,7 +1437,7 @@ static void del_usage_links(struct module *mod)
+ static int module_add_modinfo_attrs(struct module *mod)
+ {
+ struct module_attribute *attr;
+- struct module_attribute *temp_attr;
++ module_attribute_no_const *temp_attr;
+ int error = 0;
+ int i;
+
+@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
+
+ static void unset_module_core_ro_nx(struct module *mod)
+ {
+- set_page_attributes(mod->module_core + mod->core_text_size,
+- mod->module_core + mod->core_size,
++ set_page_attributes(mod->module_core_rw,
++ mod->module_core_rw + mod->core_size_rw,
+ set_memory_x);
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_ro_size,
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_rw);
+ }
+
+ static void unset_module_init_ro_nx(struct module *mod)
+ {
+- set_page_attributes(mod->module_init + mod->init_text_size,
+- mod->module_init + mod->init_size,
++ set_page_attributes(mod->module_init_rw,
++ mod->module_init_rw + mod->init_size_rw,
+ set_memory_x);
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_ro_size,
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_rw);
+ }
+
+@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_rw);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_rw);
+ }
+ }
+@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry_rcu(mod, &modules, list) {
+- if ((mod->module_core) && (mod->core_text_size)) {
+- set_page_attributes(mod->module_core,
+- mod->module_core + mod->core_text_size,
++ if ((mod->module_core_rx) && (mod->core_size_rx)) {
++ set_page_attributes(mod->module_core_rx,
++ mod->module_core_rx + mod->core_size_rx,
+ set_memory_ro);
+ }
+- if ((mod->module_init) && (mod->init_text_size)) {
+- set_page_attributes(mod->module_init,
+- mod->module_init + mod->init_text_size,
++ if ((mod->module_init_rx) && (mod->init_size_rx)) {
++ set_page_attributes(mod->module_init_rx,
++ mod->module_init_rx + mod->init_size_rx,
+ set_memory_ro);
+ }
+ }
+@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
+
+ /* This may be NULL, but that's OK */
+ unset_module_init_ro_nx(mod);
+- module_free(mod, mod->module_init);
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
+ kfree(mod->args);
+ percpu_modfree(mod);
+
+ /* Free lock-classes: */
+- lockdep_free_key_range(mod->module_core, mod->core_size);
++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
+
+ /* Finally, free the core (containing the module structure) */
+ unset_module_core_ro_nx(mod);
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_core_rw);
+
+ #ifdef CONFIG_MPU
+ update_protections(current->mm);
+@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ unsigned int i;
+ int ret = 0;
+ const struct kernel_symbol *ksym;
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ int is_fs_load = 0;
++ int register_filesystem_found = 0;
++ char *p;
++
++ p = strstr(mod->args, "grsec_modharden_fs");
++ if (p) {
++ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
++ /* copy \0 as well */
++ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
++ is_fs_load = 1;
++ }
++#endif
+
+ for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
+ const char *name = info->strtab + sym[i].st_name;
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ /* it's a real shame this will never get ripped and copied
++ upstream! ;(
++ */
++ if (is_fs_load && !strcmp(name, "register_filesystem"))
++ register_filesystem_found = 1;
++#endif
++
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* We compiled with -fno-common. These are not
+@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ ksym = resolve_symbol_wait(mod, info, name);
+ /* Ok if resolved. */
+ if (ksym && !IS_ERR(ksym)) {
++ pax_open_kernel();
+ sym[i].st_value = ksym->value;
++ pax_close_kernel();
+ break;
+ }
+
+@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ secbase = (unsigned long)mod_percpu(mod);
+ else
+ secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
++ pax_open_kernel();
+ sym[i].st_value += secbase;
++ pax_close_kernel();
+ break;
+ }
+ }
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ if (is_fs_load && !register_filesystem_found) {
++ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
++ ret = -EPERM;
++ }
++#endif
++
+ return ret;
+ }
+
+@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
+ || s->sh_entsize != ~0UL
+ || strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
+ DEBUGP("\t%s\n", name);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_text_size = mod->core_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->core_size = debug_align(mod->core_size);
+- mod->core_ro_size = mod->core_size;
+- break;
+- case 3: /* whole core */
+- mod->core_size = debug_align(mod->core_size);
+- break;
+- }
+ }
+
+ DEBUGP("Init section allocation order:\n");
+@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
+ || s->sh_entsize != ~0UL
+ || !strstarts(sname, ".init"))
+ continue;
+- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+- | INIT_OFFSET_MASK);
++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
++ else
++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
++ s->sh_entsize |= INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", sname);
+ }
+- switch (m) {
+- case 0: /* executable */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_text_size = mod->init_size;
+- break;
+- case 1: /* RO: text and ro-data */
+- mod->init_size = debug_align(mod->init_size);
+- mod->init_ro_size = mod->init_size;
+- break;
+- case 3: /* whole init */
+- mod->init_size = debug_align(mod->init_size);
+- break;
+- }
+ }
+ }
+
+@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+
+ /* Put symbol section at end of init part of module. */
+ symsect->sh_flags |= SHF_ALLOC;
+- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
+ info->index.sym) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
+
+@@ -2206,19 +2223,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+ }
+
+ /* Append room for core symbols at end of core part. */
+- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
++ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
++ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
+
+ /* Put string table section at end of init part of module. */
+ strsect->sh_flags |= SHF_ALLOC;
+- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
+ info->index.str) | INIT_OFFSET_MASK;
+ DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
+
+ /* Append room for core symbols' strings at end of core part. */
+- info->stroffs = mod->core_size;
++ info->stroffs = mod->core_size_rx;
+ __set_bit(0, info->strmap);
+- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
++ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
+ }
+
+ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2234,11 +2251,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ /* Make sure we get permanent strtab: don't use info->strtab. */
+ mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+
++ pax_open_kernel();
++
+ /* Set types up while we still have access to sections. */
+ for (i = 0; i < mod->num_symtab; i++)
+ mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+
+- mod->core_symtab = dst = mod->module_core + info->symoffs;
++ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
+ src = mod->symtab;
+ for (ndst = i = 0; i < mod->num_symtab; i++) {
+ if (i == 0 ||
+@@ -2251,10 +2270,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ }
+ mod->core_num_syms = ndst;
+
+- mod->core_strtab = s = mod->module_core + info->stroffs;
++ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
+ for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
+ if (test_bit(i, info->strmap))
+ *++s = mod->strtab[i];
++
++ pax_close_kernel();
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2288,17 +2309,33 @@ void * __weak module_alloc(unsigned long size)
+ return size == 0 ? NULL : vmalloc_exec(size);
+ }
+
+-static void *module_alloc_update_bounds(unsigned long size)
++static void *module_alloc_update_bounds_rw(unsigned long size)
+ {
+ void *ret = module_alloc(size);
+
+ if (ret) {
+ mutex_lock(&module_mutex);
+ /* Update module bounds. */
+- if ((unsigned long)ret < module_addr_min)
+- module_addr_min = (unsigned long)ret;
+- if ((unsigned long)ret + size > module_addr_max)
+- module_addr_max = (unsigned long)ret + size;
++ if ((unsigned long)ret < module_addr_min_rw)
++ module_addr_min_rw = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rw)
++ module_addr_max_rw = (unsigned long)ret + size;
++ mutex_unlock(&module_mutex);
++ }
++ return ret;
++}
++
++static void *module_alloc_update_bounds_rx(unsigned long size)
++{
++ void *ret = module_alloc_exec(size);
++
++ if (ret) {
++ mutex_lock(&module_mutex);
++ /* Update module bounds. */
++ if ((unsigned long)ret < module_addr_min_rx)
++ module_addr_min_rx = (unsigned long)ret;
++ if ((unsigned long)ret + size > module_addr_max_rx)
++ module_addr_max_rx = (unsigned long)ret + size;
+ mutex_unlock(&module_mutex);
+ }
+ return ret;
+@@ -2475,8 +2512,14 @@ static struct module *setup_load_info(struct load_info *info)
+ static int check_modinfo(struct module *mod, struct load_info *info)
+ {
+ const char *modmagic = get_modinfo(info, "vermagic");
++ const char *license = get_modinfo(info, "license");
+ int err;
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ if (!license || !license_is_gpl_compatible(license))
++ return -ENOEXEC;
++#endif
++
+ /* This is allowed: modprobe --force will invalidate it. */
+ if (!modmagic) {
+ err = try_to_force_load(mod, "bad vermagic");
+@@ -2499,7 +2542,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
+ }
+
+ /* Set up license info based on the info section */
+- set_license(mod, get_modinfo(info, "license"));
++ set_license(mod, license);
+
+ return 0;
+ }
+@@ -2593,7 +2636,7 @@ static int move_module(struct module *mod, struct load_info *info)
+ void *ptr;
+
+ /* Do the allocs. */
+- ptr = module_alloc_update_bounds(mod->core_size);
++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. Just mark it as not being a
+@@ -2603,10 +2646,10 @@ static int move_module(struct module *mod, struct load_info *info)
+ if (!ptr)
+ return -ENOMEM;
+
+- memset(ptr, 0, mod->core_size);
+- mod->module_core = ptr;
++ memset(ptr, 0, mod->core_size_rw);
++ mod->module_core_rw = ptr;
+
+- ptr = module_alloc_update_bounds(mod->init_size);
++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+@@ -2614,12 +2657,39 @@ static int move_module(struct module *mod, struct load_info *info)
+ * after the module is initialized.
+ */
+ kmemleak_ignore(ptr);
+- if (!ptr && mod->init_size) {
+- module_free(mod, mod->module_core);
++ if (!ptr && mod->init_size_rw) {
++ module_free(mod, mod->module_core_rw);
+ return -ENOMEM;
+ }
+- memset(ptr, 0, mod->init_size);
+- mod->module_init = ptr;
++ memset(ptr, 0, mod->init_size_rw);
++ mod->module_init_rw = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
++ kmemleak_not_leak(ptr);
++ if (!ptr) {
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->core_size_rx);
++ pax_close_kernel();
++ mod->module_core_rx = ptr;
++
++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
++ kmemleak_ignore(ptr);
++ if (!ptr && mod->init_size_rx) {
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
++ return -ENOMEM;
++ }
++
++ pax_open_kernel();
++ memset(ptr, 0, mod->init_size_rx);
++ pax_close_kernel();
++ mod->module_init_rx = ptr;
+
+ /* Transfer each section which specifies SHF_ALLOC */
+ DEBUGP("final section addresses:\n");
+@@ -2630,16 +2700,45 @@ static int move_module(struct module *mod, struct load_info *info)
+ if (!(shdr->sh_flags & SHF_ALLOC))
+ continue;
+
+- if (shdr->sh_entsize & INIT_OFFSET_MASK)
+- dest = mod->module_init
+- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+- else
+- dest = mod->module_core + shdr->sh_entsize;
++ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_init_rw
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ else
++ dest = mod->module_init_rx
++ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
++ } else {
++ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
++ dest = mod->module_core_rw + shdr->sh_entsize;
++ else
++ dest = mod->module_core_rx + shdr->sh_entsize;
++ }
++
++ if (shdr->sh_type != SHT_NOBITS) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_64
++ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
++ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
++#endif
++ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
++ pax_open_kernel();
++ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ pax_close_kernel();
++ } else
++#endif
+
+- if (shdr->sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
++ }
+ /* Update sh_addr to point to copy in image. */
+- shdr->sh_addr = (unsigned long)dest;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if (shdr->sh_flags & SHF_EXECINSTR)
++ shdr->sh_addr = ktva_ktla((unsigned long)dest);
++ else
++#endif
++
++ shdr->sh_addr = (unsigned long)dest;
+ DEBUGP("\t0x%lx %s\n",
+ shdr->sh_addr, info->secstrings + shdr->sh_name);
+ }
+@@ -2694,12 +2793,12 @@ static void flush_module_icache(const struct module *mod)
+ * Do it before processing of module parameters, so the module
+ * can provide parameter accessor functions of its own.
+ */
+- if (mod->module_init)
+- flush_icache_range((unsigned long)mod->module_init,
+- (unsigned long)mod->module_init
+- + mod->init_size);
+- flush_icache_range((unsigned long)mod->module_core,
+- (unsigned long)mod->module_core + mod->core_size);
++ if (mod->module_init_rx)
++ flush_icache_range((unsigned long)mod->module_init_rx,
++ (unsigned long)mod->module_init_rx
++ + mod->init_size_rx);
++ flush_icache_range((unsigned long)mod->module_core_rx,
++ (unsigned long)mod->module_core_rx + mod->core_size_rx);
+
+ set_fs(old_fs);
+ }
+@@ -2779,8 +2878,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
+ {
+ kfree(info->strmap);
+ percpu_modfree(mod);
+- module_free(mod, mod->module_init);
+- module_free(mod, mod->module_core);
++ module_free_exec(mod, mod->module_init_rx);
++ module_free_exec(mod, mod->module_core_rx);
++ module_free(mod, mod->module_init_rw);
++ module_free(mod, mod->module_core_rw);
+ }
+
+ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2844,9 +2945,38 @@ static struct module *load_module(void __user *umod,
+ if (err)
+ goto free_unload;
+
++ /* Now copy in args */
++ mod->args = strndup_user(uargs, ~0UL >> 1);
++ if (IS_ERR(mod->args)) {
++ err = PTR_ERR(mod->args);
++ goto free_unload;
++ }
++
+ /* Set up MODINFO_ATTR fields */
+ setup_modinfo(mod, &info);
+
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ {
++ char *p, *p2;
++
++ if (strstr(mod->args, "grsec_modharden_netdev")) {
++ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
++ err = -EPERM;
++ goto free_modinfo;
++ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
++ p += sizeof("grsec_modharden_normal") - 1;
++ p2 = strstr(p, "_");
++ if (p2) {
++ *p2 = '\0';
++ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
++ *p2 = '_';
++ }
++ err = -EPERM;
++ goto free_modinfo;
++ }
++ }
++#endif
++
+ /* Fix up syms, so that st_value is a pointer to location. */
+ err = simplify_symbols(mod, &info);
+ if (err < 0)
+@@ -2862,13 +2992,6 @@ static struct module *load_module(void __user *umod,
+
+ flush_module_icache(mod);
+
+- /* Now copy in args */
+- mod->args = strndup_user(uargs, ~0UL >> 1);
+- if (IS_ERR(mod->args)) {
+- err = PTR_ERR(mod->args);
+- goto free_arch_cleanup;
+- }
+-
+ /* Mark state as coming so strong_try_module_get() ignores us. */
+ mod->state = MODULE_STATE_COMING;
+
+@@ -2926,11 +3049,10 @@ static struct module *load_module(void __user *umod,
+ unlock:
+ mutex_unlock(&module_mutex);
+ synchronize_sched();
+- kfree(mod->args);
+- free_arch_cleanup:
+ module_arch_cleanup(mod);
+ free_modinfo:
+ free_modinfo(mod);
++ kfree(mod->args);
+ free_unload:
+ module_unload_free(mod);
+ free_module:
+@@ -2971,16 +3093,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+ MODULE_STATE_COMING, mod);
+
+ /* Set RO and NX regions for core */
+- set_section_ro_nx(mod->module_core,
+- mod->core_text_size,
+- mod->core_ro_size,
+- mod->core_size);
++ set_section_ro_nx(mod->module_core_rx,
++ mod->core_size_rx,
++ mod->core_size_rx,
++ mod->core_size_rx);
+
+ /* Set RO and NX regions for init */
+- set_section_ro_nx(mod->module_init,
+- mod->init_text_size,
+- mod->init_ro_size,
+- mod->init_size);
++ set_section_ro_nx(mod->module_init_rx,
++ mod->init_size_rx,
++ mod->init_size_rx,
++ mod->init_size_rx);
+
+ do_mod_ctors(mod);
+ /* Start the module */
+@@ -3026,11 +3148,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+ mod->strtab = mod->core_strtab;
+ #endif
+ unset_module_init_ro_nx(mod);
+- module_free(mod, mod->module_init);
+- mod->module_init = NULL;
+- mod->init_size = 0;
+- mod->init_ro_size = 0;
+- mod->init_text_size = 0;
++ module_free(mod, mod->module_init_rw);
++ module_free_exec(mod, mod->module_init_rx);
++ mod->module_init_rw = NULL;
++ mod->module_init_rx = NULL;
++ mod->init_size_rw = 0;
++ mod->init_size_rx = 0;
+ mutex_unlock(&module_mutex);
+
+ return 0;
+@@ -3061,10 +3184,16 @@ static const char *get_ksymbol(struct module *mod,
+ unsigned long nextval;
+
+ /* At worse, next value is at end of module */
+- if (within_module_init(addr, mod))
+- nextval = (unsigned long)mod->module_init+mod->init_text_size;
++ if (within_module_init_rx(addr, mod))
++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
++ else if (within_module_init_rw(addr, mod))
++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
++ else if (within_module_core_rx(addr, mod))
++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
++ else if (within_module_core_rw(addr, mod))
++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
+ else
+- nextval = (unsigned long)mod->module_core+mod->core_text_size;
++ return NULL;
+
+ /* Scan for closest preceding symbol, and next symbol. (ELF
+ starts real symbols at 1). */
+@@ -3312,7 +3441,7 @@ static int m_show(struct seq_file *m, void *p)
+ char buf[8];
+
+ seq_printf(m, "%s %u",
+- mod->name, mod->init_size + mod->core_size);
++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
+ print_unload_info(m, mod);
+
+ /* Informative for users. */
+@@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
+ mod->state == MODULE_STATE_COMING ? "Loading":
+ "Live");
+ /* Used by oprofile and other similar tools. */
+- seq_printf(m, " 0x%pK", mod->module_core);
++ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
+
+ /* Taints info */
+ if (mod->taints)
+@@ -3357,7 +3486,17 @@ static const struct file_operations proc_modules_operations = {
+
+ static int __init proc_modules_init(void)
+ {
++#ifndef CONFIG_GRKERNSEC_HIDESYM
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
++#else
+ proc_create("modules", 0, NULL, &proc_modules_operations);
++#endif
++#else
++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
++#endif
+ return 0;
+ }
+ module_init(proc_modules_init);
+@@ -3416,12 +3555,12 @@ struct module *__module_address(unsigned long addr)
+ {
+ struct module *mod;
+
+- if (addr < module_addr_min || addr > module_addr_max)
++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
++ (addr < module_addr_min_rw || addr > module_addr_max_rw))
+ return NULL;
+
+ list_for_each_entry_rcu(mod, &modules, list)
+- if (within_module_core(addr, mod)
+- || within_module_init(addr, mod))
++ if (within_module_init(addr, mod) || within_module_core(addr, mod))
+ return mod;
+ return NULL;
+ }
+@@ -3455,11 +3594,20 @@ bool is_module_text_address(unsigned long addr)
+ */
+ struct module *__module_text_address(unsigned long addr)
+ {
+- struct module *mod = __module_address(addr);
++ struct module *mod;
++
++#ifdef CONFIG_X86_32
++ addr = ktla_ktva(addr);
++#endif
++
++ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
++ return NULL;
++
++ mod = __module_address(addr);
++
+ if (mod) {
+ /* Make sure it's within the text section. */
+- if (!within(addr, mod->module_init, mod->init_text_size)
+- && !within(addr, mod->module_core, mod->core_text_size))
++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
+ mod = NULL;
+ }
+ return mod;
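
The module.c rework above is the heart of W^X for modules: the single core/init allocations split into an RW pair for data (module_alloc) and an RX pair for text and rodata (module_alloc_exec, written only inside pax_open_kernel()/pax_close_kernel()). Which block a section lands in is decided purely from its ELF flags. A self-contained userspace sketch of that routing rule, using the standard <elf.h> types rather than the kernel's:

    #include <elf.h>
    #include <stdio.h>

    /* Same test the patched layout_sections()/move_module() apply:
     * writable or non-allocated sections go to the RW block, the
     * rest (text, rodata) to the RX block. */
    static const char *block_for(const Elf64_Shdr *s)
    {
        if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
            return "rw";
        return "rx";
    }

    int main(void)
    {
        Elf64_Shdr text   = { .sh_flags = SHF_ALLOC | SHF_EXECINSTR };
        Elf64_Shdr rodata = { .sh_flags = SHF_ALLOC };
        Elf64_Shdr data   = { .sh_flags = SHF_ALLOC | SHF_WRITE };

        printf(".text   -> %s\n", block_for(&text));
        printf(".rodata -> %s\n", block_for(&rodata));
        printf(".data   -> %s\n", block_for(&data));
        return 0;
    }

The same predicate appears for the core layout, the init layout, and move_module()'s destination choice, which is why the rx/rw suffixes propagate through so many struct module fields.
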
+diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
+index 7e3443f..b2a1e6b 100644
+--- a/kernel/mutex-debug.c
++++ b/kernel/mutex-debug.c
+@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
+ }
+
+ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+
+ /* Mark the current thread as blocked on the lock: */
+- ti->task->blocked_on = waiter;
++ task->blocked_on = waiter;
+ }
+
+ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti)
++ struct task_struct *task)
+ {
+ DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
+- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
+- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
+- ti->task->blocked_on = NULL;
++ DEBUG_LOCKS_WARN_ON(waiter->task != task);
++ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
++ task->blocked_on = NULL;
+
+ list_del_init(&waiter->list);
+ waiter->task = NULL;
+diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
+index 0799fd3..d06ae3b 100644
+--- a/kernel/mutex-debug.h
++++ b/kernel/mutex-debug.h
+@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
+ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+ extern void debug_mutex_add_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+- struct thread_info *ti);
++ struct task_struct *task);
+ extern void debug_mutex_unlock(struct mutex *lock);
+ extern void debug_mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+diff --git a/kernel/mutex.c b/kernel/mutex.c
+index 89096dd..f91ebc5 100644
+--- a/kernel/mutex.c
++++ b/kernel/mutex.c
+@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ spin_lock_mutex(&lock->wait_lock, flags);
+
+ debug_mutex_lock_common(lock, &waiter);
+- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
++ debug_mutex_add_waiter(lock, &waiter, task);
+
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
+ list_add_tail(&waiter.list, &lock->wait_list);
+@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ * TASK_UNINTERRUPTIBLE case.)
+ */
+ if (unlikely(signal_pending_state(state, task))) {
+- mutex_remove_waiter(lock, &waiter,
+- task_thread_info(task));
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_release(&lock->dep_map, 1, ip);
+ spin_unlock_mutex(&lock->wait_lock, flags);
+
+@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ done:
+ lock_acquired(&lock->dep_map, ip);
+ /* got the lock - rejoice! */
+- mutex_remove_waiter(lock, &waiter, current_thread_info());
++ mutex_remove_waiter(lock, &waiter, task);
+ mutex_set_owner(lock);
+
+ /* set it to 0 if there are no waiters left: */
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index 2d5cc4c..d9ea600 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -5,6 +5,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/vmalloc.h>
+ #include <linux/reboot.h>
++#include <linux/mm.h>
+
+ /*
+ * Notifier list for kernel code which wants to be called
+@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
+ while ((*nl) != NULL) {
+ if (n->priority > (*nl)->priority)
+ break;
+- nl = &((*nl)->next);
++ nl = (struct notifier_block **)&((*nl)->next);
+ }
+- n->next = *nl;
++ pax_open_kernel();
++ *(const void **)&n->next = *nl;
+ rcu_assign_pointer(*nl, n);
++ pax_close_kernel();
+ return 0;
+ }
+
+@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
+ return 0;
+ if (n->priority > (*nl)->priority)
+ break;
+- nl = &((*nl)->next);
++ nl = (struct notifier_block **)&((*nl)->next);
+ }
+- n->next = *nl;
++ pax_open_kernel();
++ *(const void **)&n->next = *nl;
+ rcu_assign_pointer(*nl, n);
++ pax_close_kernel();
+ return 0;
+ }
+
+@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
+ {
+ while ((*nl) != NULL) {
+ if ((*nl) == n) {
++ pax_open_kernel();
+ rcu_assign_pointer(*nl, n->next);
++ pax_close_kernel();
+ return 0;
+ }
+- nl = &((*nl)->next);
++ nl = (struct notifier_block **)&((*nl)->next);
+ }
+ return -ENOENT;
+ }
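
The pax_open_kernel()/pax_close_kernel() pairs added to notifier.c exist so notifier chains can live in read-only memory and still be spliced: writes to the next pointers are legal only inside the open/close window. A rough userspace analogue, assuming the chain head sits on an mprotect()-ed page (the real primitive toggles kernel write protection rather than userspace page permissions):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        void *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void **head;

        if (page == MAP_FAILED)
            return 1;
        head = page;
        *head = NULL;
        mprotect(page, pagesz, PROT_READ);              /* chain is now RO */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        *head = (void *)&head;                          /* splice in a node */
        mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() */

        printf("head = %p\n", *head);
        munmap(page, pagesz);
        return 0;
    }
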
+diff --git a/kernel/padata.c b/kernel/padata.c
+index b452599..5d68f4e 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
+ padata->pd = pd;
+ padata->cb_cpu = cb_cpu;
+
+- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+- atomic_set(&pd->seq_nr, -1);
++ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
++ atomic_set_unchecked(&pd->seq_nr, -1);
+
+- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
++ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
+
+ target_cpu = padata_cpu_hash(padata);
+ queue = per_cpu_ptr(pd->pqueue, target_cpu);
+@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+ padata_init_pqueues(pd);
+ padata_init_squeues(pd);
+ setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+- atomic_set(&pd->seq_nr, -1);
++ atomic_set_unchecked(&pd->seq_nr, -1);
+ atomic_set(&pd->reorder_objects, 0);
+ atomic_set(&pd->refcnt, 0);
+ pd->pinst = pinst;
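
pd->seq_nr is one of the counters PaX deliberately exempts from REFCOUNT overflow checking: it is a sequence number reset to -1 on reaching max_seq_nr and expected to wrap through zero, so atomic_unchecked_t opts it out while real reference counts stay protected. The intended wrap in plain C, with a tiny max_seq_nr for readability:

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_seq_nr = 3;                 /* tiny, for the demo */
        unsigned int seq_nr = (unsigned int)-1;      /* as padata_alloc_pd() */

        for (int i = 0; i < 8; i++) {
            if (seq_nr == max_seq_nr)
                seq_nr = (unsigned int)-1;           /* intentional reset */
            printf("seq %u\n", ++seq_nr);            /* wraps -1 -> 0 */
        }
        return 0;
    }
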
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 3458469..3ed0694 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -65,6 +65,14 @@ NORET_TYPE void panic(const char * fmt, ...)
+ int state = 0;
+
+ /*
++ * Disable local interrupts. This will prevent panic_smp_self_stop
++ * from deadlocking the first cpu that invokes the panic, since
++ * there is nothing to prevent an interrupt handler (that runs
++ * after the panic_lock is acquired) from invoking panic again.
++ */
++ local_irq_disable();
++
++ /*
+ * It's possible to come here directly from a panic-assertion and
+ * not have preempt disabled. Some functions called from here want
+ * preempt to be disabled. No point enabling it later though...
+@@ -78,7 +86,11 @@ NORET_TYPE void panic(const char * fmt, ...)
+ va_end(args);
+ printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+- dump_stack();
++ /*
++ * Avoid nested stack-dumping if a panic occurs during oops processing
++ */
++ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
++ dump_stack();
+ #endif
+
+ /*
+@@ -382,7 +394,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
+ const char *board;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
++ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
+ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (board)
+ printk(KERN_WARNING "Hardware name: %s\n", board);
+@@ -437,7 +449,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
+ */
+ void __stack_chk_fail(void)
+ {
+- panic("stack-protector: Kernel stack is corrupted in: %p\n",
++ dump_stack();
++ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
+ __builtin_return_address(0));
+ }
+ EXPORT_SYMBOL(__stack_chk_fail);
+diff --git a/kernel/pid.c b/kernel/pid.c
+index fa5f722..0c93e57 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -33,6 +33,7 @@
+ #include <linux/rculist.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/security.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
+@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
+
+ int pid_max = PID_MAX_DEFAULT;
+
+-#define RESERVED_PIDS 300
++#define RESERVED_PIDS 500
+
+ int pid_max_min = RESERVED_PIDS + 1;
+ int pid_max_max = PID_MAX_LIMIT;
+@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
+ */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
++ struct task_struct *task;
++
+ rcu_lockdep_assert(rcu_read_lock_held(),
+ "find_task_by_pid_ns() needs rcu_read_lock()"
+ " protection");
+- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++
++ if (gr_pid_is_chrooted(task))
++ return NULL;
++
++ return task;
+ }
+
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
+ return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
+ }
+
++struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
++{
++ rcu_lockdep_assert(rcu_read_lock_held(),
++ "find_task_by_pid_ns() needs rcu_read_lock()"
++ " protection");
++ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
++}
++
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
+ struct pid *pid;
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index 962c291..31cf69d7 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -6,9 +6,11 @@
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
++#include <linux/security.h>
+ #include <asm/uaccess.h>
+ #include <linux/kernel_stat.h>
+ #include <trace/events/timer.h>
++#include <linux/random.h>
+
+ /*
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+@@ -511,6 +513,8 @@ static void cleanup_timers(struct list_head *head,
+ */
+ void posix_cpu_timers_exit(struct task_struct *tsk)
+ {
++ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ sizeof(unsigned long long));
+ cleanup_timers(tsk->cpu_timers,
+ tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+
+@@ -1625,14 +1629,14 @@ struct k_clock clock_posix_cpu = {
+
+ static __init int init_posix_cpu_timers(void)
+ {
+- struct k_clock process = {
++ static struct k_clock process = {
+ .clock_getres = process_cpu_clock_getres,
+ .clock_get = process_cpu_clock_get,
+ .timer_create = process_cpu_timer_create,
+ .nsleep = process_cpu_nsleep,
+ .nsleep_restart = process_cpu_nsleep_restart,
+ };
+- struct k_clock thread = {
++ static struct k_clock thread = {
+ .clock_getres = thread_cpu_clock_getres,
+ .clock_get = thread_cpu_clock_get,
+ .timer_create = thread_cpu_timer_create,
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index e885be1..f005738 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -43,6 +43,7 @@
+ #include <linux/idr.h>
+ #include <linux/posix-clock.h>
+ #include <linux/posix-timers.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscalls.h>
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
+ * which we beg off on and pass to do_sys_settimeofday().
+ */
+
+-static struct k_clock posix_clocks[MAX_CLOCKS];
++static struct k_clock *posix_clocks[MAX_CLOCKS];
+
+ /*
+ * These ones are defined below.
+@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
+ */
+ static __init int init_posix_timers(void)
+ {
+- struct k_clock clock_realtime = {
++ static struct k_clock clock_realtime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_clock_realtime_get,
+ .clock_set = posix_clock_realtime_set,
+@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic = {
++ static struct k_clock clock_monotonic = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_ktime_get_ts,
+ .nsleep = common_nsleep,
+@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
+ .timer_get = common_timer_get,
+ .timer_del = common_timer_del,
+ };
+- struct k_clock clock_monotonic_raw = {
++ static struct k_clock clock_monotonic_raw = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_monotonic_raw,
+ };
+- struct k_clock clock_realtime_coarse = {
++ static struct k_clock clock_realtime_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_realtime_coarse,
+ };
+- struct k_clock clock_monotonic_coarse = {
++ static struct k_clock clock_monotonic_coarse = {
+ .clock_getres = posix_get_coarse_res,
+ .clock_get = posix_get_monotonic_coarse,
+ };
+- struct k_clock clock_boottime = {
++ static struct k_clock clock_boottime = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_boottime,
+ .nsleep = common_nsleep,
+@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
+ return;
+ }
+
+- posix_clocks[clock_id] = *new_clock;
++ posix_clocks[clock_id] = new_clock;
+ }
+ EXPORT_SYMBOL_GPL(posix_timers_register_clock);
+
+@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
+ return (id & CLOCKFD_MASK) == CLOCKFD ?
+ &clock_posix_dynamic : &clock_posix_cpu;
+
+- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
++ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
+ return NULL;
+- return &posix_clocks[id];
++ return posix_clocks[id];
+ }
+
+ static int common_timer_create(struct k_itimer *new_timer)
+@@ -539,7 +540,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ struct k_clock *kc = clockid_to_kclock(which_clock);
+ struct k_itimer *new_timer;
+ int error, new_timer_id;
+- sigevent_t event;
++ sigevent_t event = { };
+ int it_id_set = IT_ID_NOT_SET;
+
+ if (!kc)
+@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+ if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+ return -EFAULT;
+
++	/* only the CLOCK_REALTIME clock can be set; all other clocks
++	   have their clock_set fptr set to a nosettime dummy function.
++	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
++	   call common_clock_set, which calls do_sys_settimeofday, which
++	   we hook
++	*/
++
+ return kc->clock_set(which_clock, &new_tp);
+ }
+
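
The posix-timers conversion turns posix_clocks[] from an array of struct copies into an array of pointers, which is only sound because every registered k_clock becomes a static object with program lifetime; a pointer to an automatic would dangle the moment the init function returned. A compact illustration of the lifetime rule being relied on; the demo struct and names are invented:

    #include <stdio.h>

    struct k_clock_demo {
        int (*clock_getres)(void);
    };

    static struct k_clock_demo *clock_table[4];  /* pointers, not copies */

    static int demo_getres(void) { return 1; }

    static void register_clock(int id, struct k_clock_demo *clk)
    {
        clock_table[id] = clk;   /* safe only if *clk outlives this call */
    }

    int main(void)
    {
        /* 'static' gives the object program lifetime, as the patch does
         * for clock_realtime, clock_monotonic, and friends. */
        static struct k_clock_demo realtime = { .clock_getres = demo_getres };

        register_clock(0, &realtime);
        printf("getres -> %d\n", clock_table[0]->clock_getres());
        return 0;
    }

A side benefit of the pointer table is that the k_clock objects themselves can be placed in protected memory, matching the rest of the patch's structure-hardening theme.
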
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index deb5461..9fc0e9b 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
+ config HIBERNATION
+ bool "Hibernation (aka 'suspend to disk')"
+ depends on SWAP && ARCH_HIBERNATION_POSSIBLE
++ depends on !GRKERNSEC_KMEM
++ depends on !PAX_MEMORY_SANITIZE
+ select HIBERNATE_CALLBACKS
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
+index d523593..68197a4 100644
+--- a/kernel/power/poweroff.c
++++ b/kernel/power/poweroff.c
+@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+ };
+
+-static int pm_sysrq_init(void)
++static int __init pm_sysrq_init(void)
+ {
+ register_sysrq_key('o', &sysrq_poweroff_op);
+ return 0;
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 3d4b954..11af930 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
+ u64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
+ bool wakeup = false;
++ bool timedout = false;
+
+ do_gettimeofday(&start);
+
+@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
+
+ while (true) {
+ todo = 0;
++ if (time_after(jiffies, end_time))
++ timedout = true;
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ if (frozen(p) || !freezable(p))
+@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
+ * try_to_stop() after schedule() in ptrace/signal
+ * stop sees TIF_FREEZE.
+ */
+- if (!task_is_stopped_or_traced(p) &&
+- !freezer_should_skip(p))
++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
+ todo++;
++ if (timedout) {
++ printk(KERN_ERR "Task refusing to freeze:\n");
++ sched_show_task(p);
++ }
++ }
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+
+@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
+ todo += wq_busy;
+ }
+
+- if (!todo || time_after(jiffies, end_time))
++ if (!todo || timedout)
+ break;
+
+ if (pm_wakeup_pending()) {
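
The power/process.c change hoists the deadline check into a timedout flag sampled once per pass, so the new "refusing to freeze" diagnostic and the loop-exit test can never disagree about whether time ran out. The control flow, reduced to a runnable toy:

    #include <stdio.h>

    int main(void)
    {
        int jiffies = 0, end_time = 3;
        int timedout = 0;

        for (;;) {
            int todo = 1;              /* pretend one task never freezes */

            if (jiffies > end_time)    /* sampled once per pass */
                timedout = 1;
            if (todo && timedout)
                printf("Task refusing to freeze (pass %d)\n", jiffies);
            if (!todo || timedout)
                break;
            jiffies++;
        }
        return 0;
    }
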
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 16688ec..327729b 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
+ if (from_file && type != SYSLOG_ACTION_OPEN)
+ return 0;
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
++ return -EPERM;
++#endif
++
+ if (syslog_action_restricted(type)) {
+ if (capable(CAP_SYSLOG))
+ return 0;
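
The GRKERNSEC_DMESG hunk gates all of check_syslog_permissions(): with the feature enabled, a caller holding neither CAP_SYSLOG nor CAP_SYS_ADMIN is refused before any of the stock policy runs (capable_nolog is the grsec variant that skips audit logging on failure). The decision table as a self-contained stub; the capability flags are stand-ins:

    #include <stdio.h>
    #include <errno.h>

    static int grsec_enable_dmesg = 1;   /* mirrors the grsec toggle */

    /* Stand-ins for capable(CAP_SYSLOG) / capable_nolog(CAP_SYS_ADMIN). */
    static int check_dmesg_gate(int cap_syslog, int cap_sys_admin)
    {
        if (grsec_enable_dmesg && !cap_syslog && !cap_sys_admin)
            return -EPERM;
        return 0;   /* fall through to the stock syslog policy */
    }

    int main(void)
    {
        printf("unprivileged:  %d\n", check_dmesg_gate(0, 0));  /* -EPERM */
        printf("CAP_SYSLOG:    %d\n", check_dmesg_gate(1, 0));  /* 0 */
        printf("CAP_SYS_ADMIN: %d\n", check_dmesg_gate(0, 1));  /* 0 */
        return 0;
    }
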
+diff --git a/kernel/profile.c b/kernel/profile.c
+index 76b8e77..a2930e8 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -39,7 +39,7 @@ struct profile_hit {
+ /* Oprofile timer tick hook */
+ static int (*timer_hook)(struct pt_regs *) __read_mostly;
+
+-static atomic_t *prof_buffer;
++static atomic_unchecked_t *prof_buffer;
+ static unsigned long prof_len, prof_shift;
+
+ int prof_on __read_mostly;
+@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
+ hits[i].pc = 0;
+ continue;
+ }
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].hits = hits[i].pc = 0;
+ }
+ }
+@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+ * Add the current hit(s) and flush the write-queue out
+ * to the global buffer:
+ */
+- atomic_add(nr_hits, &prof_buffer[pc]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
+ for (i = 0; i < NR_PROFILE_HIT; ++i) {
+- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
++ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
+ hits[i].pc = hits[i].hits = 0;
+ }
+ out:
+@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
+ {
+ unsigned long pc;
+ pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
++ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ }
+ #endif /* !CONFIG_SMP */
+
+@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ return -EFAULT;
+ buf++; p++; count--; read++;
+ }
+- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
++ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
+ if (copy_to_user(buf, (void *)pnt, count))
+ return -EFAULT;
+ read += count;
+@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
+ }
+ #endif
+ profile_discard_flip_buffers();
+- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
++ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
+ return count;
+ }
+
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index f79803a..0dcc1be 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -211,7 +211,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ return ret;
+ }
+
+-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
++static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
++ unsigned int log)
+ {
+ const struct cred *cred = current_cred(), *tcred;
+
+@@ -237,7 +238,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ cred->gid == tcred->sgid &&
+ cred->gid == tcred->gid))
+ goto ok;
+- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
++ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
++ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
+ goto ok;
+ rcu_read_unlock();
+ return -EPERM;
+@@ -247,7 +249,8 @@ ok:
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+ if (dumpable != SUID_DUMP_USER &&
+- !task_ns_capable(task, CAP_SYS_PTRACE))
++ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
++ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
+@@ -257,7 +260,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+ {
+ int err;
+ task_lock(task);
+- err = __ptrace_may_access(task, mode);
++ err = __ptrace_may_access(task, mode, 0);
++ task_unlock(task);
++ return !err;
++}
++
++bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
++{
++ return __ptrace_may_access(task, mode, 0);
++}
++
++bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
++{
++ int err;
++ task_lock(task);
++ err = __ptrace_may_access(task, mode, 1);
+ task_unlock(task);
+ return !err;
+ }
+@@ -302,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ goto out;
+
+ task_lock(task);
+- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
+ task_unlock(task);
+ if (retval)
+ goto unlock_creds;
+@@ -317,7 +334,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ task->ptrace = PT_PTRACED;
+ if (seize)
+ task->ptrace |= PT_SEIZED;
+- if (task_ns_capable(task, CAP_SYS_PTRACE))
++ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
+ task->ptrace |= PT_PTRACE_CAP;
+
+ __ptrace_link(task, current);
+@@ -523,7 +540,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
+ break;
+ return -EIO;
+ }
+- if (copy_to_user(dst, buf, retval))
++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
+ return -EFAULT;
+ copied += retval;
+ src += retval;
+@@ -583,6 +600,9 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data)
+ if (data & PTRACE_O_TRACEEXIT)
+ child->ptrace |= PT_TRACE_EXIT;
+
++ if (data & PTRACE_O_TRACESECCOMP)
++ child->ptrace |= PT_TRACE_SECCOMP;
++
+ return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
+ }
+
+@@ -720,7 +740,7 @@ int ptrace_request(struct task_struct *child, long request,
+ bool seized = child->ptrace & PT_SEIZED;
+ int ret = -EIO;
+ siginfo_t siginfo, *si;
+- void __user *datavp = (void __user *) data;
++ void __user *datavp = (__force void __user *) data;
+ unsigned long __user *datalp = datavp;
+ unsigned long flags;
+
+@@ -922,14 +942,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+- if (!ret)
++ if (!ret) {
+ arch_ptrace_attach(child);
++ gr_audit_ptrace(child);
++ }
+ goto out_put_task_struct;
+ }
+
+@@ -957,7 +984,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+- return put_user(tmp, (unsigned long __user *)data);
++ return put_user(tmp, (__force unsigned long __user *)data);
+ }
+
+ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+@@ -1051,7 +1078,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+ }
+
+ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+- compat_long_t addr, compat_long_t data)
++ compat_ulong_t addr, compat_ulong_t data)
+ {
+ struct task_struct *child;
+ long ret;
+@@ -1067,14 +1094,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ ret = -EPERM;
++ goto out_put_task_struct;
++ }
++
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+- if (!ret)
++ if (!ret) {
+ arch_ptrace_attach(child);
++ gr_audit_ptrace(child);
++ }
+ goto out_put_task_struct;
+ }
+
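The split above yields three access-check entry points with deliberately different conventions, worth spelling out since they are easy to misread: ptrace_may_access() takes task_lock() itself and returns true when access is allowed; ptrace_may_access_nolock() assumes the caller already holds task_lock() and hands back the raw __ptrace_may_access() result, so it is nonzero (true) on denial; ptrace_may_access_log() behaves like the first but passes log=1, so the CAP_SYS_PTRACE capability check is audited rather than silent. A hypothetical caller sketch, not taken from the patch:

    /* Sketch only: note the inverted return convention of the _nolock
     * variant, which returns the raw error, i.e. true means DENIED. */
    task_lock(task);
    denied = ptrace_may_access_nolock(task, PTRACE_MODE_READ);
    task_unlock(task);
    if (denied)
            return -EPERM;

    if (!ptrace_may_access_log(task, PTRACE_MODE_ATTACH)) /* true = allowed */
            return -EPERM;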
+diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
+index 636af6d..90b936f 100644
+--- a/kernel/rcutiny.c
++++ b/kernel/rcutiny.c
+@@ -46,7 +46,7 @@
+ struct rcu_ctrlblk;
+ static void invoke_rcu_callbacks(void);
+ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+-static void rcu_process_callbacks(struct softirq_action *unused);
++static void rcu_process_callbacks(void);
+ static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_ctrlblk *rcp);
+@@ -186,7 +186,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
+ }
+
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ __rcu_process_callbacks(&rcu_sched_ctrlblk);
+ __rcu_process_callbacks(&rcu_bh_ctrlblk);
+diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
+index 2b0484a..07955ab 100644
+--- a/kernel/rcutiny_plugin.h
++++ b/kernel/rcutiny_plugin.h
+@@ -907,7 +907,7 @@ static int rcu_kthread(void *arg)
+ have_rcu_kthread_work = morework;
+ local_irq_restore(flags);
+ if (work)
+- rcu_process_callbacks(NULL);
++ rcu_process_callbacks();
+ schedule_timeout_interruptible(1); /* Leave CPU for others. */
+ }
+
+diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
+index 764825c..3aa6ac4 100644
+--- a/kernel/rcutorture.c
++++ b/kernel/rcutorture.c
+@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
+ { 0 };
+ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
+ { 0 };
+-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+-static atomic_t n_rcu_torture_alloc;
+-static atomic_t n_rcu_torture_alloc_fail;
+-static atomic_t n_rcu_torture_free;
+-static atomic_t n_rcu_torture_mberror;
+-static atomic_t n_rcu_torture_error;
++static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
++static atomic_unchecked_t n_rcu_torture_alloc;
++static atomic_unchecked_t n_rcu_torture_alloc_fail;
++static atomic_unchecked_t n_rcu_torture_free;
++static atomic_unchecked_t n_rcu_torture_mberror;
++static atomic_unchecked_t n_rcu_torture_error;
+ static long n_rcu_torture_boost_ktrerror;
+ static long n_rcu_torture_boost_rterror;
+ static long n_rcu_torture_boost_failure;
+@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
+
+ spin_lock_bh(&rcu_torture_lock);
+ if (list_empty(&rcu_torture_freelist)) {
+- atomic_inc(&n_rcu_torture_alloc_fail);
++ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
+ spin_unlock_bh(&rcu_torture_lock);
+ return NULL;
+ }
+- atomic_inc(&n_rcu_torture_alloc);
++ atomic_inc_unchecked(&n_rcu_torture_alloc);
+ p = rcu_torture_freelist.next;
+ list_del_init(p);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
+ static void
+ rcu_torture_free(struct rcu_torture *p)
+ {
+- atomic_inc(&n_rcu_torture_free);
++ atomic_inc_unchecked(&n_rcu_torture_free);
+ spin_lock_bh(&rcu_torture_lock);
+ list_add_tail(&p->rtort_free, &rcu_torture_freelist);
+ spin_unlock_bh(&rcu_torture_lock);
+@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
+ i = old_rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+- atomic_inc(&rcu_torture_wcount[i]);
++ atomic_inc_unchecked(&rcu_torture_wcount[i]);
+ old_rp->rtort_pipe_count++;
+ cur_ops->deferred_free(old_rp);
+ }
+@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
+ return;
+ }
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ spin_lock(&rand_lock);
+ cur_ops->read_delay(&rand);
+ n_rcu_torture_timers++;
+@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
+ continue;
+ }
+ if (p->rtort_mbtest == 0)
+- atomic_inc(&n_rcu_torture_mberror);
++ atomic_inc_unchecked(&n_rcu_torture_mberror);
+ cur_ops->read_delay(&rand);
+ preempt_disable();
+ pipe_count = p->rtort_pipe_count;
+@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+- atomic_read(&n_rcu_torture_alloc),
+- atomic_read(&n_rcu_torture_alloc_fail),
+- atomic_read(&n_rcu_torture_free),
+- atomic_read(&n_rcu_torture_mberror),
++ atomic_read_unchecked(&n_rcu_torture_alloc),
++ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
++ atomic_read_unchecked(&n_rcu_torture_free),
++ atomic_read_unchecked(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror,
+ n_rcu_torture_boost_failure,
+ n_rcu_torture_boosts,
+ n_rcu_torture_timers);
+- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
++ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
+ n_rcu_torture_boost_ktrerror != 0 ||
+ n_rcu_torture_boost_rterror != 0 ||
+ n_rcu_torture_boost_failure != 0)
+@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
+ if (i > 1) {
+ cnt += sprintf(&page[cnt], "!!! ");
+- atomic_inc(&n_rcu_torture_error);
++ atomic_inc_unchecked(&n_rcu_torture_error);
+ WARN_ON_ONCE(1);
+ }
+ cnt += sprintf(&page[cnt], "Reader Pipe: ");
+@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
+ cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ cnt += sprintf(&page[cnt], " %d",
+- atomic_read(&rcu_torture_wcount[i]));
++ atomic_read_unchecked(&rcu_torture_wcount[i]));
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats)
+@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
+
+ if (cur_ops->cleanup)
+ cur_ops->cleanup();
+- if (atomic_read(&n_rcu_torture_error))
++ if (atomic_read_unchecked(&n_rcu_torture_error))
+ rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
+ else
+ rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
+@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
+
+ rcu_torture_current = NULL;
+ rcu_torture_current_version = 0;
+- atomic_set(&n_rcu_torture_alloc, 0);
+- atomic_set(&n_rcu_torture_alloc_fail, 0);
+- atomic_set(&n_rcu_torture_free, 0);
+- atomic_set(&n_rcu_torture_mberror, 0);
+- atomic_set(&n_rcu_torture_error, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
++ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
++ atomic_set_unchecked(&n_rcu_torture_free, 0);
++ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
++ atomic_set_unchecked(&n_rcu_torture_error, 0);
+ n_rcu_torture_boost_ktrerror = 0;
+ n_rcu_torture_boost_rterror = 0;
+ n_rcu_torture_boost_failure = 0;
+ n_rcu_torture_boosts = 0;
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
+- atomic_set(&rcu_torture_wcount[i], 0);
++ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
+ per_cpu(rcu_torture_count, cpu)[i] = 0;
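The wholesale atomic_t to atomic_unchecked_t conversion above is the PaX REFCOUNT idiom: under that feature the ordinary atomic ops trap on signed overflow to stop reference-count wraparounds, so pure statistics counters that may legitimately wrap are moved to an unchecked twin type. A minimal sketch of the pattern, assuming x86 definitions along the lines PaX uses (the authoritative ones live in the arch headers patched elsewhere in this file):

    /* Sketch: same layout as atomic_t, but its helpers are compiled
     * without the overflow check, so wrapping is permitted by design. */
    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            asm volatile("lock incl %0" : "+m" (v->counter));
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return (*(volatile const int *)&v->counter);
    }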
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 1aa52af..d2875ad 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -369,9 +369,9 @@ void rcu_enter_nohz(void)
+ trace_rcu_dyntick("Start");
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+ local_irq_restore(flags);
+ }
+
+@@ -393,10 +393,10 @@ void rcu_exit_nohz(void)
+ return;
+ }
+ smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+ trace_rcu_dyntick("End");
+ local_irq_restore(flags);
+ }
+@@ -413,14 +413,14 @@ void rcu_nmi_enter(void)
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks_nmi_nesting == 0 &&
+- (atomic_read(&rdtp->dynticks) & 0x1))
++ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
+ return;
+ rdtp->dynticks_nmi_nesting++;
+ smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+ smp_mb__after_atomic_inc(); /* See above. */
+- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
++ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
+ }
+
+ /**
+@@ -439,9 +439,9 @@ void rcu_nmi_exit(void)
+ return;
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+- atomic_inc(&rdtp->dynticks);
++ atomic_inc_unchecked(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force delay to next write. */
+- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
++ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
+ }
+
+ /**
+@@ -476,7 +476,7 @@ void rcu_irq_exit(void)
+ */
+ static int dyntick_save_progress_counter(struct rcu_data *rdp)
+ {
+- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
++ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+ return 0;
+ }
+
+@@ -491,7 +491,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+ unsigned int curr;
+ unsigned int snap;
+
+- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
++ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
+ snap = (unsigned int)rdp->dynticks_snap;
+
+ /*
+@@ -1554,7 +1554,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static __latent_entropy void rcu_process_callbacks(void)
+ {
+ trace_rcu_utilization("Start RCU core");
+ __rcu_process_callbacks(&rcu_sched_state,
+diff --git a/kernel/rcutree.h b/kernel/rcutree.h
+index 849ce9e..74bc9de 100644
+--- a/kernel/rcutree.h
++++ b/kernel/rcutree.h
+@@ -86,7 +86,7 @@
+ struct rcu_dynticks {
+ int dynticks_nesting; /* Track irq/process nesting level. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
++ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
+ };
+
+ /* RCU's kthread states for tracing. */
+diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
+index 4b9b9f8..2326053 100644
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
+
+ /* Clean up and exit. */
+ smp_mb(); /* ensure expedited GP seen before counter increment. */
+- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
++ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
+ unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ mb_ret:
+@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+ #else /* #ifndef CONFIG_SMP */
+
+-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
++static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
+ static int synchronize_sched_expedited_cpu_stop(void *data)
+ {
+@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
+ int firstsnap, s, snap, trycount = 0;
+
+ /* Note that atomic_inc_return() implies full memory barrier. */
+- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
++ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
+ get_online_cpus();
+
+ /*
+@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
+ }
+
+ /* Check to see if someone else did our work for us. */
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ return;
+@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
+ * grace period works for us.
+ */
+ get_online_cpus();
+- snap = atomic_read(&sync_sched_expedited_started) - 1;
++ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
+ smp_mb(); /* ensure read is before try_stop_cpus(). */
+ }
+
+@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
+ * than we did beat us to the punch.
+ */
+ do {
+- s = atomic_read(&sync_sched_expedited_done);
++ s = atomic_read_unchecked(&sync_sched_expedited_done);
+ if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ break;
+ }
+- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
++ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
+
+ put_online_cpus();
+ }
+@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
+ for_each_online_cpu(thatcpu) {
+ if (thatcpu == cpu)
+ continue;
+- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
++ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
+ thatcpu).dynticks);
+ smp_mb(); /* Order sampling of snap with end of grace period. */
+ if ((snap & 0x1) != 0) {
+diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
+index 9feffa4..54058df 100644
+--- a/kernel/rcutree_trace.c
++++ b/kernel/rcutree_trace.c
+@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+ rdp->qs_pending);
+ #ifdef CONFIG_NO_HZ
+ seq_printf(m, " dt=%d/%d/%d df=%lu",
+- atomic_read(&rdp->dynticks->dynticks),
++ atomic_read_unchecked(&rdp->dynticks->dynticks),
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi_nesting,
+ rdp->dynticks_fqs);
+@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
+ rdp->qs_pending);
+ #ifdef CONFIG_NO_HZ
+ seq_printf(m, ",%d,%d,%d,%lu",
+- atomic_read(&rdp->dynticks->dynticks),
++ atomic_read_unchecked(&rdp->dynticks->dynticks),
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi_nesting,
+ rdp->dynticks_fqs);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 08aa28e..b958c1c 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
+
+ static int __init ioresources_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
++#endif
++#else
+ proc_create("ioports", 0, NULL, &proc_ioports_operations);
+ proc_create("iomem", 0, NULL, &proc_iomem_operations);
++#endif
+ return 0;
+ }
+ __initcall(ioresources_init);
+diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
+index 3d9f31c..7fefc9e 100644
+--- a/kernel/rtmutex-tester.c
++++ b/kernel/rtmutex-tester.c
+@@ -20,7 +20,7 @@
+ #define MAX_RT_TEST_MUTEXES 8
+
+ static spinlock_t rttest_lock;
+-static atomic_t rttest_event;
++static atomic_unchecked_t rttest_event;
+
+ struct test_thread_data {
+ int opcode;
+@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+ return 0;
+
+ case RTTEST_RESETEVENT:
+- atomic_set(&rttest_event, 0);
++ atomic_set_unchecked(&rttest_event, 0);
+ return 0;
+
+ default:
+@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+ return ret;
+
+ td->mutexes[id] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+ break;
+
+ td->mutexes[dat] = 2;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ default:
+@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+ return;
+
+ td->mutexes[dat] = 3;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
+ return;
+
+ td->mutexes[dat] = 1;
+- td->event = atomic_add_return(1, &rttest_event);
++ td->event = atomic_add_return_unchecked(1, &rttest_event);
+ return;
+
+ default:
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d93369a..700af59 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -5046,7 +5046,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
+ */
+-long __sched
++long __sched __intentional_overflow(-1)
+ wait_for_completion_interruptible_timeout(struct completion *x,
+ unsigned long timeout)
+ {
+@@ -5063,7 +5063,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ */
+-int __sched wait_for_completion_killable(struct completion *x)
++int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
+ {
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+ if (t == -ERESTARTSYS)
+@@ -5084,7 +5084,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
+ */
+-long __sched
++long __sched __intentional_overflow(-1)
+ wait_for_completion_killable_timeout(struct completion *x,
+ unsigned long timeout)
+ {
+@@ -5293,6 +5293,8 @@ int can_nice(const struct task_struct *p, const int nice)
+ /* convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = 20 - nice;
+
++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
++
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+ }
+@@ -5326,7 +5328,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+ if (nice > 19)
+ nice = 19;
+
+- if (increment < 0 && !can_nice(current, nice))
++ if (increment < 0 && (!can_nice(current, nice) ||
++ gr_handle_chroot_nice()))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+@@ -5483,6 +5486,7 @@ recheck:
+ unsigned long rlim_rtprio =
+ task_rlimit(p, RLIMIT_RTPRIO);
+
++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
+ /* can't set/change the rt policy */
+ if (policy != p->policy && !rlim_rtprio)
+ return -EPERM;
+@@ -6626,7 +6630,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+
+ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+-static struct ctl_table sd_ctl_dir[] = {
++static ctl_table_no_const sd_ctl_dir[] __read_only = {
+ {
+ .procname = "sched_domain",
+ .mode = 0555,
+@@ -6643,17 +6647,17 @@ static struct ctl_table sd_ctl_root[] = {
+ {}
+ };
+
+-static struct ctl_table *sd_alloc_ctl_entry(int n)
++static ctl_table_no_const *sd_alloc_ctl_entry(int n)
+ {
+- struct ctl_table *entry =
++ ctl_table_no_const *entry =
+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+ return entry;
+ }
+
+-static void sd_free_ctl_entry(struct ctl_table **tablep)
++static void sd_free_ctl_entry(ctl_table_no_const *tablep)
+ {
+- struct ctl_table *entry;
++ ctl_table_no_const *entry;
+
+ /*
+ * In the intermediate directories, both the child directory and
+@@ -6661,22 +6665,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+ * will always be set. In the lowest directory the names are
+ * static strings and all have proc handlers.
+ */
+- for (entry = *tablep; entry->mode; entry++) {
+- if (entry->child)
+- sd_free_ctl_entry(&entry->child);
++ for (entry = tablep; entry->mode; entry++) {
++ if (entry->child) {
++ sd_free_ctl_entry(entry->child);
++ pax_open_kernel();
++ entry->child = NULL;
++ pax_close_kernel();
++ }
+ if (entry->proc_handler == NULL)
+ kfree(entry->procname);
+ }
+
+- kfree(*tablep);
+- *tablep = NULL;
++ kfree(tablep);
+ }
+
+ static int min_load_idx = 0;
+ static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+
+ static void
+-set_table_entry(struct ctl_table *entry,
++set_table_entry(ctl_table_no_const *entry,
+ const char *procname, void *data, int maxlen,
+ mode_t mode, proc_handler *proc_handler,
+ bool load_idx)
+@@ -6696,7 +6703,7 @@ set_table_entry(struct ctl_table *entry,
+ static struct ctl_table *
+ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ {
+- struct ctl_table *table = sd_alloc_ctl_entry(13);
++ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
+
+ if (table == NULL)
+ return NULL;
+@@ -6731,9 +6738,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ return table;
+ }
+
+-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
+ {
+- struct ctl_table *entry, *table;
++ ctl_table_no_const *entry, *table;
+ struct sched_domain *sd;
+ int domain_num = 0, i;
+ char buf[32];
+@@ -6760,11 +6767,13 @@ static struct ctl_table_header *sd_sysctl_header;
+ static void register_sched_domain_sysctl(void)
+ {
+ int i, cpu_num = num_possible_cpus();
+- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
++ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
+ WARN_ON(sd_ctl_dir[0].child);
++ pax_open_kernel();
+ sd_ctl_dir[0].child = entry;
++ pax_close_kernel();
+
+ if (entry == NULL)
+ return;
+@@ -6787,8 +6796,12 @@ static void unregister_sched_domain_sysctl(void)
+ if (sd_sysctl_header)
+ unregister_sysctl_table(sd_sysctl_header);
+ sd_sysctl_header = NULL;
+- if (sd_ctl_dir[0].child)
+- sd_free_ctl_entry(&sd_ctl_dir[0].child);
++ if (sd_ctl_dir[0].child) {
++ sd_free_ctl_entry(sd_ctl_dir[0].child);
++ pax_open_kernel();
++ sd_ctl_dir[0].child = NULL;
++ pax_close_kernel();
++ }
+ }
+ #else
+ static void register_sched_domain_sysctl(void)
+@@ -6886,7 +6899,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ * happens before everything else. This has to be lower priority than
+ * the notifier in the perf_event subsystem, though.
+ */
+-static struct notifier_block __cpuinitdata migration_notifier = {
++static struct notifier_block migration_notifier = {
+ .notifier_call = migration_call,
+ .priority = CPU_PRI_MIGRATION,
+ };
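Because sd_ctl_dir now lives in __read_only memory, the pax_open_kernel()/pax_close_kernel() bracket is required wherever its child pointer is rewritten. On x86 the pair works by briefly toggling CR0.WP with preemption disabled; a conceptual sketch, assuming it mirrors the PaX implementation carried elsewhere in this patchset:

    /* Sketch of the x86 idea only: clearing CR0.WP lets ring 0 write
     * through read-only mappings; preemption stays off for the window. */
    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0;

            preempt_disable();
            barrier();
            cr0 = read_cr0() ^ X86_CR0_WP;
            BUG_ON(cr0 & X86_CR0_WP);
            write_cr0(cr0);
            return cr0 ^ X86_CR0_WP;
    }

    static inline unsigned long pax_close_kernel(void)
    {
            unsigned long cr0;

            cr0 = read_cr0() ^ X86_CR0_WP;
            BUG_ON(!(cr0 & X86_CR0_WP));
            write_cr0(cr0);
            barrier();
            preempt_enable_no_resched();
            return cr0 ^ X86_CR0_WP;
    }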
+diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
+index f280df1..da1281d 100644
+--- a/kernel/sched_autogroup.c
++++ b/kernel/sched_autogroup.c
+@@ -7,7 +7,7 @@
+
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+-static atomic_t autogroup_seq_nr;
++static atomic_unchecked_t autogroup_seq_nr;
+
+ static void __init autogroup_init(struct task_struct *init_task)
+ {
+@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
+
+ kref_init(&ag->kref);
+ init_rwsem(&ag->lock);
+- ag->id = atomic_inc_return(&autogroup_seq_nr);
++ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
+ ag->tg = tg;
+ #ifdef CONFIG_RT_GROUP_SCHED
+ /*
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 5b9e456..03c74cd 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4803,7 +4803,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+ * run_rebalance_domains is triggered when needed from the scheduler tick.
+ * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
+ */
+-static void run_rebalance_domains(struct softirq_action *h)
++static __latent_entropy void run_rebalance_domains(void)
+ {
+ int this_cpu = smp_processor_id();
+ struct rq *this_rq = cpu_rq(this_cpu);
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 57d4b13..bc84054 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -3,15 +3,353 @@
+ *
+ * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
+ *
+- * This defines a simple but solid secure-computing mode.
++ * Copyright (C) 2012 Google, Inc.
++ * Will Drewry <wad@chromium.org>
++ *
++ * This defines a simple but solid secure-computing facility.
++ *
++ * Mode 1 uses a fixed list of allowed system calls.
++ * Mode 2 allows user-defined system call filters in the form
++ * of Berkeley Packet Filters/Linux Socket Filters.
+ */
+
+-#include <linux/seccomp.h>
+-#include <linux/sched.h>
++#include <linux/atomic.h>
++#include <linux/audit.h>
+ #include <linux/compat.h>
++#include <linux/filter.h>
++#include <linux/ptrace.h>
++#include <linux/sched.h>
++#include <linux/seccomp.h>
++#include <linux/security.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
++#include <asm/syscall.h>
++#endif
+
+ /* #define SECCOMP_DEBUG 1 */
+-#define NR_SECCOMP_MODES 1
++
++#ifdef CONFIG_SECCOMP_FILTER
++/**
++ * struct seccomp_filter - container for seccomp BPF programs
++ *
++ * @usage: reference count to manage the object lifetime.
++ * get/put helpers should be used when accessing an instance
++ * outside of a lifetime-guarded section. In general, this
++ * is only needed for handling filters shared across tasks.
++ * @prev: points to a previously installed, or inherited, filter
++ * @len: the number of instructions in the program
++ * @insns: the BPF program instructions to evaluate
++ *
++ * seccomp_filter objects are organized in a tree linked via the @prev
++ * pointer. For any task, it appears to be a singly-linked list starting
++ * with current->seccomp.filter, the most recently attached or inherited filter.
++ * However, multiple filters may share a @prev node, by way of fork(), which
++ * results in a unidirectional tree existing in memory. This is similar to
++ * how namespaces work.
++ *
++ * seccomp_filter objects should never be modified after being attached
++ * to a task_struct (other than @usage).
++ */
++struct seccomp_filter {
++ atomic_t usage;
++ struct seccomp_filter *prev;
++ unsigned short len; /* Instruction count */
++ struct sock_filter insns[];
++};
++
++/* Limit any path through the tree to 256KB worth of instructions. */
++#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
++
++/**
++ * get_u32 - returns a u32 offset into data
++ * @data: an unsigned 64-bit value
++ * @index: 0 or 1 to return the first or second 32-bits
++ *
++ * This inline exists to hide the length of unsigned long.
++ * If a 32-bit unsigned long is passed in, it will be extended
++ * and the top 32-bits will be 0. If it is a 64-bit unsigned
++ * long, then whatever data is resident will be properly returned.
++ */
++static inline u32 get_u32(u64 data, int index)
++{
++ return ((u32 *)&data)[index];
++}
++
++/* Helper for bpf_load below. */
++#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
++/**
++ * bpf_load: checks and returns a pointer to the requested offset
++ * @off: offset into struct seccomp_data to load from
++ *
++ * Returns the requested 32-bits of data.
++ * seccomp_chk_filter() should assure that @off is 32-bit aligned
++ * and not out of bounds. Failure to do so is a BUG.
++ */
++u32 seccomp_bpf_load(int off)
++{
++ struct pt_regs *regs = task_pt_regs(current);
++ if (off == BPF_DATA(nr))
++ return syscall_get_nr(current, regs);
++ if (off == BPF_DATA(arch))
++ return syscall_get_arch(current, regs);
++ if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
++ unsigned long value;
++ int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
++ int index = !!(off % sizeof(u64));
++ syscall_get_arguments(current, regs, arg, 1, &value);
++ return get_u32(value, index);
++ }
++ if (off == BPF_DATA(instruction_pointer))
++ return get_u32(KSTK_EIP(current), 0);
++ if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
++ return get_u32(KSTK_EIP(current), 1);
++ /* seccomp_chk_filter should make this impossible. */
++ BUG();
++}
++
++/**
++ * seccomp_chk_filter - verify seccomp filter code
++ * @filter: filter to verify
++ * @flen: length of filter
++ *
++ * Takes a previously checked filter (by sk_chk_filter) and
++ * redirects all filter code that loads struct sk_buff data
++ * and related data through seccomp_bpf_load. It also
++ * enforces length and alignment checking of those loads.
++ *
++ * Returns 0 if the rule set is legal or -EINVAL if not.
++ */
++static int seccomp_chk_filter(struct sock_filter *filter, unsigned int flen)
++{
++ int pc;
++ for (pc = 0; pc < flen; pc++) {
++ struct sock_filter *ftest = &filter[pc];
++ u16 code = ftest->code;
++ u32 k = ftest->k;
++ switch (code) {
++ case BPF_S_LD_W_ABS:
++ ftest->code = BPF_S_ANC_SECCOMP_LD_W;
++ /* 32-bit aligned and not out of bounds. */
++ if (k >= sizeof(struct seccomp_data) || k & 3)
++ return -EINVAL;
++ continue;
++ case BPF_S_LD_W_LEN:
++ ftest->code = BPF_S_LD_IMM;
++ ftest->k = sizeof(struct seccomp_data);
++ continue;
++ case BPF_S_LDX_W_LEN:
++ ftest->code = BPF_S_LDX_IMM;
++ ftest->k = sizeof(struct seccomp_data);
++ continue;
++ /* Explicitly include allowed calls. */
++ case BPF_S_RET_K:
++ case BPF_S_RET_A:
++ case BPF_S_ALU_ADD_K:
++ case BPF_S_ALU_ADD_X:
++ case BPF_S_ALU_SUB_K:
++ case BPF_S_ALU_SUB_X:
++ case BPF_S_ALU_MUL_K:
++ case BPF_S_ALU_MUL_X:
++ case BPF_S_ALU_DIV_X:
++ case BPF_S_ALU_AND_K:
++ case BPF_S_ALU_AND_X:
++ case BPF_S_ALU_OR_K:
++ case BPF_S_ALU_OR_X:
++ case BPF_S_ALU_LSH_K:
++ case BPF_S_ALU_LSH_X:
++ case BPF_S_ALU_RSH_K:
++ case BPF_S_ALU_RSH_X:
++ case BPF_S_ALU_NEG:
++ case BPF_S_LD_IMM:
++ case BPF_S_LDX_IMM:
++ case BPF_S_MISC_TAX:
++ case BPF_S_MISC_TXA:
++ case BPF_S_ALU_DIV_K:
++ case BPF_S_LD_MEM:
++ case BPF_S_LDX_MEM:
++ case BPF_S_ST:
++ case BPF_S_STX:
++ case BPF_S_JMP_JA:
++ case BPF_S_JMP_JEQ_K:
++ case BPF_S_JMP_JEQ_X:
++ case BPF_S_JMP_JGE_K:
++ case BPF_S_JMP_JGE_X:
++ case BPF_S_JMP_JGT_K:
++ case BPF_S_JMP_JGT_X:
++ case BPF_S_JMP_JSET_K:
++ case BPF_S_JMP_JSET_X:
++ continue;
++ default:
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/**
++ * seccomp_run_filters - evaluates all seccomp filters against @syscall
++ * @syscall: number of the current system call
++ *
++ * Returns valid seccomp BPF response codes.
++ */
++static u32 seccomp_run_filters(int syscall)
++{
++ struct seccomp_filter *f;
++ u32 ret = SECCOMP_RET_ALLOW;
++
++ /* Ensure unexpected behavior doesn't result in failing open. */
++ if (WARN_ON(current->seccomp.filter == NULL))
++ return SECCOMP_RET_KILL;
++
++ /*
++ * All filters are evaluated in order of youngest to oldest. The lowest
++ * BPF return value (ignoring the DATA) always takes priority.
++ */
++ for (f = current->seccomp.filter; f; f = f->prev) {
++ u32 cur_ret = sk_run_filter(NULL, f->insns);
++ if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
++ ret = cur_ret;
++ }
++ return ret;
++}
++
++/**
++ * seccomp_attach_filter: Attaches a seccomp filter to current.
++ * @fprog: BPF program to install
++ *
++ * Returns 0 on success or an errno on failure.
++ */
++static long seccomp_attach_filter(struct sock_fprog *fprog)
++{
++ struct seccomp_filter *filter;
++ unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
++ unsigned long total_insns = fprog->len;
++ long ret;
++
++ if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
++ return -EINVAL;
++
++ for (filter = current->seccomp.filter; filter; filter = filter->prev)
++ total_insns += filter->len + 4; /* include a 4 instr penalty */
++ if (total_insns > MAX_INSNS_PER_PATH)
++ return -ENOMEM;
++
++ /*
++ * Installing a seccomp filter requires that the task have
++ * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
++ * This avoids scenarios where unprivileged tasks can affect the
++ * behavior of privileged children.
++ */
++ if (!current->no_new_privs &&
++ security_real_capable_noaudit(current, current_user_ns(),
++ CAP_SYS_ADMIN) != 0)
++ return -EACCES;
++
++ /* Allocate a new seccomp_filter */
++ filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, GFP_KERNEL);
++ if (!filter)
++ return -ENOMEM;
++ atomic_set(&filter->usage, 1);
++ filter->len = fprog->len;
++
++ /* Copy the instructions from fprog. */
++ ret = -EFAULT;
++ if (copy_from_user(filter->insns, fprog->filter, fp_size))
++ goto fail;
++
++ /* Check and rewrite the fprog via the skb checker */
++ ret = sk_chk_filter(filter->insns, filter->len);
++ if (ret)
++ goto fail;
++
++ /* Check and rewrite the fprog for seccomp use */
++ ret = seccomp_chk_filter(filter->insns, filter->len);
++ if (ret)
++ goto fail;
++
++ /*
++ * If there is an existing filter, make it the prev and don't drop its
++ * task reference.
++ */
++ filter->prev = current->seccomp.filter;
++ current->seccomp.filter = filter;
++ return 0;
++fail:
++ kfree(filter);
++ return ret;
++}
++
++/**
++ * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
++ * @user_filter: pointer to the user data containing a sock_fprog.
++ *
++ * Returns 0 on success and non-zero otherwise.
++ */
++long seccomp_attach_user_filter(char __user *user_filter)
++{
++ struct sock_fprog fprog;
++ long ret = -EFAULT;
++
++#ifdef CONFIG_COMPAT
++ if (is_compat_task()) {
++ struct compat_sock_fprog fprog32;
++ if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
++ goto out;
++ fprog.len = fprog32.len;
++ fprog.filter = compat_ptr(fprog32.filter);
++ } else /* falls through to the if below. */
++#endif
++ if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
++ goto out;
++ ret = seccomp_attach_filter(&fprog);
++out:
++ return ret;
++}
++
++/* get_seccomp_filter - increments the reference count of the filter on @tsk */
++void get_seccomp_filter(struct task_struct *tsk)
++{
++ struct seccomp_filter *orig = tsk->seccomp.filter;
++ if (!orig)
++ return;
++ /* Reference count is bounded by the number of total processes. */
++ atomic_inc(&orig->usage);
++}
++
++/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
++void put_seccomp_filter(struct task_struct *tsk)
++{
++ struct seccomp_filter *orig = tsk->seccomp.filter;
++ /* Clean up single-reference branches iteratively. */
++ while (orig && atomic_dec_and_test(&orig->usage)) {
++ struct seccomp_filter *freeme = orig;
++ orig = orig->prev;
++ kfree(freeme);
++ }
++}
++
++/**
++ * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
++ * @syscall: syscall number to send to userland
++ * @reason: filter-supplied reason code to send to userland (via si_errno)
++ *
++ * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
++ */
++static void seccomp_send_sigsys(int syscall, int reason)
++{
++ struct siginfo info;
++ memset(&info, 0, sizeof(info));
++ info.si_signo = SIGSYS;
++ info.si_code = SYS_SECCOMP;
++ info.si_call_addr = (void __user *)KSTK_EIP(current);
++ info.si_errno = reason;
++ info.si_arch = syscall_get_arch(current, task_pt_regs(current));
++ info.si_syscall = syscall;
++ force_sig_info(SIGSYS, &info, current);
++}
++#endif /* CONFIG_SECCOMP_FILTER */
+
+ /*
+ * Secure computing mode 1 allows only read/write/exit/sigreturn.
+@@ -32,11 +370,21 @@ static int mode1_syscalls_32[] = {
+
+ void __secure_computing(int this_syscall)
+ {
++ /* Filter calls should never use this function. */
++ BUG_ON(current->seccomp.mode == SECCOMP_MODE_FILTER);
++ __secure_computing_int(this_syscall);
++}
++
++int __secure_computing_int(int this_syscall)
++{
+ int mode = current->seccomp.mode;
+- int * syscall;
++ int exit_sig = 0;
++ int *syscall;
++ u32 ret = SECCOMP_RET_KILL;
++ int data;
+
+ switch (mode) {
+- case 1:
++ case SECCOMP_MODE_STRICT:
+ syscall = mode1_syscalls;
+ #ifdef CONFIG_COMPAT
+ if (is_compat_task())
+@@ -44,9 +392,44 @@ void __secure_computing(int this_syscall)
+ #endif
+ do {
+ if (*syscall == this_syscall)
+- return;
++ return 0;
+ } while (*++syscall);
++ exit_sig = SIGKILL;
+ break;
++#ifdef CONFIG_SECCOMP_FILTER
++ case SECCOMP_MODE_FILTER:
++ ret = seccomp_run_filters(this_syscall);
++ data = ret & SECCOMP_RET_DATA;
++ switch (ret & SECCOMP_RET_ACTION) {
++ case SECCOMP_RET_ERRNO:
++ /* Set the low-order 16 bits as an errno. */
++ syscall_set_return_value(current, task_pt_regs(current),
++ -data, 0);
++ goto skip;
++ case SECCOMP_RET_TRAP:
++ /* Show the handler the original registers. */
++ syscall_rollback(current, task_pt_regs(current));
++ /* Let the filter pass back 16 bits of data. */
++ seccomp_send_sigsys(this_syscall, data);
++ goto skip;
++ case SECCOMP_RET_TRACE:
++ /* Skip these calls if there is no tracer. */
++ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
++ goto skip;
++ /* Allow the BPF to provide the event message */
++ ptrace_event(PTRACE_EVENT_SECCOMP, data);
++ if (fatal_signal_pending(current))
++ break;
++ return 0;
++ case SECCOMP_RET_ALLOW:
++ return 0;
++ case SECCOMP_RET_KILL:
++ default:
++ break;
++ }
++ exit_sig = SIGSYS;
++ break;
++#endif
+ default:
+ BUG();
+ }
+@@ -54,7 +437,11 @@ void __secure_computing(int this_syscall)
+ #ifdef SECCOMP_DEBUG
+ dump_stack();
+ #endif
+- do_exit(SIGKILL);
++ __audit_seccomp(this_syscall, exit_sig, ret);
++ do_exit(exit_sig);
++skip:
++ audit_seccomp(this_syscall, exit_sig, ret);
++ return -1;
+ }
+
+ long prctl_get_seccomp(void)
+@@ -62,25 +449,48 @@ long prctl_get_seccomp(void)
+ return current->seccomp.mode;
+ }
+
+-long prctl_set_seccomp(unsigned long seccomp_mode)
++/**
++ * prctl_set_seccomp: configures current->seccomp.mode
++ * @seccomp_mode: requested mode to use
++ * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
++ *
++ * This function may be called repeatedly with a @seccomp_mode of
++ * SECCOMP_MODE_FILTER to install additional filters. Every filter
++ * successfully installed will be evaluated (in reverse order) for each system
++ * call the task makes.
++ *
++ * Once current->seccomp.mode is non-zero, it may not be changed.
++ *
++ * Returns 0 on success or -EINVAL on failure.
++ */
++long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
+ {
+- long ret;
++ long ret = -EINVAL;
+
+- /* can set it only once to be even more secure */
+- ret = -EPERM;
+- if (unlikely(current->seccomp.mode))
++ if (current->seccomp.mode &&
++ current->seccomp.mode != seccomp_mode)
+ goto out;
+
+- ret = -EINVAL;
+- if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
+- current->seccomp.mode = seccomp_mode;
+- set_thread_flag(TIF_SECCOMP);
++ switch (seccomp_mode) {
++ case SECCOMP_MODE_STRICT:
++ ret = 0;
+ #ifdef TIF_NOTSC
+ disable_TSC();
+ #endif
+- ret = 0;
++ break;
++#ifdef CONFIG_SECCOMP_FILTER
++ case SECCOMP_MODE_FILTER:
++ ret = seccomp_attach_user_filter(filter);
++ if (ret)
++ goto out;
++ break;
++#endif
++ default:
++ goto out;
+ }
+
+- out:
++ current->seccomp.mode = seccomp_mode;
++ set_thread_flag(TIF_SECCOMP);
++out:
+ return ret;
+ }
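With mode 2 wired through prctl, a task installs a filter by handing prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &fprog) a classic BPF program that reads struct seccomp_data instead of packet bytes. A minimal userspace sketch, assuming the seccomp_data layout and SECCOMP_RET_* constants this patchset adds to linux/seccomp.h:

    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <stddef.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>

    /* Sketch: kill the task on getpid(), allow everything else. */
    static int install_filter(void)
    {
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };

            /* Without CAP_SYS_ADMIN the no_new_privs bit must be set first,
             * matching the security_real_capable_noaudit() check above. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    return -1;
            return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
    }

Repeated installs stack: every filter on the list is run and the lowest SECCOMP_RET_ACTION wins, exactly as seccomp_run_filters() above implements.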
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 3ecf574..0541e21 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
+
+ int print_fatal_signals __read_mostly;
+
+-static void __user *sig_handler(struct task_struct *t, int sig)
++static __sighandler_t sig_handler(struct task_struct *t, int sig)
+ {
+ return t->sighand->action[sig - 1].sa.sa_handler;
+ }
+
+-static int sig_handler_ignored(void __user *handler, int sig)
++static int sig_handler_ignored(__sighandler_t handler, int sig)
+ {
+ /* Is it explicitly or implicitly ignored? */
+ return handler == SIG_IGN ||
+@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
+ static int sig_task_ignored(struct task_struct *t, int sig,
+ int from_ancestor_ns)
+ {
+- void __user *handler;
++ __sighandler_t handler;
+
+ handler = sig_handler(t, sig);
+
+@@ -159,7 +159,7 @@ void recalc_sigpending(void)
+
+ #define SYNCHRONOUS_MASK \
+ (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
+- sigmask(SIGTRAP) | sigmask(SIGFPE))
++ sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
+
+ int next_signal(struct sigpending *pending, sigset_t *mask)
+ {
+@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ atomic_inc(&user->sigpending);
+ rcu_read_unlock();
+
++ if (!override_rlimit)
++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
++
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+
+ int unhandled_signal(struct task_struct *tsk, int sig)
+ {
+- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
+ if (is_global_init(tsk))
+ return 1;
+ if (handler != SIG_IGN && handler != SIG_DFL)
+@@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
+ }
+ }
+
++ /* allow glibc communication via tgkill to other threads in our
++ thread group */
++ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
++ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
++ && gr_handle_signal(t, sig))
++ return -EPERM;
++
+ return security_task_kill(t, info, sig, 0);
+ }
+
+@@ -1162,7 +1172,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+ return send_signal(sig, info, p, 1);
+ }
+
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ return send_signal(sig, info, t, 0);
+@@ -1199,6 +1209,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ unsigned long int flags;
+ int ret, blocked, ignored;
+ struct k_sigaction *action;
++ int is_unhandled = 0;
+
+ spin_lock_irqsave(&t->sighand->siglock, flags);
+ action = &t->sighand->action[sig-1];
+@@ -1213,9 +1224,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ }
+ if (action->sa.sa_handler == SIG_DFL)
+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
++ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
++ is_unhandled = 1;
+ ret = specific_send_sig_info(sig, info, t);
+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
++ /* only deal with unhandled signals, java etc trigger SIGSEGV during
++ normal operation */
++ if (is_unhandled) {
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
++ gr_handle_crash(t, sig);
++ }
++
+ return ret;
+ }
+
+@@ -1282,8 +1302,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+ ret = check_kill_permission(sig, info, p);
+ rcu_read_unlock();
+
+- if (!ret && sig)
++ if (!ret && sig) {
+ ret = do_send_sig_info(sig, info, p, true);
++ if (!ret)
++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
++ }
+
+ return ret;
+ }
+@@ -2631,6 +2654,13 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
+ err |= __put_user(from->si_uid, &to->si_uid);
+ err |= __put_user(from->si_ptr, &to->si_ptr);
+ break;
++#ifdef __ARCH_SIGSYS
++ case __SI_SYS:
++ err |= __put_user(from->si_call_addr, &to->si_call_addr);
++ err |= __put_user(from->si_syscall, &to->si_syscall);
++ err |= __put_user(from->si_arch, &to->si_arch);
++ break;
++#endif
+ default: /* this is just in case for now ... */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+@@ -2765,7 +2795,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+ int error = -ESRCH;
+
+ rcu_read_lock();
+- p = find_task_by_vpid(pid);
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ /* allow glibc communication via tgkill to other threads in our
++ thread group */
++ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
++ sig == (SIGRTMIN+1) && tgid == info->si_pid)
++ p = find_task_by_vpid_unrestricted(pid);
++ else
++#endif
++ p = find_task_by_vpid(pid);
+ if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
+ error = check_kill_permission(sig, info, p);
+ /*
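The __SI_SYS case added to copy_siginfo_to_user() is the userspace-visible half of SECCOMP_RET_TRAP: seccomp_send_sigsys() fills si_call_addr, si_syscall and si_arch, and the copy-out above delivers them to a SIGSYS handler. A handler sketch, assuming the _sigsys siginfo fields this patchset defines (fprintf is not async-signal-safe and stands in for real logging):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
    {
            if (info->si_code != SYS_SECCOMP)
                    return;
            fprintf(stderr, "syscall %d trapped at %p, filter data %d\n",
                    info->si_syscall, info->si_call_addr, info->si_errno);
    }

    static void install_sigsys_handler(void)
    {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_sigaction = sigsys_handler;
            act.sa_flags = SA_SIGINFO;
            sigaction(SIGSYS, &act, NULL);
    }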
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 9e800b2..1533ba5 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -75,7 +75,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
++static struct notifier_block hotplug_cfd_notifier = {
+ .notifier_call = hotplug_cfd,
+ };
+
+@@ -591,22 +591,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
+ }
+ EXPORT_SYMBOL(smp_call_function);
+
+-void ipi_call_lock(void)
++void ipi_call_lock(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock(&call_function.lock);
+ }
+
+-void ipi_call_unlock(void)
++void ipi_call_unlock(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock(&call_function.lock);
+ }
+
+-void ipi_call_lock_irq(void)
++void ipi_call_lock_irq(void) __acquires(call_function.lock)
+ {
+ raw_spin_lock_irq(&call_function.lock);
+ }
+
+-void ipi_call_unlock_irq(void)
++void ipi_call_unlock_irq(void) __releases(call_function.lock)
+ {
+ raw_spin_unlock_irq(&call_function.lock);
+ }
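The __acquires()/__releases() annotations added to the ipi_call_lock family are for sparse, not the compiler: they declare the lock-context transition so `make C=1` can flag unbalanced lock/unlock paths, and they compile away otherwise. The usual definitions, as in the kernel's linux/compiler.h:

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif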
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 2c71d91..6b690a4 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -52,11 +52,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
+ EXPORT_SYMBOL(irq_stat);
+ #endif
+
+-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
++static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+
+-char *softirq_to_name[NR_SOFTIRQS] = {
++const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+@@ -235,7 +235,7 @@ restart:
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+- h->action(h);
++ h->action();
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ printk(KERN_ERR "huh, entered softirq %u %s %p"
+@@ -385,7 +385,7 @@ void raise_softirq(unsigned int nr)
+ local_irq_restore(flags);
+ }
+
+-void open_softirq(int nr, void (*action)(struct softirq_action *))
++void __init open_softirq(int nr, void (*action)(void))
+ {
+ softirq_vec[nr].action = action;
+ }
+@@ -441,7 +441,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++static __latent_entropy void tasklet_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -476,7 +476,7 @@ static void tasklet_action(struct softirq_action *a)
+ }
+ }
+
+-static void tasklet_hi_action(struct softirq_action *a)
++static __latent_entropy void tasklet_hi_action(void)
+ {
+ struct tasklet_struct *list;
+
+@@ -712,7 +712,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
++static struct notifier_block remote_softirq_cpu_notifier = {
+ .notifier_call = remote_softirq_cpu_notify,
+ };
+
+@@ -894,7 +894,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata cpu_nfb = {
++static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+ };
+
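Dropping the unused struct softirq_action argument is what lets softirq_vec become __read_only and lets handlers carry the __latent_entropy marking seen throughout this patch. The attribute is consumed by PaX's latent_entropy GCC plugin, which instruments the function so each execution perturbs a global entropy word; without the plugin it must expand to nothing. A sketch of the usual wiring, assuming the plugin infrastructure this patchset carries:

    /* Sketch: with the plugin active the attribute is meaningful,
     * otherwise annotated functions build exactly as before. */
    #ifdef LATENT_ENTROPY_PLUGIN
    #define __latent_entropy __attribute__((latent_entropy))
    #else
    #define __latent_entropy
    #endif

Registration is unchanged apart from the signature, e.g. open_softirq(TASKLET_SOFTIRQ, tasklet_action) now binds a plain void handler.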
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 2f194e9..2c05ea9 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
+ * cpu notifiers. It currently shares the same priority as sched
+ * migration_notifier.
+ */
+-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
++static struct notifier_block cpu_stop_cpu_notifier = {
+ .notifier_call = cpu_stop_cpu_callback,
+ .priority = 10,
+ };
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 9d557df..b2a5319 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
+ error = -EACCES;
+ goto out;
+ }
++
++ if (gr_handle_chroot_setpriority(p, niceval)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ no_nice = security_task_setnice(p, niceval);
+ if (no_nice) {
+ error = no_nice;
+@@ -597,6 +603,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+ goto error;
+ }
+
++ if (gr_check_group_change(new->gid, new->egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1 ||
+ (egid != (gid_t) -1 && egid != old->gid))
+ new->sgid = new->egid;
+@@ -626,6 +635,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+ old = current_cred();
+
+ retval = -EPERM;
++
++ if (gr_check_group_change(gid, gid, gid))
++ goto error;
++
+ if (nsown_capable(CAP_SETGID))
+ new->gid = new->egid = new->sgid = new->fsgid = gid;
+ else if (gid == old->gid || gid == old->sgid)
+@@ -643,7 +656,7 @@ error:
+ /*
+ * change the user struct in a credentials set to match the new UID
+ */
+-static int set_user(struct cred *new)
++int set_user(struct cred *new)
+ {
+ struct user_struct *new_user;
+
+@@ -713,6 +726,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+ goto error;
+ }
+
++ if (gr_check_user_change(new->uid, new->euid, -1))
++ goto error;
++
+ if (new->uid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+@@ -757,6 +773,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+ old = current_cred();
+
+ retval = -EPERM;
++
++ if (gr_check_crash_uid(uid))
++ goto error;
++ if (gr_check_user_change(uid, uid, uid))
++ goto error;
++
+ if (nsown_capable(CAP_SETUID)) {
+ new->suid = new->uid = uid;
+ if (uid != old->uid) {
+@@ -811,6 +833,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+ goto error;
+ }
+
++ if (gr_check_user_change(ruid, euid, -1))
++ goto error;
++
+ if (ruid != (uid_t) -1) {
+ new->uid = ruid;
+ if (ruid != old->uid) {
+@@ -875,6 +900,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+ goto error;
+ }
+
++ if (gr_check_group_change(rgid, egid, -1))
++ goto error;
++
+ if (rgid != (gid_t) -1)
+ new->gid = rgid;
+ if (egid != (gid_t) -1)
+@@ -925,12 +953,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ uid == old->suid || uid == old->fsuid ||
+ nsown_capable(CAP_SETUID)) {
+ if (uid != old_fsuid) {
++ if (gr_check_user_change(-1, -1, uid))
++ goto error;
++
+ new->fsuid = uid;
+ if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+ goto change_okay;
+ }
+ }
+
++error:
+ abort_creds(new);
+ return old_fsuid;
+
+@@ -957,12 +989,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+ if (gid == old->gid || gid == old->egid ||
+ gid == old->sgid || gid == old->fsgid ||
+ nsown_capable(CAP_SETGID)) {
++ if (gr_check_group_change(-1, -1, gid))
++ goto error;
++
+ if (gid != old_fsgid) {
+ new->fsgid = gid;
+ goto change_okay;
+ }
+ }
+
++error:
+ abort_creds(new);
+ return old_fsgid;
+
+@@ -1270,19 +1306,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+ return -EFAULT;
+
+ down_read(&uts_sem);
+- error = __copy_to_user(&name->sysname, &utsname()->sysname,
++ error = __copy_to_user(name->sysname, &utsname()->sysname,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
++ error |= __copy_to_user(name->nodename, &utsname()->nodename,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->release, &utsname()->release,
++ error |= __copy_to_user(name->release, &utsname()->release,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->release + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->version, &utsname()->version,
++ error |= __copy_to_user(name->version, &utsname()->version,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->version + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->machine, &utsname()->machine,
++ error |= __copy_to_user(name->machine, &utsname()->machine,
+ __OLD_UTS_LEN);
+ error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+ up_read(&uts_sem);
+@@ -1484,6 +1520,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ */
+ new_rlim->rlim_cur = 1;
+ }
++ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
++ is changed to a lower value. Since tasks can be created by the same
++ user in between this limit change and an execve by this task, force
++ a recheck only for this task by setting PF_NPROC_EXCEEDED
++ */
++ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
++ tsk->flags |= PF_NPROC_EXCEEDED;
+ }
+ if (!retval) {
+ if (old_rlim)
+@@ -1747,7 +1790,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ error = get_dumpable(me->mm);
+ break;
+ case PR_SET_DUMPABLE:
+- if (arg2 < 0 || arg2 > 1) {
++ if (arg2 > 1) {
+ error = -EINVAL;
+ break;
+ }
+@@ -1808,7 +1851,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ error = prctl_get_seccomp();
+ break;
+ case PR_SET_SECCOMP:
+- error = prctl_set_seccomp(arg2);
++ error = prctl_set_seccomp(arg2, (char __user *)arg3);
+ break;
+ case PR_GET_TSC:
+ error = GET_TSC_CTL(arg2);
+@@ -1868,6 +1911,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ else
+ error = PR_MCE_KILL_DEFAULT;
+ break;
++ case PR_SET_NO_NEW_PRIVS:
++ if (arg2 != 1 || arg3 || arg4 || arg5)
++ return -EINVAL;
++
++ current->no_new_privs = 1;
++ break;
++ case PR_GET_NO_NEW_PRIVS:
++ if (arg2 || arg3 || arg4 || arg5)
++ return -EINVAL;
++ return current->no_new_privs ? 1 : 0;
+ default:
+ error = -EINVAL;
+ break;
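PR_SET_NO_NEW_PRIVS is the one-way latch the seccomp-filter permission check depends on: the hunk above accepts only the argument pattern (1, 0, 0, 0), the bit is inherited across fork() and execve(), and nothing clears it. A minimal usage sketch with the constants introduced here:

    #include <stdio.h>
    #include <sys/prctl.h>

    static int latch_no_new_privs(void)
    {
            /* Only arg2 == 1 with the rest zero is accepted, per the
             * PR_SET_NO_NEW_PRIVS case above; anything else is -EINVAL. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
                    perror("PR_SET_NO_NEW_PRIVS");
                    return -1;
            }
            return prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); /* now 1 */
    }

Once latched, setuid/setgid bits and file capabilities no longer grant privilege across execve(), which is why seccomp_attach_filter() above treats it as an alternative to CAP_SYS_ADMIN.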
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index ea7ec7f..798623e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -86,6 +86,13 @@
+
+
+ #if defined(CONFIG_SYSCTL)
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++ const int op);
++extern int gr_handle_chroot_sysctl(const int op);
+
+ /* External variables not in a header file. */
+ extern int sysctl_overcommit_memory;
+@@ -112,18 +119,18 @@ extern int blk_iopoll_enabled;
+
+ /* Constants used for minimum and maximum */
+ #ifdef CONFIG_LOCKUP_DETECTOR
+-static int sixty = 60;
+-static int neg_one = -1;
++static int sixty __read_only = 60;
+ #endif
+
+-static int zero;
+-static int __maybe_unused one = 1;
+-static int __maybe_unused two = 2;
+-static int __maybe_unused three = 3;
+-static unsigned long one_ul = 1;
+-static int one_hundred = 100;
++static int neg_one __read_only = -1;
++static int zero __read_only = 0;
++static int __maybe_unused one __read_only = 1;
++static int __maybe_unused two __read_only = 2;
++static int __maybe_unused three __read_only = 3;
++static unsigned long one_ul __read_only = 1;
++static int one_hundred __read_only = 100;
+ #ifdef CONFIG_PRINTK
+-static int ten_thousand = 10000;
++static int ten_thousand __read_only = 10000;
+ #endif
+
+ /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+@@ -165,10 +172,13 @@ static int proc_taint(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+ #endif
+
+-#ifdef CONFIG_PRINTK
+ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+-#endif
++
++static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos);
++static int proc_dostring_coredump(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+ /* Note: sysrq code uses its own private copy */
+@@ -191,6 +201,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
+ }
+
+ #endif
++extern struct ctl_table grsecurity_table[];
+
+ static struct ctl_table root_table[];
+ static struct ctl_table_root sysctl_table_root;
+@@ -220,6 +231,20 @@ extern struct ctl_table epoll_table[];
+ int sysctl_legacy_va_layout;
+ #endif
+
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++ {
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++
++ { }
++};
++#endif
++
+ /* The default sysctl tables: */
+
+ static struct ctl_table root_table[] = {
+@@ -266,6 +291,22 @@ static int max_extfrag_threshold = 1000;
+ #endif
+
+ static struct ctl_table kern_table[] = {
++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
++ {
++ .procname = "grsecurity",
++ .mode = 0500,
++ .child = grsecurity_table,
++ },
++#endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+@@ -420,7 +461,7 @@ static struct ctl_table kern_table[] = {
+ .data = core_pattern,
+ .maxlen = CORENAME_MAX_SIZE,
+ .mode = 0644,
+- .proc_handler = proc_dostring,
++ .proc_handler = proc_dostring_coredump,
+ },
+ {
+ .procname = "core_pipe_limit",
+@@ -550,7 +591,7 @@ static struct ctl_table kern_table[] = {
+ .data = &modprobe_path,
+ .maxlen = KMOD_PATH_LEN,
+ .mode = 0644,
+- .proc_handler = proc_dostring,
++ .proc_handler = proc_dostring_modpriv,
+ },
+ {
+ .procname = "modules_disabled",
+@@ -717,16 +758,20 @@ static struct ctl_table kern_table[] = {
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
++#endif
+ {
+ .procname = "kptr_restrict",
+ .data = &kptr_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ .extra1 = &two,
++#else
+ .extra1 = &zero,
++#endif
+ .extra2 = &two,
+ },
+-#endif
+ {
+ .procname = "ngroups_max",
+ .data = &ngroups_max,
+@@ -957,10 +1002,17 @@ static struct ctl_table kern_table[] = {
+ */
+ {
+ .procname = "perf_event_paranoid",
+- .data = &sysctl_perf_event_paranoid,
+- .maxlen = sizeof(sysctl_perf_event_paranoid),
++ .data = &sysctl_perf_event_legitimately_concerned,
++ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
+ .mode = 0644,
+- .proc_handler = proc_dointvec,
++ /* go ahead, be a hero */
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &neg_one,
++#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
++ .extra2 = &three,
++#else
++ .extra2 = &two,
++#endif
+ },
+ {
+ .procname = "perf_event_mlock_kb",
+@@ -1216,6 +1268,13 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .procname = "nr_trim_pages",
+@@ -1499,7 +1558,7 @@ static struct ctl_table fs_table[] = {
+ .data = &suid_dumpable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_dointvec_minmax_coredump,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
+@@ -1720,6 +1779,17 @@ static int test_perm(int mode, int op)
+ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
+ {
+ int mode;
++ int error;
++
++ if (table->parent != NULL && table->parent->procname != NULL &&
++ table->procname != NULL &&
++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
++ return -EACCES;
++ if (gr_handle_chroot_sysctl(op))
++ return -EACCES;
++ error = gr_handle_sysctl(table, op);
++ if (error)
++ return error;
+
+ if (root->permissions)
+ mode = root->permissions(root, current->nsproxy, table);
+@@ -1732,7 +1802,9 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
+ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
+ {
+ for (; table->procname; table++) {
+- table->parent = parent;
++ pax_open_kernel();
++ *(void **)&table->parent = (ctl_table_no_const *)parent;
++ pax_close_kernel();
+ if (table->child)
+ sysctl_set_parent(table, table->child);
+ }
+@@ -1856,7 +1928,8 @@ struct ctl_table_header *__register_sysctl_paths(
+ const struct ctl_path *path, struct ctl_table *table)
+ {
+ struct ctl_table_header *header;
+- struct ctl_table *new, **prevp;
++ struct ctl_table **prevp;
++ ctl_table_no_const *new;
+ unsigned int n, npath;
+ struct ctl_table_set *set;
+
+@@ -1877,7 +1950,7 @@ struct ctl_table_header *__register_sysctl_paths(
+ if (!header)
+ return NULL;
+
+- new = (struct ctl_table *) (header + 1);
++ new = (ctl_table_no_const *) (header + 1);
+
+ /* Now connect the dots */
+ prevp = &header->ctl_table;
+@@ -2124,6 +2197,16 @@ int proc_dostring(struct ctl_table *table, int write,
+ buffer, lenp, ppos);
+ }
+
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ if (write && !capable(CAP_SYS_MODULE))
++ return -EPERM;
++
++ return _proc_do_string(table->data, table->maxlen, write,
++ buffer, lenp, ppos);
++}
++
+ static size_t proc_skip_spaces(char **buf)
+ {
+ size_t ret;
+@@ -2229,6 +2312,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+ len = strlen(tmp);
+ if (len > *size)
+ len = *size;
++ if (len > sizeof(tmp))
++ len = sizeof(tmp);
+ if (copy_to_user(*buf, tmp, len))
+ return -EFAULT;
+ *size -= len;
+@@ -2393,7 +2478,7 @@ int proc_dointvec(struct ctl_table *table, int write,
+ static int proc_taint(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table t;
++ ctl_table_no_const t;
+ unsigned long tmptaint = get_taint();
+ int err;
+
+@@ -2421,7 +2506,6 @@ static int proc_taint(struct ctl_table *table, int write,
+ return err;
+ }
+
+-#ifdef CONFIG_PRINTK
+ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2430,7 +2514,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ }
+-#endif
+
+ struct do_proc_dointvec_minmax_conv_param {
+ int *min;
+@@ -2488,6 +2571,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
+ do_proc_dointvec_minmax_conv, &param);
+ }
+
++static void validate_coredump_safety(void)
++{
++ if (suid_dumpable == SUID_DUMPABLE_SAFE &&
++ core_pattern[0] != '/' && core_pattern[0] != '|') {
++ printk(KERN_WARNING "Unsafe core_pattern used with "\
++ "suid_dumpable=2. Pipe handler or fully qualified "\
++ "core dump path required.\n");
++ }
++}
++
++static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++ if (!error)
++ validate_coredump_safety();
++ return error;
++}
++
++static int proc_dostring_coredump(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ int error = proc_dostring(table, write, buffer, lenp, ppos);
++ if (!error)
++ validate_coredump_safety();
++ return error;
++}
++
+ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+@@ -2545,8 +2656,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+ *i = val;
+ } else {
+ val = convdiv * (*i) / convmul;
+- if (!first)
++ if (!first) {
+ err = proc_put_char(&buffer, &left, '\t');
++ if (err)
++ break;
++ }
+ err = proc_put_long(&buffer, &left, val, false);
+ if (err)
+ break;
+@@ -2941,6 +3055,12 @@ int proc_dostring(struct ctl_table *table, int write,
+ return -ENOSYS;
+ }
+
++int proc_dostring_modpriv(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ return -ENOSYS;
++}
++
+ int proc_dointvec(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -2997,6 +3117,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+ EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
+ EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
+ EXPORT_SYMBOL(proc_dostring);
++EXPORT_SYMBOL(proc_dostring_modpriv);
+ EXPORT_SYMBOL(proc_doulongvec_minmax);
+ EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
+ EXPORT_SYMBOL(register_sysctl_table);
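+
+ Two effects of the kern_table changes above are visible from userspace:
+ kernel.kptr_restrict is now registered unconditionally (it and
+ proc_dointvec_minmax_sysadmin both moved out of the CONFIG_PRINTK
+ guard), and under CONFIG_GRKERNSEC_HIDESYM its minimum becomes 2, so
+ even root cannot lower it. A small check (illustrative, not from the
+ patch):
+
+ #include <stdio.h>
+
+ int main(void)
+ {
+ 	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ 	int v;
+
+ 	if (!f || fscanf(f, "%d", &v) != 1)
+ 		return 1;
+ 	fclose(f);
+ 	/* expect v >= 2 on a HIDESYM kernel; root writes below the
+ 	 * floor fail with -EINVAL, non-root writes with -EPERM */
+ 	printf("kptr_restrict=%d\n", v);
+ 	return 0;
+ }
+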
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 9f9aa32..d0c4f42 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
+ }
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buffer, str - buffer, &pos);
++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
+ }
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buffer, str - buffer, &pos);
++ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out_kfree;
+@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
+ int i;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
+ __le16 dnaddr;
+
+ set_fs(KERNEL_DS);
+- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
++ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
+@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
+ le16_to_cpu(dnaddr) & 0x3ff);
+
+ set_fs(KERNEL_DS);
+- result = vfs_write(file, buf, len, &pos);
++ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
+ set_fs(old_fs);
+ if (result < 0)
+ goto out;
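+
+ Every hunk in this file follows one pattern: these helpers wrap the I/O
+ in set_fs(KERNEL_DS), so passing a kernel buffer to vfs_read() or
+ vfs_write(), which expect __user pointers, is legitimate, and the
+ __force_user cast records that fact for the checker plugins this
+ patchset adds. Schematically (a condensed view of the existing code,
+ not new logic):
+
+ 	mm_segment_t old_fs = get_fs();
+
+ 	set_fs(KERNEL_DS);	/* address checks now accept kernel pointers */
+ 	result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
+ 	set_fs(old_fs);		/* always restored before returning */
+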
+diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
+index 362da65..ab8ef8c 100644
+--- a/kernel/sysctl_check.c
++++ b/kernel/sysctl_check.c
+@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
+ set_fail(&fail, table, "Directory with extra2");
+ } else {
+ if ((table->proc_handler == proc_dostring) ||
++ (table->proc_handler == proc_dostring_modpriv) ||
+ (table->proc_handler == proc_dointvec) ||
+ (table->proc_handler == proc_dointvec_minmax) ||
+ (table->proc_handler == proc_dointvec_jiffies) ||
+diff --git a/kernel/taskstats.c b/kernel/taskstats.c
+index e660464..c8b9e67 100644
+--- a/kernel/taskstats.c
++++ b/kernel/taskstats.c
+@@ -27,9 +27,12 @@
+ #include <linux/cgroup.h>
+ #include <linux/fs.h>
+ #include <linux/file.h>
++#include <linux/grsecurity.h>
+ #include <net/genetlink.h>
+ #include <linux/atomic.h>
+
++extern int gr_is_taskstats_denied(int pid);
++
+ /*
+ * Maximum length of a cpumask that can be specified in
+ * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
+@@ -556,6 +559,9 @@ err:
+
+ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+ {
++ if (gr_is_taskstats_denied(current->pid))
++ return -EACCES;
++
+ if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
+ return cmd_attr_register_cpumask(info);
+ else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
+diff --git a/kernel/time.c b/kernel/time.c
+index 73e416d..cfc6f69 100644
+--- a/kernel/time.c
++++ b/kernel/time.c
+@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+ return error;
+
+ if (tz) {
++ /* we log in do_settimeofday called below, so don't log twice
++ */
++ if (!tv)
++ gr_log_timechange();
++
+ /* SMP safe, global irq locking makes it work. */
+ sys_tz = *tz;
+ update_vsyscall_tz();
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 0907e43..56a6a92 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
+ struct platform_device *pdev;
+ int error = 0;
+ int i;
+- struct k_clock alarm_clock = {
++ static struct k_clock alarm_clock = {
+ .clock_getres = alarm_clock_getres,
+ .clock_get = alarm_clock_get,
+ .timer_create = alarm_timer_create,
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index c3509fb..ebec319 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -120,7 +120,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
+ * then clear the broadcast bit.
+ */
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
+- int cpu = smp_processor_id();
++ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ tick_broadcast_clear_oneshot(cpu);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index cb7f33e..7504d61 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -14,6 +14,7 @@
+ #include <linux/init.h>
+ #include <linux/mm.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+@@ -385,6 +386,8 @@ int do_settimeofday(const struct timespec *tv)
+ if (!timespec_valid_strict(tv))
+ return -EINVAL;
+
++ gr_log_timechange();
++
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+ timekeeping_forward_now();
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 3258455..f35227d 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
+
+ static void print_name_offset(struct seq_file *m, void *sym)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ SEQ_printf(m, "<%p>", NULL);
++#else
+ char symname[KSYM_NAME_LEN];
+
+ if (lookup_symbol_name((unsigned long)sym, symname) < 0)
+ SEQ_printf(m, "<%pK>", sym);
+ else
+ SEQ_printf(m, "%s", symname);
++#endif
+ }
+
+ static void
+@@ -112,7 +116,11 @@ next_one:
+ static void
+ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ SEQ_printf(m, " .base: %p\n", NULL);
++#else
+ SEQ_printf(m, " .base: %pK\n", base);
++#endif
+ SEQ_printf(m, " .index: %d\n",
+ base->index);
+ SEQ_printf(m, " .resolution: %Lu nsecs\n",
+@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
+ {
+ struct proc_dir_entry *pe;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
++#else
+ pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++#endif
+ if (!pe)
+ return -ENOMEM;
+ return 0;
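+
+ Besides hiding symbol addresses, the hunk above tightens the
+ /proc/timer_list entry itself to 0400 under CONFIG_GRKERNSEC_PROC_ADD.
+ A trivial probe (illustrative only):
+
+ #include <stdio.h>
+ #include <unistd.h>
+
+ int main(void)
+ {
+ 	/* expect EACCES for non-root when GRKERNSEC_PROC_ADD is set */
+ 	if (access("/proc/timer_list", R_OK))
+ 		perror("/proc/timer_list");
+ 	else
+ 		puts("/proc/timer_list is readable");
+ 	return 0;
+ }
+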
+diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
+index 0b537f2..40d6c20 100644
+--- a/kernel/time/timer_stats.c
++++ b/kernel/time/timer_stats.c
+@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
+ static unsigned long nr_entries;
+ static struct entry entries[MAX_ENTRIES];
+
+-static atomic_t overflow_count;
++static atomic_unchecked_t overflow_count;
+
+ /*
+ * The entries are in a hash-table, for fast lookup:
+@@ -140,7 +140,7 @@ static void reset_entries(void)
+ nr_entries = 0;
+ memset(entries, 0, sizeof(entries));
+ memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+- atomic_set(&overflow_count, 0);
++ atomic_set_unchecked(&overflow_count, 0);
+ }
+
+ static struct entry *alloc_entry(void)
+@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ if (likely(entry))
+ entry->count++;
+ else
+- atomic_inc(&overflow_count);
++ atomic_inc_unchecked(&overflow_count);
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(lock, flags);
+@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+
+ static void print_name_offset(struct seq_file *m, unsigned long addr)
+ {
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, "<%p>", NULL);
++#else
+ char symname[KSYM_NAME_LEN];
+
+ if (lookup_symbol_name(addr, symname) < 0)
+- seq_printf(m, "<%p>", (void *)addr);
++ seq_printf(m, "<%pK>", (void *)addr);
+ else
+ seq_printf(m, "%s", symname);
++#endif
+ }
+
+ static int tstats_show(struct seq_file *m, void *v)
+@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
+
+ seq_puts(m, "Timer Stats Version: v0.2\n");
+ seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+- if (atomic_read(&overflow_count))
++ if (atomic_read_unchecked(&overflow_count))
+ seq_printf(m, "Overflow: %d entries\n",
+- atomic_read(&overflow_count));
++ atomic_read_unchecked(&overflow_count));
+
+ for (i = 0; i < nr_entries; i++) {
+ entry = entries + i;
+@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
+ {
+ struct proc_dir_entry *pe;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
++#else
+ pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
++#endif
+ if (!pe)
+ return -ENOMEM;
+ return 0;
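+
+ The atomic_t to atomic_unchecked_t conversions above are the standard
+ PaX REFCOUNT annotation: overflow_count is a statistics counter that
+ may legitimately wrap, so it is opted out of the overflow trap that
+ guards real reference counts. The pattern, reduced to its parts (names
+ as in the hunks above):
+
+ 	static atomic_unchecked_t overflow_count;	/* wrapping is harmless */
+
+ 	atomic_inc_unchecked(&overflow_count);		/* no REFCOUNT trap */
+ 	seq_printf(m, "Overflow: %d entries\n",
+ 		   atomic_read_unchecked(&overflow_count));
+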
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f8b05a4..ece06b3 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1308,7 +1308,7 @@ void update_process_times(int user_tick)
+ /*
+ * This function runs timers and the timer-tq in bottom half context.
+ */
+-static void run_timer_softirq(struct softirq_action *h)
++static __latent_entropy void run_timer_softirq(void)
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
+@@ -1435,7 +1435,7 @@ static void process_timeout(unsigned long __data)
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+-signed long __sched schedule_timeout(signed long timeout)
++signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
+ {
+ struct timer_list timer;
+ unsigned long expire;
+@@ -1727,7 +1727,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata timers_nb = {
++static struct notifier_block timers_nb = {
+ .notifier_call = timer_cpu_notify,
+ };
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 16fc34a..efd8bb8 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+ struct blk_trace *bt = filp->private_data;
+ char buf[16];
+
+- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
++ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ }
+@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+ return 1;
+
+ bt = buf->chan->private_data;
+- atomic_inc(&bt->dropped);
++ atomic_inc_unchecked(&bt->dropped);
+ return 0;
+ }
+
+@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+
+ bt->dir = dir;
+ bt->dev = dev;
+- atomic_set(&bt->dropped, 0);
++ atomic_set_unchecked(&bt->dropped, 0);
+
+ ret = -EIO;
+ bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index d40d7f6..b4e9662 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1598,12 +1598,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+ if (unlikely(ftrace_disabled))
+ return 0;
+
++ ret = ftrace_arch_code_modify_prepare();
++ FTRACE_WARN_ON(ret);
++ if (ret)
++ return 0;
++
+ ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
+ if (ret) {
+ ftrace_bug(ret, ip);
+- return 0;
+ }
+- return 1;
++ return ret ? 0 : 1;
+ }
+
+ /*
+@@ -2665,7 +2670,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+
+ int
+ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+- void *data)
++ void *data)
+ {
+ struct ftrace_func_probe *entry;
+ struct ftrace_page *pg;
+@@ -4034,8 +4039,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ static int ftrace_graph_active;
+-static struct notifier_block ftrace_suspend_notifier;
+-
+ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ {
+ return 0;
+@@ -4179,6 +4182,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+ return NOTIFY_DONE;
+ }
+
++static struct notifier_block ftrace_suspend_notifier = {
++ .notifier_call = ftrace_suspend_notifier_call
++};
++
+ /* Just a place holder for function graph */
+ static struct ftrace_ops fgraph_ops __read_mostly = {
+ .func = ftrace_stub,
+@@ -4198,7 +4205,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ goto out;
+ }
+
+- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+ register_pm_notifier(&ftrace_suspend_notifier);
+
+ ftrace_graph_active++;
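+
+ The ftrace_code_disable() change above brackets the nop patching with
+ ftrace_arch_code_modify_prepare()/..._post_process(), apparently needed
+ here because KERNEXEC keeps kernel text read-only. The resulting flow,
+ condensed:
+
+ 	ret = ftrace_arch_code_modify_prepare();	/* make text writable */
+ 	if (ret)
+ 		return 0;				/* could not unprotect */
+ 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ 	ftrace_arch_code_modify_post_process();		/* re-protect text */
+ 	return ret ? 0 : 1;
+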
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6fdc629..55739fe 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -376,9 +376,9 @@ struct buffer_data_page {
+ */
+ struct buffer_page {
+ struct list_head list; /* list of buffer pages */
+- local_t write; /* index for next write */
++ local_unchecked_t write; /* index for next write */
+ unsigned read; /* index for next read */
+- local_t entries; /* entries on this page */
++ local_unchecked_t entries; /* entries on this page */
+ unsigned long real_end; /* real end of data */
+ struct buffer_data_page *page; /* Actual data page */
+ };
+@@ -489,8 +489,8 @@ struct ring_buffer_per_cpu {
+ unsigned long lost_events;
+ unsigned long last_overrun;
+ local_t entries_bytes;
+- local_t commit_overrun;
+- local_t overrun;
++ local_unchecked_t commit_overrun;
++ local_unchecked_t overrun;
+ local_t entries;
+ local_t committing;
+ local_t commits;
+@@ -884,8 +884,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ *
+ * We add a counter to the write field to denote this.
+ */
+- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
++ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
++ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
+
+ /*
+ * Just make sure we have seen our old_write and synchronize
+@@ -913,8 +913,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ * cmpxchg to only update if an interrupt did not already
+ * do it for us. If the cmpxchg fails, we don't care.
+ */
+- (void)local_cmpxchg(&next_page->write, old_write, val);
+- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
++ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
++ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
+
+ /*
+ * No need to worry about races with clearing out the commit.
+@@ -1481,7 +1481,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
+
+ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ {
+- return local_read(&bpage->write) & RB_WRITE_MASK;
++ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
+ }
+
+ static inline unsigned rb_page_commit(struct buffer_page *bpage)
+@@ -1491,7 +1491,7 @@ static inline unsigned rb_page_commit(struct buffer_page *bpage)
+
+ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+ {
+- return local_read(&bpage->entries) & RB_WRITE_MASK;
++ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
+ }
+
+ /* Size is determined by what has been committed */
+@@ -1709,7 +1709,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ * it is our responsibility to update
+ * the counters.
+ */
+- local_add(entries, &cpu_buffer->overrun);
++ local_add_unchecked(entries, &cpu_buffer->overrun);
+ local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+
+ /*
+@@ -1859,7 +1859,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ if (tail == BUF_PAGE_SIZE)
+ tail_page->real_end = 0;
+
+- local_sub(length, &tail_page->write);
++ local_sub_unchecked(length, &tail_page->write);
+ return;
+ }
+
+@@ -1894,7 +1894,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ rb_event_set_padding(event);
+
+ /* Set the write back to the previous setting */
+- local_sub(length, &tail_page->write);
++ local_sub_unchecked(length, &tail_page->write);
+ return;
+ }
+
+@@ -1906,7 +1906,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+
+ /* Set write to end of buffer */
+ length = (tail + length) - BUF_PAGE_SIZE;
+- local_sub(length, &tail_page->write);
++ local_sub_unchecked(length, &tail_page->write);
+ }
+
+ /*
+@@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ * about it.
+ */
+ if (unlikely(next_page == commit_page)) {
+- local_inc(&cpu_buffer->commit_overrun);
++ local_inc_unchecked(&cpu_buffer->commit_overrun);
+ goto out_reset;
+ }
+
+@@ -1986,7 +1986,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ cpu_buffer->tail_page) &&
+ (cpu_buffer->commit_page ==
+ cpu_buffer->reader_page))) {
+- local_inc(&cpu_buffer->commit_overrun);
++ local_inc_unchecked(&cpu_buffer->commit_overrun);
+ goto out_reset;
+ }
+ }
+@@ -2034,7 +2034,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ length += RB_LEN_TIME_EXTEND;
+
+ tail_page = cpu_buffer->tail_page;
+- write = local_add_return(length, &tail_page->write);
++ write = local_add_return_unchecked(length, &tail_page->write);
+
+ /* set write to only the index of the write */
+ write &= RB_WRITE_MASK;
+@@ -2051,7 +2051,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ kmemcheck_annotate_bitfield(event, bitfield);
+ rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
+
+- local_inc(&tail_page->entries);
++ local_inc_unchecked(&tail_page->entries);
+
+ /*
+ * If this is the first commit on the page, then update
+@@ -2084,7 +2084,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+
+ if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+ unsigned long write_mask =
+- local_read(&bpage->write) & ~RB_WRITE_MASK;
++ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
+ unsigned long event_length = rb_event_length(event);
+ /*
+ * This is on the tail page. It is possible that
+@@ -2094,7 +2094,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ */
+ old_index += write_mask;
+ new_index += write_mask;
+- index = local_cmpxchg(&bpage->write, old_index, new_index);
++ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
+ if (index == old_index) {
+ /* update counters */
+ local_sub(event_length, &cpu_buffer->entries_bytes);
+@@ -2433,7 +2433,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+
+ /* Do the likely case first */
+ if (likely(bpage->page == (void *)addr)) {
+- local_dec(&bpage->entries);
++ local_dec_unchecked(&bpage->entries);
+ return;
+ }
+
+@@ -2445,7 +2445,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+ start = bpage;
+ do {
+ if (bpage->page == (void *)addr) {
+- local_dec(&bpage->entries);
++ local_dec_unchecked(&bpage->entries);
+ return;
+ }
+ rb_inc_page(cpu_buffer, &bpage);
+@@ -2670,7 +2670,7 @@ static inline unsigned long
+ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+ return local_read(&cpu_buffer->entries) -
+- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
++ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
+ }
+
+ /**
+@@ -2758,7 +2758,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+- ret = local_read(&cpu_buffer->overrun);
++ ret = local_read_unchecked(&cpu_buffer->overrun);
+
+ return ret;
+ }
+@@ -2779,7 +2779,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+- ret = local_read(&cpu_buffer->commit_overrun);
++ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
+
+ return ret;
+ }
+@@ -2824,7 +2824,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+ /* if you care about this being correct, lock the buffer */
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+- overruns += local_read(&cpu_buffer->overrun);
++ overruns += local_read_unchecked(&cpu_buffer->overrun);
+ }
+
+ return overruns;
+@@ -2996,8 +2996,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ /*
+ * Reset the reader page to size zero.
+ */
+- local_set(&cpu_buffer->reader_page->write, 0);
+- local_set(&cpu_buffer->reader_page->entries, 0);
++ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
+ cpu_buffer->reader_page->real_end = 0;
+
+@@ -3031,7 +3031,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ * want to compare with the last_overrun.
+ */
+ smp_mb();
+- overwrite = local_read(&(cpu_buffer->overrun));
++ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
+
+ /*
+ * Here's the tricky part.
+@@ -3579,8 +3579,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+
+ cpu_buffer->head_page
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
+- local_set(&cpu_buffer->head_page->write, 0);
+- local_set(&cpu_buffer->head_page->entries, 0);
++ local_set_unchecked(&cpu_buffer->head_page->write, 0);
++ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
+ local_set(&cpu_buffer->head_page->page->commit, 0);
+
+ cpu_buffer->head_page->read = 0;
+@@ -3589,14 +3589,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+ cpu_buffer->commit_page = cpu_buffer->head_page;
+
+ INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+- local_set(&cpu_buffer->reader_page->write, 0);
+- local_set(&cpu_buffer->reader_page->entries, 0);
++ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
++ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
+ cpu_buffer->reader_page->read = 0;
+
+- local_set(&cpu_buffer->commit_overrun, 0);
++ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
+ local_set(&cpu_buffer->entries_bytes, 0);
+- local_set(&cpu_buffer->overrun, 0);
++ local_set_unchecked(&cpu_buffer->overrun, 0);
+ local_set(&cpu_buffer->entries, 0);
+ local_set(&cpu_buffer->committing, 0);
+ local_set(&cpu_buffer->commits, 0);
+@@ -3994,8 +3994,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ rb_init_page(bpage);
+ bpage = reader->page;
+ reader->page = *data_page;
+- local_set(&reader->write, 0);
+- local_set(&reader->entries, 0);
++ local_set_unchecked(&reader->write, 0);
++ local_set_unchecked(&reader->entries, 0);
+ reader->read = 0;
+ *data_page = bpage;
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c5a12a7..4d94416 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2656,7 +2656,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+ return 0;
+ }
+
+-int set_tracer_flag(unsigned int mask, int enabled)
++int set_tracer_flag(unsigned long mask, int enabled)
+ {
+ /* do nothing if flag is already set */
+ if (!!(trace_flags & mask) == !!enabled)
+@@ -4248,10 +4248,9 @@ static const struct file_operations tracing_dyn_info_fops = {
+ };
+ #endif
+
+-static struct dentry *d_tracer;
+-
+ struct dentry *tracing_init_dentry(void)
+ {
++ static struct dentry *d_tracer;
+ static int once;
+
+ if (d_tracer)
+@@ -4271,10 +4270,9 @@ struct dentry *tracing_init_dentry(void)
+ return d_tracer;
+ }
+
+-static struct dentry *d_percpu;
+-
+ struct dentry *tracing_dentry_percpu(void)
+ {
++ static struct dentry *d_percpu;
+ static int once;
+ struct dentry *d_tracer;
+
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index c3c3f6b..7d8dbdc 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -820,7 +820,7 @@ extern const char *__start___trace_bprintk_fmt[];
+ extern const char *__stop___trace_bprintk_fmt[];
+
+ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+-int set_tracer_flag(unsigned int mask, int enabled);
++int set_tracer_flag(unsigned long mask, int enabled);
+
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index 3947835..8c0488b 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -114,7 +114,7 @@ u64 notrace trace_clock_global(void)
+ return now;
+ }
+
+-static atomic64_t trace_counter;
++static atomic64_unchecked_t trace_counter;
+
+ /*
+ * trace_clock_counter(): simply an atomic counter.
+@@ -123,5 +123,5 @@ static atomic64_t trace_counter;
+ */
+ u64 notrace trace_clock_counter(void)
+ {
+- return atomic64_add_return(1, &trace_counter);
++ return atomic64_add_return_unchecked(1, &trace_counter);
+ }
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index c212a7f..a2560bc 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
+ struct ftrace_module_file_ops {
+ struct list_head list;
+ struct module *mod;
+- struct file_operations id;
+- struct file_operations enable;
+- struct file_operations format;
+- struct file_operations filter;
+ };
+
+ static struct ftrace_module_file_ops *
+@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
+
+ file_ops->mod = mod;
+
+- file_ops->id = ftrace_event_id_fops;
+- file_ops->id.owner = mod;
+-
+- file_ops->enable = ftrace_enable_fops;
+- file_ops->enable.owner = mod;
+-
+- file_ops->filter = ftrace_event_filter_fops;
+- file_ops->filter.owner = mod;
+-
+- file_ops->format = ftrace_event_format_fops;
+- file_ops->format.owner = mod;
++ pax_open_kernel();
++ mod->trace_id.owner = mod;
++ mod->trace_enable.owner = mod;
++ mod->trace_filter.owner = mod;
++ mod->trace_format.owner = mod;
++ pax_close_kernel();
+
+ list_add(&file_ops->list, &ftrace_module_file_list);
+
+@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
+
+ for_each_event(call, start, end) {
+ __trace_add_event_call(*call, mod,
+- &file_ops->id, &file_ops->enable,
+- &file_ops->filter, &file_ops->format);
++ &mod->trace_id, &mod->trace_enable,
++ &mod->trace_filter, &mod->trace_format);
+ }
+ }
+
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 00d527c..7c5b1a3 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ long ret;
+ int maxlen = get_rloc_len(*(u32 *)dest);
+ u8 *dst = get_rloc_data(dest);
+- u8 *src = addr;
++ const u8 __user *src = (const u8 __force_user *)addr;
+ mm_segment_t old_fs = get_fs();
+ if (!maxlen)
+ return;
+@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ pagefault_disable();
+ do
+ ret = __copy_from_user_inatomic(dst++, src++, 1);
+- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
++ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
+ dst[-1] = '\0';
+ pagefault_enable();
+ set_fs(old_fs);
+@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ ((u8 *)get_rloc_data(dest))[0] = '\0';
+ *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
+ } else
+- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
++ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
+ get_rloc_offs(*(u32 *)dest));
+ }
+ /* Return the length of string -- including null terminal byte */
+@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ do {
+- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
++ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
+ len++;
+ } while (c && ret == 0 && len < MAX_STRING_SIZE);
+ pagefault_enable();
+diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
+index fd3c8aa..5f324a6 100644
+--- a/kernel/trace/trace_mmiotrace.c
++++ b/kernel/trace/trace_mmiotrace.c
+@@ -24,7 +24,7 @@ struct header_iter {
+ static struct trace_array *mmio_trace_array;
+ static bool overrun_detected;
+ static unsigned long prev_overruns;
+-static atomic_t dropped_count;
++static atomic_unchecked_t dropped_count;
+
+ static void mmio_reset_data(struct trace_array *tr)
+ {
+@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
+
+ static unsigned long count_overruns(struct trace_iterator *iter)
+ {
+- unsigned long cnt = atomic_xchg(&dropped_count, 0);
++ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
+ unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+ if (over > prev_overruns)
+@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
+ if (!event) {
+- atomic_inc(&dropped_count);
++ atomic_inc_unchecked(&dropped_count);
+ return;
+ }
+ entry = ring_buffer_event_data(event);
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 1dcf253..e1568b3 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
+
+ p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+ if (!IS_ERR(p)) {
+- p = mangle_path(s->buffer + s->len, p, "\n");
++ p = mangle_path(s->buffer + s->len, p, "\n\\");
+ if (p) {
+ s->len = p - s->buffer;
+ return 1;
+@@ -810,14 +810,16 @@ int register_ftrace_event(struct trace_event *event)
+ goto out;
+ }
+
++ pax_open_kernel();
+ if (event->funcs->trace == NULL)
+- event->funcs->trace = trace_nop_print;
++ *(void **)&event->funcs->trace = trace_nop_print;
+ if (event->funcs->raw == NULL)
+- event->funcs->raw = trace_nop_print;
++ *(void **)&event->funcs->raw = trace_nop_print;
+ if (event->funcs->hex == NULL)
+- event->funcs->hex = trace_nop_print;
++ *(void **)&event->funcs->hex = trace_nop_print;
+ if (event->funcs->binary == NULL)
+- event->funcs->binary = trace_nop_print;
++ *(void **)&event->funcs->binary = trace_nop_print;
++ pax_close_kernel();
+
+ key = event->type & (EVENT_HASHSIZE - 1);
+
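+ The register_ftrace_event() writes above are the canonical constify
+ idiom: the trace_event function tables become const under the PaX
+ constify plugin, so the rare legitimate writer casts the const away and
+ brackets the store with pax_open_kernel()/pax_close_kernel(). Generic
+ shape (ops and new_handler are placeholder names):
+
+ 	pax_open_kernel();			/* lift write protection briefly */
+ 	*(void **)&ops->handler = new_handler;	/* store through const member */
+ 	pax_close_kernel();			/* restore protection */
+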
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index c5b20a3..6b38c73 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -66,7 +66,7 @@ check_stack(unsigned long ip, unsigned long *stack)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(stack))
++ if (!object_starts_on_stack(stack))
+ return;
+
+ local_irq_save(flags);
+diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
+index 209b379..7f76423 100644
+--- a/kernel/trace/trace_workqueue.c
++++ b/kernel/trace/trace_workqueue.c
+@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
+ int cpu;
+ pid_t pid;
+ /* Can be inserted from interrupt or user context, need to be atomic */
+- atomic_t inserted;
++ atomic_unchecked_t inserted;
+ /*
+ * Don't need to be atomic, works are serialized in a single workqueue thread
+ * on a single CPU.
+@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
+ if (node->pid == wq_thread->pid) {
+- atomic_inc(&node->inserted);
++ atomic_inc_unchecked(&node->inserted);
+ goto found;
+ }
+ }
+@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (tsk) {
+ seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+- atomic_read(&cws->inserted), cws->executed,
++ atomic_read_unchecked(&cws->inserted), cws->executed,
+ tsk->comm);
+ put_task_struct(tsk);
+ }
+diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
+index 63da38c..639904e 100644
+--- a/kernel/utsname_sysctl.c
++++ b/kernel/utsname_sysctl.c
+@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
+ static int proc_do_uts_string(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table uts_table;
++ ctl_table_no_const uts_table;
+ int r;
+ memcpy(&uts_table, table, sizeof(uts_table));
+ uts_table.data = get_uts(table, write);
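+
+ ctl_table_no_const (also used for proc_taint() earlier) appears to be
+ the patchset's escape hatch for that same constify plugin: struct
+ ctl_table becomes read-only, so code that must build a table at runtime
+ declares a non-constified stack copy instead. Sketch of the idiom
+ (some_runtime_buffer is a placeholder):
+
+ 	ctl_table_no_const t = *table;	/* writable copy; struct ctl_table
+ 					   itself is constified */
+ 	t.data = some_runtime_buffer;
+ 	r = proc_dostring(&t, write, buffer, lenp, ppos);
+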
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index a8bc4d9..eae8357 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -574,7 +574,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata cpu_nfb = {
++static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+ };
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 0bc9ff0..4722738 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3499,7 +3499,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
+ */
+ worker_flags |= WORKER_REBIND;
+ worker_flags &= ~WORKER_ROGUE;
+- ACCESS_ONCE(worker->flags) = worker_flags;
++ ACCESS_ONCE_RW(worker->flags) = worker_flags;
+
+ /* queue rebind_work, wq doesn't matter, use the default one */
+ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 82928f5..a3c7bb1 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -510,7 +510,7 @@ config DEBUG_MUTEXES
+
+ config DEBUG_LOCK_ALLOC
+ bool "Lock debugging: detect incorrect freeing of live locks"
+- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+ select LOCKDEP
+@@ -524,7 +524,7 @@ config DEBUG_LOCK_ALLOC
+
+ config PROVE_LOCKING
+ bool "Lock debugging: prove locking correctness"
+- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+@@ -616,7 +616,7 @@ config LOCKDEP
+
+ config LOCK_STAT
+ bool "Lock usage statistics"
+- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
++ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES
+@@ -1103,6 +1103,7 @@ config LATENCYTOP
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
++ depends on !GRKERNSEC_HIDESYM
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+ select KALLSYMS
+ select KALLSYMS_ALL
+@@ -1126,7 +1127,7 @@ source kernel/trace/Kconfig
+
+ config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+- depends on PCI && X86
++ depends on PCI && X86 && !GRKERNSEC
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+@@ -1155,7 +1156,7 @@ config PROVIDE_OHCI1394_DMA_INIT
+
+ config FIREWIRE_OHCI_REMOTE_DMA
+ bool "Remote debugging over FireWire with firewire-ohci"
+- depends on FIREWIRE_OHCI
++ depends on FIREWIRE_OHCI && !GRKERNSEC
+ help
+ This option lets you use the FireWire bus for remote debugging
+ with help of the firewire-ohci driver. It enables unfiltered
+diff --git a/lib/Makefile b/lib/Makefile
+index a4da283..57fec5e 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -45,7 +45,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
+ obj-$(CONFIG_BTREE) += btree.o
+ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
++obj-y += list_debug.o
+ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
+
+ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 0d4a127..33a06c7 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+ {
+ int c, old_c, totaldigits, ndigits, nchunks, nbits;
+ u32 chunk;
+- const char __user __force *ubuf = (const char __user __force *)buf;
++ const char __user *ubuf = (const char __force_user *)buf;
+
+ bitmap_zero(maskp, nmaskbits);
+
+@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
+ {
+ if (!access_ok(VERIFY_READ, ubuf, ulen))
+ return -EFAULT;
+- return __bitmap_parse((const char __force *)ubuf,
++ return __bitmap_parse((const char __force_kernel *)ubuf,
+ ulen, 1, maskp, nmaskbits);
+
+ }
+@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ {
+ unsigned a, b;
+ int c, old_c, totaldigits;
+- const char __user __force *ubuf = (const char __user __force *)buf;
++ const char __user *ubuf = (const char __force_user *)buf;
+ int exp_digit, in_range;
+
+ totaldigits = c = 0;
+@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+ {
+ if (!access_ok(VERIFY_READ, ubuf, ulen))
+ return -EFAULT;
+- return __bitmap_parselist((const char __force *)ubuf,
++ return __bitmap_parselist((const char __force_kernel *)ubuf,
+ ulen, 1, maskp, nmaskbits);
+ }
+ EXPORT_SYMBOL(bitmap_parselist_user);
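+
+ The bitmap parsers take the opposite direction from sysctl_binary.c
+ above: a genuine __user buffer is funneled into a kernel-side parser,
+ so the cast is __force_kernel on the way in, and the parser recovers
+ the __user view with __force_user when it actually dereferences. Shape,
+ as in the hunks above:
+
+ 	/* entry point: the pointer is really __user */
+ 	return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1,
+ 			      maskp, nmaskbits);
+
+ 	/* inside the parser, on the is_user path: */
+ 	const char __user *ubuf = (const char __force_user *)buf;
+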
+diff --git a/lib/bug.c b/lib/bug.c
+index 1955209..cbbb2ad 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
++ if (!bug)
++ return BUG_TRAP_TYPE_NONE;
+
+ file = NULL;
+ line = 0;
+diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
+index 4dc2032..7a2a1da 100644
+--- a/lib/cpu-notifier-error-inject.c
++++ b/lib/cpu-notifier-error-inject.c
+@@ -45,7 +45,9 @@ static struct notifier_block err_inject_cpu_notifier = {
+
+ static int err_inject_init(void)
+ {
+- err_inject_cpu_notifier.priority = priority;
++ pax_open_kernel();
++ *(int *)&err_inject_cpu_notifier.priority = priority;
++ pax_close_kernel();
+
+ return register_hotcpu_notifier(&err_inject_cpu_notifier);
+ }
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index a78b7c6..2c73084 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+ if (limit > 4)
+ return;
+
+- is_on_stack = object_is_on_stack(addr);
++ is_on_stack = object_starts_on_stack(addr);
+ if (is_on_stack == onstack)
+ return;
+
+diff --git a/lib/devres.c b/lib/devres.c
+index 7c0e953..f642b5c 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
+ void devm_iounmap(struct device *dev, void __iomem *addr)
+ {
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+- (void *)addr));
++ (void __force *)addr));
+ iounmap(addr);
+ }
+ EXPORT_SYMBOL(devm_iounmap);
+@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+ {
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+- devm_ioport_map_match, (void *)addr));
++ devm_ioport_map_match, (void __force *)addr));
+ }
+ EXPORT_SYMBOL(devm_ioport_unmap);
+
+diff --git a/lib/div64.c b/lib/div64.c
+index 5b49191..d84e6fc 100644
+--- a/lib/div64.c
++++ b/lib/div64.c
+@@ -58,7 +58,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
+ EXPORT_SYMBOL(__div64_32);
+
+ #ifndef div_s64_rem
+-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
++s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+ {
+ u64 quotient;
+
+@@ -89,7 +89,7 @@ EXPORT_SYMBOL(div_s64_rem);
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
+ #ifndef div64_u64
+-u64 div64_u64(u64 dividend, u64 divisor)
++u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
+ {
+ u32 high = divisor >> 32;
+ u64 quot;
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index fea790a..3bdd6b4 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -760,7 +760,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
+
+ void dma_debug_add_bus(struct bus_type *bus)
+ {
+- struct notifier_block *nb;
++ notifier_block_no_const *nb;
+
+ if (global_disable)
+ return;
+@@ -925,7 +925,7 @@ out:
+
+ static void check_for_stack(struct device *dev, void *addr)
+ {
+- if (object_is_on_stack(addr))
++ if (object_starts_on_stack(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from"
+ "stack [addr=%p]\n", addr);
+ }
+diff --git a/lib/extable.c b/lib/extable.c
+index 4cac81e..63e9b8f 100644
+--- a/lib/extable.c
++++ b/lib/extable.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/sort.h>
+ #include <asm/uaccess.h>
++#include <asm/pgtable.h>
+
+ #ifndef ARCH_HAS_SORT_EXTABLE
+ /*
+@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
+ void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+ {
++ pax_open_kernel();
+ sort(start, finish - start, sizeof(struct exception_table_entry),
+ cmp_ex, NULL);
++ pax_close_kernel();
+ }
+
+ #ifdef CONFIG_MODULES
+diff --git a/lib/inflate.c b/lib/inflate.c
+index 013a761..c28f3fc 100644
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -269,7 +269,7 @@ static void free(void *where)
+ malloc_ptr = free_mem_ptr;
+ }
+ #else
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+ #endif
+
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index da4e2ad..6373b5f 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long next;
+
+ phys_addr -= addr;
+- pmd = pmd_alloc(&init_mm, pud, addr);
++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+ unsigned long next;
+
+ phys_addr -= addr;
+- pud = pud_alloc(&init_mm, pgd, addr);
++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
+index bd2bea9..6b3c95e 100644
+--- a/lib/is_single_threaded.c
++++ b/lib/is_single_threaded.c
+@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
+ struct task_struct *p, *t;
+ bool ret;
+
++ if (!mm)
++ return true;
++
+ if (atomic_read(&task->signal->live) != 1)
+ return false;
+
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 83bd5b3..8a0c75f 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -844,7 +844,7 @@ static struct kset *kset_create(const char *name,
+ kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+ if (!kset)
+ return NULL;
+- retval = kobject_set_name(&kset->kobj, name);
++ retval = kobject_set_name(&kset->kobj, "%s", name);
+ if (retval) {
+ kfree(kset);
+ return NULL;
+@@ -898,9 +898,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+
+
+ static DEFINE_SPINLOCK(kobj_ns_type_lock);
+-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
++static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
+
+-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
++int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+ {
+ enum kobj_ns_type type = ops->type;
+ int error;
+diff --git a/lib/kref.c b/lib/kref.c
+index 3efb882..8492f4c 100644
+--- a/lib/kref.c
++++ b/lib/kref.c
+@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
+ */
+ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+ {
+- WARN_ON(release == NULL);
++ BUG_ON(release == NULL);
+ WARN_ON(release == (void (*)(struct kref *))kfree);
+
+ if (atomic_dec_and_test(&kref->refcount)) {
+diff --git a/lib/list_debug.c b/lib/list_debug.c
+index b8029a5..2b120e1 100644
+--- a/lib/list_debug.c
++++ b/lib/list_debug.c
+@@ -8,7 +8,9 @@
+
+ #include <linux/module.h>
+ #include <linux/list.h>
++#include <linux/mm.h>
+
++#ifdef CONFIG_DEBUG_LIST
+ /*
+ * Insert a new entry between two known consecutive entries.
+ *
+@@ -16,18 +18,40 @@
+ * the prev/next entries already!
+ */
+
++static bool __list_add_debug(struct list_head *new,
++ struct list_head *prev,
++ struct list_head *next)
++{
++ if (unlikely(next->prev != prev)) {
++ printk(KERN_ERR "list_add corruption. next->prev should be "
++ "prev (%p), but was %p. (next=%p).\n",
++ prev, next->prev, next);
++ BUG();
++ return false;
++ }
++ if (unlikely(prev->next != next)) {
++ printk(KERN_ERR "list_add corruption. prev->next should be "
++ "next (%p), but was %p. (prev=%p).\n",
++ next, prev->next, prev);
++ BUG();
++ return false;
++ }
++ if (unlikely(new == prev || new == next)) {
++ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
++ new, prev, next);
++ BUG();
++ return false;
++ }
++ return true;
++}
++
+ void __list_add(struct list_head *new,
+- struct list_head *prev,
+- struct list_head *next)
++ struct list_head *prev,
++ struct list_head *next)
+ {
+- WARN(next->prev != prev,
+- "list_add corruption. next->prev should be "
+- "prev (%p), but was %p. (next=%p).\n",
+- prev, next->prev, next);
+- WARN(prev->next != next,
+- "list_add corruption. prev->next should be "
+- "next (%p), but was %p. (prev=%p).\n",
+- next, prev->next, prev);
++ if (!__list_add_debug(new, prev, next))
++ return;
++
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+@@ -35,28 +59,46 @@ void __list_add(struct list_head *new,
+ }
+ EXPORT_SYMBOL(__list_add);
+
+-void __list_del_entry(struct list_head *entry)
++static bool __list_del_entry_debug(struct list_head *entry)
+ {
+ struct list_head *prev, *next;
+
+ prev = entry->prev;
+ next = entry->next;
+
+- if (WARN(next == LIST_POISON1,
+- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+- entry, LIST_POISON1) ||
+- WARN(prev == LIST_POISON2,
+- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+- entry, LIST_POISON2) ||
+- WARN(prev->next != entry,
+- "list_del corruption. prev->next should be %p, "
+- "but was %p\n", entry, prev->next) ||
+- WARN(next->prev != entry,
+- "list_del corruption. next->prev should be %p, "
+- "but was %p\n", entry, next->prev))
++ if (unlikely(next == LIST_POISON1)) {
++ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
++ entry, LIST_POISON1);
++ BUG();
++ return false;
++ }
++ if (unlikely(prev == LIST_POISON2)) {
++ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
++ entry, LIST_POISON2);
++ BUG();
++ return false;
++ }
++ if (unlikely(entry->prev->next != entry)) {
++ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
++ "but was %p\n", entry, prev->next);
++ BUG();
++ return false;
++ }
++ if (unlikely(entry->next->prev != entry)) {
++ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
++ "but was %p\n", entry, next->prev);
++ BUG();
++ return false;
++ }
++ return true;
++}
++
++void __list_del_entry(struct list_head *entry)
++{
++ if (!__list_del_entry_debug(entry))
+ return;
+
+- __list_del(prev, next);
++ __list_del(entry->prev, entry->next);
+ }
+ EXPORT_SYMBOL(__list_del_entry);
+
+@@ -73,3 +115,76 @@ void list_del(struct list_head *entry)
+ entry->prev = LIST_POISON2;
+ }
+ EXPORT_SYMBOL(list_del);
++#endif
++
++void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++ if (!__list_add_debug(new, prev, next))
++ return;
++#endif
++
++ pax_open_kernel();
++ next->prev = new;
++ new->next = next;
++ new->prev = prev;
++ prev->next = new;
++ pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add);
++
++void pax_list_del(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++ if (!__list_del_entry_debug(entry))
++ return;
++#endif
++
++ pax_open_kernel();
++ __list_del(entry->prev, entry->next);
++ entry->next = LIST_POISON1;
++ entry->prev = LIST_POISON2;
++ pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del);
++
++void pax_list_del_init(struct list_head *entry)
++{
++ pax_open_kernel();
++ __list_del(entry->prev, entry->next);
++ INIT_LIST_HEAD(entry);
++ pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_init);
++
++void __pax_list_add_rcu(struct list_head *new,
++ struct list_head *prev, struct list_head *next)
++{
++#ifdef CONFIG_DEBUG_LIST
++ if (!__list_add_debug(new, prev, next))
++ return;
++#endif
++
++ pax_open_kernel();
++ new->next = next;
++ new->prev = prev;
++ rcu_assign_pointer(list_next_rcu(prev), new);
++ next->prev = new;
++ pax_close_kernel();
++}
++EXPORT_SYMBOL(__pax_list_add_rcu);
++
++void pax_list_del_rcu(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++ if (!__list_del_entry_debug(entry))
++ return;
++#endif
++
++ pax_open_kernel();
++ __list_del(entry->prev, entry->next);
++ entry->next = LIST_POISON1;
++ entry->prev = LIST_POISON2;
++ pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del_rcu);
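The rewritten list_debug.c factors the sanity checks into __list_add_debug()/__list_del_entry_debug() and reuses them from the pax_list_* variants, which additionally bracket the pointer writes with pax_open_kernel()/pax_close_kernel() so lists living in read-only-protected memory can still be modified at controlled sites. The checks themselves are plain neighbour-consistency tests; a standalone sketch of the same invariants on an ordinary doubly linked list (userspace, not the kernel list_head API):

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
};

/* The same three checks as __list_add_debug(), minus the BUG(). */
static bool list_add_ok(struct node *new, struct node *prev, struct node *next)
{
	if (next->prev != prev) {
		fprintf(stderr, "corruption: next->prev != prev\n");
		return false;
	}
	if (prev->next != next) {
		fprintf(stderr, "corruption: prev->next != next\n");
		return false;
	}
	if (new == prev || new == next) {
		fprintf(stderr, "double add: new already linked here\n");
		return false;
	}
	return true;
}

static void list_add_between(struct node *new, struct node *prev, struct node *next)
{
	if (!list_add_ok(new, prev, next))
		return;
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct node head = { &head, &head };	/* empty circular list */
	struct node a, b, c;

	list_add_between(&a, &head, head.next);	/* ok: head <-> a */
	list_add_between(&b, &head, &a);	/* ok: head <-> b <-> a */
	list_add_between(&b, &head, &b);	/* rejected: double add */
	a.prev = NULL;				/* simulate corruption */
	list_add_between(&c, &b, &a);		/* rejected: next->prev != prev */
	return 0;
}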
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index d9df745..e73c2fe 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -80,7 +80,7 @@ struct radix_tree_preload {
+ int nr;
+ struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ };
+-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
+ static inline void *ptr_to_indirect(void *ptr)
+ {
+diff --git a/lib/random32.c b/lib/random32.c
+index 1f44bdc..1e5b2df 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -2,19 +2,19 @@
+ This is a maximally equidistributed combined Tausworthe generator
+ based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+
+- x_n = (s1_n ^ s2_n ^ s3_n)
++ lfsr113 version:
+
+- s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
+- s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
+- s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
++ x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
+
+- The period of this generator is about 2^88.
++ s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
++ s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
++ s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
++ s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
++
++ The period of this generator is about 2^113 (see erratum paper).
+
+ From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+- Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
+-
+- This is available on the net from L'Ecuyer's home page,
+-
++ Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
+ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+
+@@ -29,61 +29,148 @@
+ that paper.)
+
+ This affects the seeding procedure by imposing the requirement
+- s1 > 1, s2 > 7, s3 > 15.
++ s1 > 1, s2 > 7, s3 > 15, s4 > 127.
+
+ */
+
+ #include <linux/types.h>
+ #include <linux/percpu.h>
+-#include <linux/module.h>
++#include <linux/export.h>
+ #include <linux/jiffies.h>
+ #include <linux/random.h>
++#include <linux/sched.h>
++
++#ifdef CONFIG_RANDOM32_SELFTEST
++static void __init prandom_state_selftest(void);
++#endif
+
+ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+
+ /**
+- * prandom32 - seeded pseudo-random number generator.
++ * prandom_u32_state - seeded pseudo-random number generator.
+ * @state: pointer to state structure holding seeded state.
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+- * For more random results, use random32().
++ * For more random results, use prandom_u32().
+ */
+-u32 prandom32(struct rnd_state *state)
++u32 prandom_u32_state(struct rnd_state *state)
+ {
+ #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
+
+- state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
+- state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
+- state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
++ state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
++ state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
++ state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
++ state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
+
+- return (state->s1 ^ state->s2 ^ state->s3);
++ return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
+ }
+-EXPORT_SYMBOL(prandom32);
++EXPORT_SYMBOL(prandom_u32_state);
+
+ /**
+- * random32 - pseudo random number generator
++ * prandom_u32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+-u32 random32(void)
++u32 prandom_u32(void)
+ {
+ unsigned long r;
+ struct rnd_state *state = &get_cpu_var(net_rand_state);
+- r = prandom32(state);
++ r = prandom_u32_state(state);
+ put_cpu_var(state);
+ return r;
+ }
+-EXPORT_SYMBOL(random32);
++EXPORT_SYMBOL(prandom_u32);
++
++/*
++ * prandom_bytes_state - get the requested number of pseudo-random bytes
++ *
++ * @state: pointer to state structure holding seeded state.
++ * @buf: where to copy the pseudo-random bytes to
++ * @bytes: the requested number of bytes
++ *
++ * This is used for pseudo-randomness with no outside seeding.
++ * For more random results, use prandom_bytes().
++ */
++void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
++{
++ unsigned char *p = buf;
++ int i;
++
++ for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
++ u32 random = prandom_u32_state(state);
++ int j;
++
++ for (j = 0; j < sizeof(u32); j++) {
++ p[i + j] = random;
++ random >>= BITS_PER_BYTE;
++ }
++ }
++ if (i < bytes) {
++ u32 random = prandom_u32_state(state);
++
++ for (; i < bytes; i++) {
++ p[i] = random;
++ random >>= BITS_PER_BYTE;
++ }
++ }
++}
++EXPORT_SYMBOL(prandom_bytes_state);
++
++/**
++ * prandom_bytes - get the requested number of pseudo-random bytes
++ * @buf: where to copy the pseudo-random bytes to
++ * @bytes: the requested number of bytes
++ */
++void prandom_bytes(void *buf, int bytes)
++{
++ struct rnd_state *state = &get_cpu_var(net_rand_state);
++
++ prandom_bytes_state(state, buf, bytes);
++ put_cpu_var(state);
++}
++EXPORT_SYMBOL(prandom_bytes);
++
++static void prandom_warmup(struct rnd_state *state)
++{
++	/* Calling RNG ten times to satisfy recurrence condition */
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++ prandom_u32_state(state);
++}
++
++static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
++{
++	/* Note: This sort of seeding is ONLY used in test cases and
++	 * during boot, from core_initcall until late_initcall, as we
++	 * don't have a stronger entropy source available yet.
++	 * After late_initcall we have to (!) reseed the entire state,
++	 * otherwise an attacker only needs to search a 32 bit space to
++	 * probe for our internal 128 bit state if they know a couple
++	 * of prandom32 outputs!
++	 */
++#define LCG(x) ((x) * 69069U) /* super-duper LCG */
++ state->s1 = __seed(LCG(seed), 2U);
++ state->s2 = __seed(LCG(state->s1), 8U);
++ state->s3 = __seed(LCG(state->s2), 16U);
++ state->s4 = __seed(LCG(state->s3), 128U);
++}
+
+ /**
+- * srandom32 - add entropy to pseudo random number generator
++ * prandom_seed - add entropy to pseudo random number generator
+ * @seed: seed value
+ *
+- * Add some additional seeding to the random32() pool.
++ * Add some additional seeding to the prandom pool.
+ */
+-void srandom32(u32 entropy)
++void prandom_seed(u32 entropy)
+ {
+ int i;
+ /*
+@@ -92,59 +179,264 @@ void srandom32(u32 entropy)
+ */
+ for_each_possible_cpu (i) {
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
+- state->s1 = __seed(state->s1 ^ entropy, 2);
++
++ state->s1 = __seed(state->s1 ^ entropy, 2U);
++ prandom_warmup(state);
+ }
+ }
+-EXPORT_SYMBOL(srandom32);
++EXPORT_SYMBOL(prandom_seed);
+
+ /*
+ * Generate some initially weak seeding values to allow
+- * to start the random32() engine.
++ * to start the prandom_u32() engine.
+ */
+-static int __init random32_init(void)
++static int __init prandom_init(void)
+ {
+ int i;
+
++#ifdef CONFIG_RANDOM32_SELFTEST
++ prandom_state_selftest();
++#endif
++
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+-#define LCG(x) ((x) * 69069) /* super-duper LCG */
+- state->s1 = __seed(LCG(i + jiffies), 2);
+- state->s2 = __seed(LCG(state->s1), 8);
+- state->s3 = __seed(LCG(state->s2), 16);
+-
+- /* "warm it up" */
+- prandom32(state);
+- prandom32(state);
+- prandom32(state);
+- prandom32(state);
+- prandom32(state);
+- prandom32(state);
++ prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
++ prandom_warmup(state);
+ }
+ return 0;
+ }
+-core_initcall(random32_init);
++core_initcall(prandom_init);
++
++static void __prandom_timer(unsigned long dontcare);
++static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
++
++static void __prandom_timer(unsigned long dontcare)
++{
++ u32 entropy;
++ unsigned long expires;
++
++ get_random_bytes(&entropy, sizeof(entropy));
++ prandom_seed(entropy);
++
++ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
++ expires = 40 + (prandom_u32() % 40);
++ seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
++
++ add_timer(&seed_timer);
++}
++
++static void __init __prandom_start_seed_timer(void)
++{
++ set_timer_slack(&seed_timer, HZ);
++ seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
++ add_timer(&seed_timer);
++}
+
+ /*
+ * Generate better values after random number generator
+ * is fully initialized.
+ */
+-static int __init random32_reseed(void)
++static void __prandom_reseed(bool late)
+ {
+ int i;
++ unsigned long flags;
++ static bool latch = false;
++ static DEFINE_SPINLOCK(lock);
++
++ /* only allow initial seeding (late == false) once */
++ spin_lock_irqsave(&lock, flags);
++ if (latch && !late)
++ goto out;
++ latch = true;
+
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+- u32 seeds[3];
++ u32 seeds[4];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+- state->s1 = __seed(seeds[0], 2);
+- state->s2 = __seed(seeds[1], 8);
+- state->s3 = __seed(seeds[2], 16);
++ state->s1 = __seed(seeds[0], 2U);
++ state->s2 = __seed(seeds[1], 8U);
++ state->s3 = __seed(seeds[2], 16U);
++ state->s4 = __seed(seeds[3], 128U);
+
+- /* mix it in */
+- prandom32(state);
++ prandom_warmup(state);
+ }
++out:
++ spin_unlock_irqrestore(&lock, flags);
++}
++
++void prandom_reseed_late(void)
++{
++ __prandom_reseed(true);
++}
++
++static int __init prandom_reseed(void)
++{
++ __prandom_reseed(false);
++ __prandom_start_seed_timer();
+ return 0;
+ }
+-late_initcall(random32_reseed);
++late_initcall(prandom_reseed);
++
++#ifdef CONFIG_RANDOM32_SELFTEST
++static struct prandom_test1 {
++ u32 seed;
++ u32 result;
++} test1[] = {
++ { 1U, 3484351685U },
++ { 2U, 2623130059U },
++ { 3U, 3125133893U },
++ { 4U, 984847254U },
++};
++
++static struct prandom_test2 {
++ u32 seed;
++ u32 iteration;
++ u32 result;
++} test2[] = {
++ /* Test cases against taus113 from GSL library. */
++ { 931557656U, 959U, 2975593782U },
++ { 1339693295U, 876U, 3887776532U },
++ { 1545556285U, 961U, 1615538833U },
++ { 601730776U, 723U, 1776162651U },
++ { 1027516047U, 687U, 511983079U },
++ { 416526298U, 700U, 916156552U },
++ { 1395522032U, 652U, 2222063676U },
++ { 366221443U, 617U, 2992857763U },
++ { 1539836965U, 714U, 3783265725U },
++ { 556206671U, 994U, 799626459U },
++ { 684907218U, 799U, 367789491U },
++ { 2121230701U, 931U, 2115467001U },
++ { 1668516451U, 644U, 3620590685U },
++ { 768046066U, 883U, 2034077390U },
++ { 1989159136U, 833U, 1195767305U },
++ { 536585145U, 996U, 3577259204U },
++ { 1008129373U, 642U, 1478080776U },
++ { 1740775604U, 939U, 1264980372U },
++ { 1967883163U, 508U, 10734624U },
++ { 1923019697U, 730U, 3821419629U },
++ { 442079932U, 560U, 3440032343U },
++ { 1961302714U, 845U, 841962572U },
++ { 2030205964U, 962U, 1325144227U },
++ { 1160407529U, 507U, 240940858U },
++ { 635482502U, 779U, 4200489746U },
++ { 1252788931U, 699U, 867195434U },
++ { 1961817131U, 719U, 668237657U },
++ { 1071468216U, 983U, 917876630U },
++ { 1281848367U, 932U, 1003100039U },
++ { 582537119U, 780U, 1127273778U },
++ { 1973672777U, 853U, 1071368872U },
++ { 1896756996U, 762U, 1127851055U },
++ { 847917054U, 500U, 1717499075U },
++ { 1240520510U, 951U, 2849576657U },
++ { 1685071682U, 567U, 1961810396U },
++ { 1516232129U, 557U, 3173877U },
++ { 1208118903U, 612U, 1613145022U },
++ { 1817269927U, 693U, 4279122573U },
++ { 1510091701U, 717U, 638191229U },
++ { 365916850U, 807U, 600424314U },
++ { 399324359U, 702U, 1803598116U },
++ { 1318480274U, 779U, 2074237022U },
++ { 697758115U, 840U, 1483639402U },
++ { 1696507773U, 840U, 577415447U },
++ { 2081979121U, 981U, 3041486449U },
++ { 955646687U, 742U, 3846494357U },
++ { 1250683506U, 749U, 836419859U },
++ { 595003102U, 534U, 366794109U },
++ { 47485338U, 558U, 3521120834U },
++ { 619433479U, 610U, 3991783875U },
++ { 704096520U, 518U, 4139493852U },
++ { 1712224984U, 606U, 2393312003U },
++ { 1318233152U, 922U, 3880361134U },
++ { 855572992U, 761U, 1472974787U },
++ { 64721421U, 703U, 683860550U },
++ { 678931758U, 840U, 380616043U },
++ { 692711973U, 778U, 1382361947U },
++ { 677703619U, 530U, 2826914161U },
++ { 92393223U, 586U, 1522128471U },
++ { 1222592920U, 743U, 3466726667U },
++ { 358288986U, 695U, 1091956998U },
++ { 1935056945U, 958U, 514864477U },
++ { 735675993U, 990U, 1294239989U },
++ { 1560089402U, 897U, 2238551287U },
++ { 70616361U, 829U, 22483098U },
++ { 368234700U, 731U, 2913875084U },
++ { 20221190U, 879U, 1564152970U },
++ { 539444654U, 682U, 1835141259U },
++ { 1314987297U, 840U, 1801114136U },
++ { 2019295544U, 645U, 3286438930U },
++ { 469023838U, 716U, 1637918202U },
++ { 1843754496U, 653U, 2562092152U },
++ { 400672036U, 809U, 4264212785U },
++ { 404722249U, 965U, 2704116999U },
++ { 600702209U, 758U, 584979986U },
++ { 519953954U, 667U, 2574436237U },
++ { 1658071126U, 694U, 2214569490U },
++ { 420480037U, 749U, 3430010866U },
++ { 690103647U, 969U, 3700758083U },
++ { 1029424799U, 937U, 3787746841U },
++ { 2012608669U, 506U, 3362628973U },
++ { 1535432887U, 998U, 42610943U },
++ { 1330635533U, 857U, 3040806504U },
++ { 1223800550U, 539U, 3954229517U },
++ { 1322411537U, 680U, 3223250324U },
++ { 1877847898U, 945U, 2915147143U },
++ { 1646356099U, 874U, 965988280U },
++ { 805687536U, 744U, 4032277920U },
++ { 1948093210U, 633U, 1346597684U },
++ { 392609744U, 783U, 1636083295U },
++ { 690241304U, 770U, 1201031298U },
++ { 1360302965U, 696U, 1665394461U },
++ { 1220090946U, 780U, 1316922812U },
++ { 447092251U, 500U, 3438743375U },
++ { 1613868791U, 592U, 828546883U },
++ { 523430951U, 548U, 2552392304U },
++ { 726692899U, 810U, 1656872867U },
++ { 1364340021U, 836U, 3710513486U },
++ { 1986257729U, 931U, 935013962U },
++ { 407983964U, 921U, 728767059U },
++};
++
++static void __init prandom_state_selftest(void)
++{
++ int i, j, errors = 0, runs = 0;
++ bool error = false;
++
++ for (i = 0; i < ARRAY_SIZE(test1); i++) {
++ struct rnd_state state;
++
++ prandom_seed_very_weak(&state, test1[i].seed);
++ prandom_warmup(&state);
++
++ if (test1[i].result != prandom_u32_state(&state))
++ error = true;
++ }
++
++ if (error)
++ pr_warn("prandom: seed boundary self test failed\n");
++ else
++ pr_info("prandom: seed boundary self test passed\n");
++
++ for (i = 0; i < ARRAY_SIZE(test2); i++) {
++ struct rnd_state state;
++
++ prandom_seed_very_weak(&state, test2[i].seed);
++ prandom_warmup(&state);
++
++ for (j = 0; j < test2[i].iteration - 1; j++)
++ prandom_u32_state(&state);
++
++ if (test2[i].result != prandom_u32_state(&state))
++ errors++;
++
++ runs++;
++ cond_resched();
++ }
++
++ if (errors)
++ pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
++ else
++ pr_info("prandom: %d self tests passed\n", runs);
++}
++#endif
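The rewrite above replaces the three-component taus88 generator with taus113: four Tausworthe components XORed together, period about 2^113, seeded by the weak LCG only until real entropy is available and periodically reseeded afterwards. A userspace sketch of the recurrence and the seeding rules, with the shift constants and the s1 > 1, s2 > 7, s3 > 15, s4 > 127 minimums taken from the hunk above (seed_min() stands in for the kernel's __seed()):

#include <stdint.h>
#include <stdio.h>

struct rnd_state { uint32_t s1, s2, s3, s4; };

/* Bump a seed up past the per-component minimum, as __seed() does. */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

#define TAUSWORTHE(s, a, b, c, d) \
	((((s) & (c)) << (d)) ^ ((((s) << (a)) ^ (s)) >> (b)))

static uint32_t taus113_next(struct rnd_state *st)
{
	st->s1 = TAUSWORTHE(st->s1, 6U, 13U, 4294967294U, 18U);
	st->s2 = TAUSWORTHE(st->s2, 2U, 27U, 4294967288U, 2U);
	st->s3 = TAUSWORTHE(st->s3, 13U, 21U, 4294967280U, 7U);
	st->s4 = TAUSWORTHE(st->s4, 3U, 12U, 4294967168U, 13U);
	return st->s1 ^ st->s2 ^ st->s3 ^ st->s4;
}

#define LCG(x) ((x) * 69069U)	/* same "super-duper" LCG as the boot seed */

int main(void)
{
	struct rnd_state st;
	uint32_t seed = 1U;
	int i;

	st.s1 = seed_min(LCG(seed), 2U);
	st.s2 = seed_min(LCG(st.s1), 8U);
	st.s3 = seed_min(LCG(st.s2), 16U);
	st.s4 = seed_min(LCG(st.s3), 128U);

	for (i = 0; i < 10; i++)	/* warm-up, as prandom_warmup() does */
		taus113_next(&st);

	/* Should print 3484351685, the first test1[] vector above. */
	printf("%u\n", taus113_next(&st));
	return 0;
}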
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index ae02e42..4ffc938 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -16,6 +16,9 @@
+ * - scnprintf and vscnprintf
+ */
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <stdarg.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -414,7 +417,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
+ char sym[KSYM_SYMBOL_LEN];
+ if (ext == 'B')
+ sprint_backtrace(sym, value);
+- else if (ext != 'f' && ext != 's')
++ else if (ext != 'f' && ext != 's' && ext != 'a')
+ sprint_symbol(sym, value);
+ else
+ kallsyms_lookup(value, NULL, NULL, NULL, sym);
+@@ -778,7 +781,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
+ return string(buf, end, uuid, spec);
+ }
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++int kptr_restrict __read_mostly = 2;
++#else
+ int kptr_restrict __read_mostly;
++#endif
+
+ /*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+@@ -792,6 +799,8 @@ int kptr_restrict __read_mostly;
+ * - 'S' For symbolic direct pointers with offset
+ * - 's' For symbolic direct pointers without offset
+ * - 'B' For backtraced symbolic direct pointers with offset
++ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
++ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'M' For a 6-byte MAC address, it prints the address in the
+@@ -836,12 +845,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ {
+ if (!ptr && *fmt != 'K') {
+ /*
+- * Print (null) with the same width as a pointer so it makes
++ * Print (nil) with the same width as a pointer so it makes
+ * tabular output look nice.
+ */
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+- return string(buf, end, "(null)", spec);
++ return string(buf, end, "(nil)", spec);
+ }
+
+ switch (*fmt) {
+@@ -851,6 +860,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ /* Fallthrough */
+ case 'S':
+ case 's':
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ break;
++#else
++ return symbol_string(buf, end, ptr, spec, *fmt);
++#endif
++ case 'A':
++ case 'a':
+ case 'B':
+ return symbol_string(buf, end, ptr, spec, *fmt);
+ case 'R':
+@@ -879,9 +895,17 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
+ case 'V':
+- return buf + vsnprintf(buf, end > buf ? end - buf : 0,
+- ((struct va_format *)ptr)->fmt,
+- *(((struct va_format *)ptr)->va));
++ {
++ va_list va;
++
++ va_copy(va, *((struct va_format *)ptr)->va);
++ buf += vsnprintf(buf, end > buf ? end - buf : 0,
++ ((struct va_format *)ptr)->fmt, va);
++ va_end(va);
++ return buf;
++ }
++ case 'P':
++ break;
+ case 'K':
+ /*
+ * %pK cannot be used in IRQ context because its test
+@@ -924,6 +948,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ }
+ break;
+ }
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++	/* 'P' = approved pointers to copy to userland,
++	   as in the /proc/kallsyms case, as we make it display nothing
++	   for non-root users, and the real contents for root users.
++	   Also ignore 'K' pointers, since we force their NULLing for
++	   non-root users above.
++	 */
++ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
++ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
++ dump_stack();
++ ptr = NULL;
++ }
++#endif
++
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(void *);
+@@ -1635,11 +1674,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+ typeof(type) value; \
+ if (sizeof(type) == 8) { \
+ args = PTR_ALIGN(args, sizeof(u32)); \
+- *(u32 *)&value = *(u32 *)args; \
+- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
++ *(u32 *)&value = *(const u32 *)args; \
++ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
+ } else { \
+ args = PTR_ALIGN(args, sizeof(type)); \
+- value = *(typeof(type) *)args; \
++ value = *(const typeof(type) *)args; \
+ } \
+ args += sizeof(type); \
+ value; \
+@@ -1702,7 +1741,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+ case FORMAT_TYPE_STR: {
+ const char *str_arg = args;
+ args += strlen(str_arg) + 1;
+- str = string(str, end, (char *)str_arg, spec);
++ str = string(str, end, str_arg, spec);
+ break;
+ }
+
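The %pV change above is a use-after-consume fix for va_list: vsnprintf() is entitled to walk the list, so when the same struct va_format may be expanded more than once, the patch formats from a va_copy() and leaves the caller's list untouched. The portable rule, sketched in userspace:

#include <stdarg.h>
#include <stdio.h>

/* Format twice from one va_list: first to measure, then to fill.
 * Each pass runs on its own va_copy(); the caller's ap is never
 * consumed directly, which is exactly the property the hunk restores.
 */
static int format_twice(char *buf, size_t size, const char *fmt, va_list ap)
{
	va_list ap1, ap2;
	int need;

	va_copy(ap1, ap);
	need = vsnprintf(NULL, 0, fmt, ap1);	/* sizing pass */
	va_end(ap1);

	va_copy(ap2, ap);
	vsnprintf(buf, size, fmt, ap2);		/* filling pass */
	va_end(ap2);

	return need;
}

static int demo(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int need;

	va_start(ap, fmt);
	need = format_twice(buf, size, fmt, ap);
	va_end(ap);
	return need;
}

int main(void)
{
	char buf[32];
	int need = demo(buf, sizeof(buf), "%s-%d", "bdi", 7);

	printf("need=%d buf=%s\n", need, buf);	/* need=5 buf=bdi-7 */
	return 0;
}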
+diff --git a/localversion-grsec b/localversion-grsec
+new file mode 100644
+index 0000000..7cd6065
+--- /dev/null
++++ b/localversion-grsec
+@@ -0,0 +1 @@
++-grsec
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 011b110..05d1b6f 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -241,10 +241,11 @@ config KSM
+ root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+
+ config DEFAULT_MMAP_MIN_ADDR
+- int "Low address space to protect from user allocation"
++ int "Low address space to protect from user allocation"
+ depends on MMU
+- default 4096
+- help
++ default 32768 if ALPHA || ARM || PARISC || SPARC32
++ default 65536
++ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+ can help reduce the impact of kernel NULL pointer bugs.
+@@ -274,7 +275,7 @@ config MEMORY_FAILURE
+
+ config HWPOISON_INJECT
+ tristate "HWPoison pages injector"
+- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
++ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
+ select PROC_PAGE_MONITOR
+
+ config NOMMU_INITIAL_TRIM_EXCESS
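Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 (32768 on the listed architectures) widens the band of low virtual addresses userspace may not map, so a kernel NULL-or-small-offset pointer dereference cannot land on attacker-controlled data. A quick unprivileged probe of the resulting sysctl behaviour (Linux-specific, illustrative only):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Try to map the zero page; with vm.mmap_min_addr > 0 this is
	 * expected to fail for unprivileged processes.
	 */
	void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap at 0 rejected (as intended)");
	else
		printf("mapped page zero at %p - mmap_min_addr is 0?\n", p);
	return 0;
}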
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 2b49dd2..0527d62 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -12,7 +12,7 @@
+ #include <linux/device.h>
+ #include <trace/events/writeback.h>
+
+-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
++static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
+
+ struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
+@@ -759,7 +759,6 @@ EXPORT_SYMBOL(bdi_destroy);
+ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+ unsigned int cap)
+ {
+- char tmp[32];
+ int err;
+
+ bdi->name = name;
+@@ -768,8 +767,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
+ if (err)
+ return err;
+
+- sprintf(tmp, "%.28s%s", name, "-%d");
+- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
++ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return_unchecked(&bdi_seq));
+ if (err) {
+ bdi_destroy(bdi);
+ return err;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 556858c..71a567d 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1773,7 +1773,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+- return -ENOEXEC;
++ return -ENODEV;
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+@@ -2021,7 +2021,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+
+ while (bytes) {
+ char __user *buf = iov->iov_base + base;
+- int copy = min(bytes, iov->iov_len - base);
++ size_t copy = min(bytes, iov->iov_len - base);
+
+ base = 0;
+ left = __copy_from_user_inatomic(vaddr, buf, copy);
+@@ -2050,7 +2050,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ BUG_ON(!in_atomic());
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (likely(i->nr_segs == 1)) {
+- int left;
++ size_t left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+@@ -2078,7 +2078,7 @@ size_t iov_iter_copy_from_user(struct page *page,
+
+ kaddr = kmap(page);
+ if (likely(i->nr_segs == 1)) {
+- int left;
++ size_t left;
+ char __user *buf = i->iov->iov_base + i->iov_offset;
+ left = __copy_from_user(kaddr + offset, buf, bytes);
+ copied = bytes - left;
+@@ -2108,7 +2108,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
+ * zero-length segments (without overruning the iovec).
+ */
+ while (bytes || unlikely(i->count && !iov->iov_len)) {
+- int copy;
++ size_t copy;
+
+ copy = min(bytes, iov->iov_len - base);
+ BUG_ON(!i->count || i->count < copy);
+@@ -2179,6 +2179,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
+ *pos = i_size_read(inode);
+
+ if (limit != RLIM_INFINITY) {
++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+ if (*pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
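The filemap.c hunks above widen several copy lengths from int to size_t: with iovec segments larger than 2 GiB, storing min(bytes, iov->iov_len - base) in an int truncates (typically going negative), and the later copied = bytes - left arithmetic is then wrong. A small sketch of the truncation, assuming a 64-bit userland:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t bytes = 3UL * 1024 * 1024 * 1024;	/* 3 GiB requested */
	size_t iov_len = 4UL * 1024 * 1024 * 1024;	/* 4 GiB segment */

	size_t copy_ok = bytes < iov_len ? bytes : iov_len;
	/* The value exceeds INT_MAX, so the conversion below is
	 * implementation-defined; on typical ABIs it comes out negative.
	 */
	int copy_bad = bytes < iov_len ? bytes : iov_len;

	printf("size_t copy: %zu\n", copy_ok);	/* 3221225472 */
	printf("int    copy: %d\n", copy_bad);
	return 0;
}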
+diff --git a/mm/fremap.c b/mm/fremap.c
+index 9ed4fd4..c42648d 100644
+--- a/mm/fremap.c
++++ b/mm/fremap.c
+@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ retry:
+ vma = find_vma(mm, start);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
++ goto out;
++#endif
++
+ /*
+ * Make sure the vma is shared, that it supports prefaulting,
+ * and that the remapped range is valid and fully within
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 2a07f97..2cdc054 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
+ * So no dangers, even with speculative execution.
+ */
+ page = pte_page(pkmap_page_table[i]);
++ pax_open_kernel();
+ pte_clear(&init_mm, (unsigned long)page_address(page),
+ &pkmap_page_table[i]);
+-
++ pax_close_kernel();
+ set_page_address(page, NULL);
+ need_flush = 1;
+ }
+@@ -199,9 +200,11 @@ start:
+ }
+ }
+ vaddr = PKMAP_ADDR(last_pkmap_nr);
++
++ pax_open_kernel();
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+-
++ pax_close_kernel();
+ pkmap_count[last_pkmap_nr] = 1;
+ set_page_address(page, (void *)vaddr);
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index ed0ed8a..cc835b9 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -704,7 +704,7 @@ out:
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
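The one-line fix above re-tests pmd_none(*pmd) before calling __pte_alloc(), so a huge pmd that materialized from another thread in the meantime is not clobbered by a fresh page-table allocation. The shape of the fix is the familiar check-again-before-allocating pattern; a loose userspace analogue using a mutex (the kernel path reaches this point without such a lock, which is exactly why the re-check matters):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;	/* lazily allocated, analogous to the pte table */

/* Allocate only if nobody beat us to it: the moral equivalent of
 * re-testing pmd_none(*pmd) before calling __pte_alloc() above.
 */
static int *get_table(void)
{
	pthread_mutex_lock(&lock);
	if (table == NULL)	/* re-check before allocating */
		table = calloc(512, sizeof(*table));
	pthread_mutex_unlock(&lock);
	return table;
}

static void *worker(void *arg)
{
	(void)arg;
	return get_table();
}

int main(void)
{
	pthread_t t1, t2;
	void *r1, *r2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, &r1);
	pthread_join(t2, &r2);
	printf("same table: %s\n", r1 == r2 ? "yes" : "no");
	free(table);
	return 0;
}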
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ddf2128..af57f40 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1990,15 +1990,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+ struct hstate *h = &default_hstate;
+ unsigned long tmp;
+ int ret;
++ ctl_table_no_const hugetlb_table;
+
+ tmp = h->max_huge_pages;
+
+ if (write && h->order >= MAX_ORDER)
+ return -EINVAL;
+
+- table->data = &tmp;
+- table->maxlen = sizeof(unsigned long);
+- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++ hugetlb_table = *table;
++ hugetlb_table.data = &tmp;
++ hugetlb_table.maxlen = sizeof(unsigned long);
++ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
+ if (ret)
+ goto out;
+
+@@ -2055,15 +2057,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+ struct hstate *h = &default_hstate;
+ unsigned long tmp;
+ int ret;
++ ctl_table_no_const hugetlb_table;
+
+ tmp = h->nr_overcommit_huge_pages;
+
+ if (write && h->order >= MAX_ORDER)
+ return -EINVAL;
+
+- table->data = &tmp;
+- table->maxlen = sizeof(unsigned long);
+- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
++ hugetlb_table = *table;
++ hugetlb_table.data = &tmp;
++ hugetlb_table.maxlen = sizeof(unsigned long);
++ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
+ if (ret)
+ goto out;
+
+@@ -2482,6 +2486,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ return 1;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct vm_area_struct *vma_m;
++ unsigned long address_m;
++ pte_t *ptep_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
++ get_page(page_m);
++ hugepage_add_anon_rmap(page_m, vma_m, address_m);
++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
++}
++#endif
++
+ /*
+ * Hugetlb_cow() should be called with page lock of the original hugepage held.
+ */
+@@ -2584,6 +2609,11 @@ retry_avoidcopy:
+ make_huge_pte(vma, new_page, 1));
+ page_remove_rmap(old_page);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, new_page);
++#endif
++
+ /* Make the old page be freed below */
+ new_page = old_page;
+ mmu_notifier_invalidate_range_end(mm,
+@@ -2735,6 +2765,10 @@ retry:
+ && (vma->vm_flags & VM_SHARED)));
+ set_huge_pte_at(mm, address, ptep, new_pte);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_huge_pte(vma, address, page);
++#endif
++
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+@@ -2764,6 +2798,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+ struct hstate *h = hstate_vma(vma);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ ptep = huge_pte_offset(mm, address);
+ if (ptep) {
+ entry = huge_ptep_get(ptep);
+@@ -2775,6 +2813,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ VM_FAULT_SET_HINDEX(h - hstates);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ h = hstate_vma(vma);
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
++ return VM_FAULT_OOM;
++ address_m &= HPAGE_MASK;
++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
++ }
++#endif
++
+ ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+ if (!ptep)
+ return VM_FAULT_OOM;
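Both sysctl handlers above stop writing through the shared, now effectively read-only ctl_table and instead mutate a stack copy (ctl_table_no_const hugetlb_table = *table) before handing it to proc_doulongvec_minmax(). The general pattern, sketched in plain C with hypothetical stand-ins for the kernel types:

#include <stdio.h>

/* Hypothetical stand-ins for ctl_table / proc_doulongvec_minmax(). */
struct ctl_entry {
	const char *name;
	void *data;
	int maxlen;
};

static void handle(struct ctl_entry *e)
{
	printf("handler sees %s, maxlen=%d, data=%p\n",
	       e->name, e->maxlen, e->data);
}

int main(void)
{
	static const struct ctl_entry shared = { "nr_hugepages", NULL, 0 };
	unsigned long tmp = 128;

	/* Never cast away const and scribble on the shared entry;
	 * take a local copy and point it at scratch storage instead.
	 */
	struct ctl_entry local = shared;

	local.data = &tmp;
	local.maxlen = sizeof(tmp);
	handle(&local);
	return 0;
}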
+diff --git a/mm/internal.h b/mm/internal.h
+index 0c26b5e..1cc340f 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
+ * in mm/page_alloc.c
+ */
+ extern void __free_pages_bootmem(struct page *page, unsigned int order);
++extern void free_compound_page(struct page *page);
+ extern void prep_compound_page(struct page *page, unsigned long order);
+ #ifdef CONFIG_MEMORY_FAILURE
+ extern bool is_free_buddy_page(struct page *page);
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index f3b2a00..5899e43 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
+
+ for (i = 0; i < object->trace_len; i++) {
+ void *ptr = (void *)object->trace[i];
+- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
++ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
+ }
+ }
+
+@@ -1745,7 +1745,7 @@ static int __init kmemleak_late_init(void)
+ return -ENOMEM;
+ }
+
+- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
++ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
+ &kmemleak_fops);
+ if (!dentry)
+ pr_warning("Failed to create the debugfs kmemleak file\n");
+diff --git a/mm/maccess.c b/mm/maccess.c
+index d53adf9..03a24bf 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dst,
+- (__force const void __user *)src, size);
++ (const void __force_user *)src, size);
+ pagefault_enable();
+ set_fs(old_fs);
+
+@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
++ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+ pagefault_enable();
+ set_fs(old_fs);
+
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 23d3a6b..e10d35a 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
+ pgoff_t pgoff;
+ unsigned long new_flags = vma->vm_flags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ switch (behavior) {
+ case MADV_NORMAL:
+ new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
+@@ -111,6 +115,13 @@ success:
+ /*
+ * vm_flags is protected by the mmap_sem held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
++#endif
++
+ vma->vm_flags = new_flags;
+
+ out:
+@@ -169,6 +180,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
+ struct vm_area_struct ** prev,
+ unsigned long start, unsigned long end)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ *prev = vma;
+ if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+ return -EINVAL;
+@@ -181,6 +197,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
+ zap_page_range(vma, start, end - start, &details);
+ } else
+ zap_page_range(vma, start, end - start, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
++ struct zap_details details = {
++ .nonlinear_vma = vma_m,
++ .last_index = ULONG_MAX,
++ };
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
++ } else
++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
++ }
++#endif
++
+ return 0;
+ }
+
+@@ -386,6 +417,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+ if (end < start)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ goto out;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ goto out;
++
+ error = 0;
+ if (end == start)
+ goto out;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 1b03878..d62c02b 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
+
+ int sysctl_memory_failure_recovery __read_mostly = 1;
+
+-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+
+ #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+
+@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_MCEERR_AO;
+- si.si_addr = (void *)addr;
++ si.si_addr = (void __user *)addr;
+ #ifdef __ARCH_SI_TRAPNO
+ si.si_trapno = trapno;
+ #endif
+@@ -750,7 +750,7 @@ static struct page_state {
+ unsigned long res;
+ char *msg;
+ int (*action)(struct page *p, unsigned long pfn);
+-} error_states[] = {
++} __do_const error_states[] = {
+ { reserved, reserved, "reserved kernel", me_kernel },
+ /*
+ * free pages are specially detected outside this table:
+@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ }
+
+ nr_pages = 1 << compound_trans_order(hpage);
+- atomic_long_add(nr_pages, &mce_bad_pages);
++ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
+
+ /*
+ * We need/can do nothing about count=0 pages.
+@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ if (!PageHWPoison(hpage)
+ || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ return 0;
+ }
+ set_page_hwpoison_huge_page(hpage);
+@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ }
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ unlock_page(hpage);
+ put_page(hpage);
+ return 0;
+@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
+ return 0;
+ }
+ if (TestClearPageHWPoison(p))
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ return 0;
+ }
+@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
+ */
+ if (TestClearPageHWPoison(page)) {
+ pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
+- atomic_long_sub(nr_pages, &mce_bad_pages);
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
+ freeit = 1;
+ if (PageHuge(page))
+ clear_page_hwpoison_huge_page(page);
+@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ }
+ done:
+ if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ /* keep elevated page count for bad page */
+@@ -1581,7 +1581,7 @@ int soft_offline_page(struct page *page, int flags)
+ return ret;
+
+ done:
+- atomic_long_add(1, &mce_bad_pages);
++ atomic_long_add_unchecked(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
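mce_bad_pages switches to atomic_long_unchecked_t: under PaX's overflow-protected refcounts, ordinary atomic operations trap on wraparound, so counters that are mere statistics and may legitimately wrap are moved to the unchecked variants. A sketch of the distinction using the GCC/Clang overflow builtins (this models the idea only, not the PaX implementation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" add in the spirit of PaX REFCOUNT: trap on signed overflow. */
static long checked_add(long *v, long n)
{
	long r;

	if (__builtin_add_overflow(*v, n, &r)) {
		fprintf(stderr, "counter overflow detected, aborting\n");
		abort();
	}
	return *v = r;
}

/* "Unchecked" add for pure statistics: unsigned wraparound is defined. */
static unsigned long unchecked_add(unsigned long *v, unsigned long n)
{
	return *v += n;
}

int main(void)
{
	unsigned long stat = ULONG_MAX - 1;
	long ref = LONG_MAX;

	unchecked_add(&stat, 4);	/* wraps silently: fine for a statistic */
	printf("stat after wrap-prone add: %lu\n", stat);	/* prints 2 */

	checked_add(&ref, 1);		/* traps: a refcount must never wrap */
	return 0;
}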
+diff --git a/mm/memory.c b/mm/memory.c
+index d5f913b..6f403ad 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -462,8 +462,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ return;
+
+ pmd = pmd_offset(pud, start);
++
++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
++#endif
++
+ }
+
+ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -494,9 +498,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+ if (end - 1 > ceiling - 1)
+ return;
+
++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
+ pud = pud_offset(pgd, start);
+ pgd_clear(pgd);
+ pud_free_tlb(tlb, pud, start);
++#endif
++
+ }
+
+ /*
+@@ -1582,12 +1589,6 @@ no_page_table:
+ return page;
+ }
+
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return stack_guard_page_start(vma, addr) ||
+- stack_guard_page_end(vma, addr+PAGE_SIZE);
+-}
+-
+ /**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk: task_struct of target task
+@@ -1660,10 +1661,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+- do {
++ while (nr_pages) {
+ struct vm_area_struct *vma;
+
+- vma = find_extend_vma(mm, start);
++ vma = find_vma(mm, start);
+ if (!vma && in_gate_area(mm, start)) {
+ unsigned long pg = start & PAGE_MASK;
+ pgd_t *pgd;
+@@ -1711,7 +1712,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ goto next_page;
+ }
+
+- if (!vma ||
++ if (!vma || start < vma->vm_start ||
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+@@ -1738,11 +1739,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ int ret;
+ unsigned int fault_flags = 0;
+
+- /* For mlock, just skip the stack guard page. */
+- if (foll_flags & FOLL_MLOCK) {
+- if (stack_guard_page(vma, start))
+- goto next_page;
+- }
+ if (foll_flags & FOLL_WRITE)
+ fault_flags |= FAULT_FLAG_WRITE;
+ if (nonblocking)
+@@ -1816,7 +1812,7 @@ next_page:
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+- } while (nr_pages);
++ }
+ return i;
+ }
+ EXPORT_SYMBOL(__get_user_pages);
+@@ -2023,6 +2019,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+ page_add_file_rmap(page);
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_file_pte(vma, addr, page, ptl);
++#endif
++
+ retval = 0;
+ pte_unmap_unlock(pte, ptl);
+ return retval;
+@@ -2057,10 +2057,22 @@ out:
+ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ if (!page_count(page))
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m)
++ vma_m->vm_flags |= VM_INSERTPAGE;
++#endif
++
+ vma->vm_flags |= VM_INSERTPAGE;
+ return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+@@ -2146,6 +2158,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+ {
+ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
++ BUG_ON(vma->vm_mirror);
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+@@ -2400,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+
+ BUG_ON(pud_huge(*pud));
+
+- pmd = pmd_alloc(mm, pud, addr);
++ pmd = (mm == &init_mm) ?
++ pmd_alloc_kernel(mm, pud, addr) :
++ pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -2420,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long next;
+ int err;
+
+- pud = pud_alloc(mm, pgd, addr);
++ pud = (mm == &init_mm) ?
++ pud_alloc_kernel(mm, pgd, addr) :
++ pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+@@ -2508,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+ copy_user_highpage(dst, src, va, vma);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ spinlock_t *ptl;
++ pte_t *pte, entry;
++
++ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
++ entry = *pte;
++ if (!pte_present(entry)) {
++ if (!pte_none(entry)) {
++ BUG_ON(pte_file(entry));
++ free_swap_and_cache(pte_to_swp_entry(entry));
++ pte_clear_not_present_full(mm, address, pte, 0);
++ }
++ } else {
++ struct page *page;
++
++ flush_cache_page(vma, address, pte_pfn(entry));
++ entry = ptep_clear_flush(vma, address, pte);
++ BUG_ON(pte_dirty(entry));
++ page = vm_normal_page(vma, address, entry);
++ if (page) {
++ update_hiwater_rss(mm);
++ if (PageAnon(page))
++ dec_mm_counter_fast(mm, MM_ANONPAGES);
++ else
++ dec_mm_counter_fast(mm, MM_FILEPAGES);
++ page_remove_rmap(page);
++ page_cache_release(page);
++ }
++ }
++ pte_unmap_unlock(pte, ptl);
++}
++
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * The ptl of the lower mapped page is held on entry and is not released
++ * on exit or inside, to ensure atomic changes to the PTE states
++ * (swapout, mremap, munmap, etc).
++ */
++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || !PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(!PageLocked(page_m));
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_anon_rmap(page_m, vma_m, address_m);
++ inc_mm_counter_fast(mm, MM_ANONPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++ unlock_page(page_m);
++}
++
++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ BUG_ON(!page_m || PageAnon(page_m));
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
++ page_cache_get(page_m);
++ page_add_file_rmap(page_m);
++ inc_mm_counter_fast(mm, MM_FILEPAGES);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long address_m;
++ spinlock_t *ptl_m;
++ struct vm_area_struct *vma_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m, entry_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++ if (!vma_m)
++ return;
++
++ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
++ address_m = address + SEGMEXEC_TASK_SIZE;
++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
++ pte_m = pte_offset_map(pmd_m, address_m);
++ ptl_m = pte_lockptr(mm, pmd_m);
++ if (ptl != ptl_m) {
++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
++ if (!pte_none(*pte_m))
++ goto out;
++ }
++
++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
++ set_pte_at(mm, address_m, pte_m, entry_m);
++out:
++ if (ptl != ptl_m)
++ spin_unlock(ptl_m);
++ pte_unmap(pte_m);
++}
++
++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
++{
++ struct page *page_m;
++ pte_t entry;
++
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
++ goto out;
++
++ entry = *pte;
++ page_m = vm_normal_page(vma, address, entry);
++ if (!page_m)
++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
++ else if (PageAnon(page_m)) {
++ if (pax_find_mirror_vma(vma)) {
++ pte_unmap_unlock(pte, ptl);
++ lock_page(page_m);
++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
++ if (pte_same(entry, *pte))
++ pax_mirror_anon_pte(vma, address, page_m, ptl);
++ else
++ unlock_page(page_m);
++ }
++ } else
++ pax_mirror_file_pte(vma, address, page_m, ptl);
++
++out:
++ pte_unmap_unlock(pte, ptl);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -2719,6 +2916,12 @@ gotten:
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(new_page));
++#endif
++
+ if (old_page) {
+ if (!PageAnon(old_page)) {
+ dec_mm_counter_fast(mm, MM_FILEPAGES);
+@@ -2770,6 +2973,10 @@ gotten:
+ page_remove_rmap(old_page);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, new_page, ptl);
++#endif
++
+ /* Free the old page.. */
+ new_page = old_page;
+ ret |= VM_FAULT_WRITE;
+@@ -3049,6 +3256,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ swap_free(entry);
+ if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ try_to_free_swap(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
++#endif
++
+ unlock_page(page);
+ if (swapcache) {
+ /*
+@@ -3072,6 +3284,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ out:
+@@ -3091,40 +3308,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_{down|up}wards()",
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
+- * doesn't hit another vma.
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_downwards(vma, address - PAGE_SIZE);
+- }
+- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+- struct vm_area_struct *next = vma->vm_next;
+-
+- /* As VM_GROWSDOWN but s/below/above/ */
+- if (next && next->vm_start == address + PAGE_SIZE)
+- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+-
+- expand_upwards(vma, address + PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -3133,27 +3316,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
+ unsigned int flags)
+ {
+- struct page *page;
++ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t entry;
+
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -3172,6 +3351,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (!pte_none(*page_table))
+ goto release;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
+ page_add_new_anon_rmap(page, vma, address);
+ setpte:
+@@ -3179,6 +3363,12 @@ setpte:
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++#endif
++
+ unlock:
+ pte_unmap_unlock(page_table, ptl);
+ return 0;
+@@ -3322,6 +3512,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ */
+ /* Only go through if we didn't race with anybody else... */
+ if (likely(pte_same(*page_table, orig_pte))) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon && pax_find_mirror_vma(vma))
++ BUG_ON(!trylock_page(page));
++#endif
++
+ flush_icache_page(vma, page);
+ entry = mk_pte(page, vma->vm_page_prot);
+ if (flags & FAULT_FLAG_WRITE)
+@@ -3341,6 +3537,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+
+ /* no need to invalidate: a not-present page won't be cached */
+ update_mmu_cache(vma, address, page_table);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (anon)
++ pax_mirror_anon_pte(vma, address, page, ptl);
++ else
++ pax_mirror_file_pte(vma, address, page, ptl);
++#endif
++
+ } else {
+ if (cow_page)
+ mem_cgroup_uncharge_page(cow_page);
+@@ -3494,6 +3698,12 @@ int handle_pte_fault(struct mm_struct *mm,
+ if (flags & FAULT_FLAG_WRITE)
+ flush_tlb_fix_spurious_fault(vma, address);
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_mirror_pte(vma, address, pte, pmd, ptl);
++ return 0;
++#endif
++
+ unlock:
+ pte_unmap_unlock(pte, ptl);
+ return 0;
+@@ -3510,6 +3720,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ pmd_t *pmd;
+ pte_t *pte;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+
+ count_vm_event(PGFAULT);
+@@ -3521,6 +3735,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ unsigned long address_m;
++ pgd_t *pgd_m;
++ pud_t *pud_m;
++ pmd_t *pmd_m;
++
++ if (vma->vm_start > vma_m->vm_start) {
++ address_m = address;
++ address -= SEGMEXEC_TASK_SIZE;
++ vma = vma_m;
++ } else
++ address_m = address + SEGMEXEC_TASK_SIZE;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pud_m = pud_alloc(mm, pgd_m, address_m);
++ if (!pud_m)
++ return VM_FAULT_OOM;
++ pmd_m = pmd_alloc(mm, pud_m, address_m);
++ if (!pmd_m)
++ return VM_FAULT_OOM;
++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
++ return VM_FAULT_OOM;
++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
++ }
++#endif
++
+ retry:
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+@@ -3562,7 +3804,7 @@ retry:
+ * run pte_offset_map on the pmd, if an huge pmd could
+ * materialize from under us from a different thread.
+ */
+- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+ /* if an huge pmd materialized from under us just retry later */
+ if (unlikely(pmd_trans_huge(*pmd)))
+@@ -3599,6 +3841,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+ }
++
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ pud_t *new = pud_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++ if (pgd_present(*pgd)) /* Another has populated it */
++ pud_free(mm, new);
++ else
++ pgd_populate_kernel(mm, pgd, new);
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
+ #endif /* __PAGETABLE_PUD_FOLDED */
+
+ #ifndef __PAGETABLE_PMD_FOLDED
+@@ -3629,11 +3888,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+ }
++
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
++{
++ pmd_t *new = pmd_alloc_one(mm, address);
++ if (!new)
++ return -ENOMEM;
++
++ smp_wmb(); /* See comment in __pte_alloc */
++
++ spin_lock(&mm->page_table_lock);
++#ifndef __ARCH_HAS_4LEVEL_HACK
++ if (pud_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pud_populate_kernel(mm, pud, new);
++#else
++ if (pgd_present(*pud)) /* Another has populated it */
++ pmd_free(mm, new);
++ else
++ pgd_populate_kernel(mm, pud, new);
++#endif /* __ARCH_HAS_4LEVEL_HACK */
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
+ #endif /* __PAGETABLE_PMD_FOLDED */
+
+-int make_pages_present(unsigned long addr, unsigned long end)
++ssize_t make_pages_present(unsigned long addr, unsigned long end)
+ {
+- int ret, len, write;
++ ssize_t ret, len, write;
+ struct vm_area_struct * vma;
+
+ vma = find_vma(current->mm, addr);
+@@ -3666,7 +3949,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ /*
+ * Make sure the vDSO gets into every core dump.
+ * Dumping its contents makes post-mortem fully interpretable later
+@@ -3806,8 +4089,8 @@ out:
+ return ret;
+ }
+
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write)
++ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
++ void *buf, size_t len, int write)
+ {
+ resource_size_t phys_addr;
+ unsigned long prot = 0;
+@@ -3832,8 +4115,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ * Access another process' address space as given in mm. If non-NULL, use the
+ * given task for page fault accounting.
+ */
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+- unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++ unsigned long addr, void *buf, size_t len, int write)
+ {
+ struct vm_area_struct *vma;
+ void *old_buf = buf;
+@@ -3841,7 +4124,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ down_read(&mm->mmap_sem);
+ /* ignore errors, just check how much was successfully transferred */
+ while (len) {
+- int bytes, ret, offset;
++ ssize_t bytes, ret, offset;
+ void *maddr;
+ struct page *page = NULL;
+
+@@ -3900,8 +4183,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ *
+ * The caller must hold a reference on @mm.
+ */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++ void *buf, size_t len, int write)
+ {
+ return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -3911,11 +4194,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
+ */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr,
+- void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
++ void *buf, size_t len, int write)
+ {
+ struct mm_struct *mm;
+- int ret;
++ ssize_t ret;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
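/*
 * Illustrative sketch, not part of the patch: the address folding the
 * SEGMEXEC block added to handle_mm_fault() above performs.  Under
 * SEGMEXEC userland is halved and every executable mapping gets a
 * mirror at +SEGMEXEC_TASK_SIZE, so a fault in either half must leave
 * PTEs in both.  SEGMEXEC_TASK_SIZE is assumed to be 0x60000000 (the
 * i386 value), and deciding by raw address is a simplification of the
 * vma->vm_start comparison the patch actually uses.
 */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE	0x60000000UL	/* assumed i386 constant */

int main(void)
{
	unsigned long address = 0x60001000UL;	/* fault lands in the exec mirror */
	unsigned long address_m;

	if (address >= SEGMEXEC_TASK_SIZE) {	/* mirror half: fold down */
		address_m = address;
		address -= SEGMEXEC_TASK_SIZE;
	} else {				/* data half: mirror lies above */
		address_m = address + SEGMEXEC_TASK_SIZE;
	}
	printf("fault handled at %#lx, PTE mirrored at %#lx\n", address, address_m);
	return 0;
}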
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4d1e637..9e0a005 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ unsigned long vmstart;
+ unsigned long vmend;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++#endif
++
+ vma = find_vma_prev(mm, start, &prev);
+ if (!vma || vma->vm_start > start)
+ return -EFAULT;
+@@ -693,6 +697,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++ if (vma_m) {
++ err = vma_replace_policy(vma_m, new_pol);
++ if (err)
++ goto out;
++ }
++#endif
++
+ }
+
+ out:
+@@ -1126,6 +1140,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+
+ if (end < start)
+ return -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (end == start)
+ return 0;
+
+@@ -1344,6 +1369,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+ if (!mm)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1353,8 +1386,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
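/*
 * Illustrative sketch, not part of the patch: the bound check do_mbind()
 * gains above.  With MF_PAX_SEGMEXEC set the usable address space is cut
 * in half, so the policy range must end at or below SEGMEXEC_TASK_SIZE
 * rather than TASK_SIZE.  Both constants are assumed i386 values.
 */
#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE		0xc0000000UL		/* assumed 3G/1G split */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)

static bool mbind_range_ok(unsigned long end, bool segmexec)
{
	return end <= (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE);
}

int main(void)
{
	printf("%d\n", mbind_range_ok(0x70000000UL, true));	/* 0: rejected */
	printf("%d\n", mbind_range_ok(0x70000000UL, false));	/* 1: allowed */
	return 0;
}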
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 09d6a9d..c514c22 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1389,6 +1389,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ if (!mm)
+ return -EINVAL;
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (mm != current->mm &&
++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
++ err = -EPERM;
++ goto out;
++ }
++#endif
++
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+@@ -1398,8 +1406,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+- cred->uid != tcred->suid && cred->uid != tcred->uid &&
+- !capable(CAP_SYS_NICE)) {
++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ err = -EPERM;
+ goto out;
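/*
 * Illustrative sketch, not part of the patch: what dropping the
 * "cred->uid != tcred->uid" clause from the checks patched into
 * migrate_pages() and move_pages() above means.  Merely sharing the
 * target's real uid no longer grants access on its own; the caller's
 * euid must match the target's suid or uid, or its uid must match the
 * target's suid, or it needs CAP_SYS_NICE.
 */
#include <stdbool.h>
#include <stdio.h>

struct ids { unsigned int uid, euid, suid; };

/* post-patch rule: the negation of the deny condition in the hunk above */
static bool may_modify(const struct ids *c, const struct ids *t, bool cap_sys_nice)
{
	return c->euid == t->suid || c->euid == t->uid ||
	       c->uid == t->suid || cap_sys_nice;
}

int main(void)
{
	struct ids caller = { .uid = 1000, .euid = 2000, .suid = 2000 };
	struct ids target = { .uid = 1000, .euid = 0, .suid = 0 };

	/* same real uid only: allowed before the change, denied after */
	printf("%d\n", may_modify(&caller, &target, false));	/* 0 */
	return 0;
}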
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 4f4f53b..dbc8aec 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -13,6 +13,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/mempolicy.h>
+ #include <linux/syscalls.h>
++#include <linux/security.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/rmap.h>
+@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ {
+ unsigned long nstart, end, tmp;
+ struct vm_area_struct * vma, * prev;
+- int error;
++ int error = 0;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(len != PAGE_ALIGN(len));
+@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ return -EINVAL;
+ if (end == start)
+ return 0;
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma_prev(current->mm, start, &prev);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
+ for (nstart = start ; ; ) {
+ vm_flags_t newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
+ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+
+ newflags = vma->vm_flags | VM_LOCKED;
+@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+ lock_limit >>= PAGE_SHIFT;
+
+ /* check against resource limits */
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
+ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+ error = do_mlock(start, len, 1);
+ up_write(&current->mm->mmap_sem);
+@@ -523,23 +533,29 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
+ static int do_mlockall(int flags)
+ {
+ struct vm_area_struct * vma, * prev = NULL;
+- unsigned int def_flags = 0;
+
+ if (flags & MCL_FUTURE)
+- def_flags = VM_LOCKED;
+- current->mm->def_flags = def_flags;
++ current->mm->def_flags |= VM_LOCKED;
++ else
++ current->mm->def_flags &= ~VM_LOCKED;
+ if (flags == MCL_FUTURE)
+ goto out;
+
+ for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+ vm_flags_t newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
++ break;
++#endif
++
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+
+ /* Ignore errors */
+ mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
++ cond_resched();
+ }
+ out:
+ return 0;
+@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ lock_limit >>= PAGE_SHIFT;
+
+ ret = -ENOMEM;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
+ if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+ capable(CAP_IPC_LOCK))
+ ret = do_mlockall(flags);
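/*
 * Illustrative sketch, not part of the patch: why do_mlockall() above
 * now masks VM_LOCKED in and out of mm->def_flags instead of assigning
 * to it.  The old assignment clobbered every other default flag; the
 * new code touches only the VM_LOCKED bit.  VM_OTHERBIT is a made-up
 * stand-in for any such flag.
 */
#include <stdio.h>

#define VM_LOCKED	0x00002000UL	/* value as in 3.2-era <linux/mm.h> */
#define VM_OTHERBIT	0x00000400UL	/* hypothetical other default flag */

int main(void)
{
	unsigned long def_flags = VM_OTHERBIT;
	unsigned long old_style = VM_LOCKED;		/* assignment: bit lost */

	def_flags |= VM_LOCKED;				/* new style: bit kept */
	printf("old=%#lx new=%#lx\n", old_style, def_flags);
	return 0;
}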
+diff --git a/mm/mm_init.c b/mm/mm_init.c
+index 1ffd97a..240aa20 100644
+--- a/mm/mm_init.c
++++ b/mm/mm_init.c
+@@ -11,6 +11,17 @@
+ #include <linux/export.h>
+ #include "internal.h"
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++bool pax_sanitize_slab __read_only = true;
++static int __init pax_sanitize_slab_setup(char *str)
++{
++ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
++	printk(KERN_INFO "%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
++ return 1;
++}
++__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
++#endif
++
+ #ifdef CONFIG_DEBUG_MEMORY_INIT
+ int mminit_loglevel;
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index dff37a6..49e182f 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -30,6 +30,7 @@
+ #include <linux/perf_event.h>
+ #include <linux/audit.h>
+ #include <linux/khugepaged.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -46,6 +47,16 @@
+ #define arch_rebalance_pgtables(addr, len) (addr)
+ #endif
+
++static inline void verify_mm_writelocked(struct mm_struct *mm)
++{
++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
++ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
++ up_read(&mm->mmap_sem);
++ BUG();
++ }
++#endif
++}
++
+ static void unmap_region(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct vm_area_struct *prev,
+ unsigned long start, unsigned long end);
+@@ -71,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ */
+-pgprot_t protection_map[16] = {
++pgprot_t protection_map[16] __read_only = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+-pgprot_t vm_get_page_prot(unsigned long vm_flags)
++pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+ {
+- return __pgprot(pgprot_val(protection_map[vm_flags &
++ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_val(arch_vm_get_page_prot(vm_flags)));
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if (!(__supported_pte_mask & _PAGE_NX) &&
++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
++ (vm_flags & (VM_READ | VM_WRITE)))
++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
++#endif
++
++ return prot;
+ }
+ EXPORT_SYMBOL(vm_get_page_prot);
+
+ int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ /*
+ * Make sure vm_committed_as in one cacheline and not cacheline shared with
+ * other variables. It can be updated by several CPUs frequently.
+@@ -228,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+ struct vm_area_struct *next = vma->vm_next;
+
+ might_sleep();
++ BUG_ON(vma->vm_mirror);
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+@@ -272,6 +294,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ * not page aligned -Ram Gupta
+ */
+ rlim = rlimit(RLIMIT_DATA);
++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
+ if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+ (mm->end_data - mm->start_data) > rlim)
+ goto out;
+@@ -689,6 +712,12 @@ static int
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+ if (vma->vm_pgoff == vm_pgoff)
+@@ -708,6 +737,12 @@ static int
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
++ return 0;
++#endif
++
+ if (is_mergeable_vma(vma, file, vm_flags) &&
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
+ pgoff_t vm_pglen;
+@@ -750,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags,
+- struct anon_vma *anon_vma, struct file *file,
++ struct anon_vma *anon_vma, struct file *file,
+ pgoff_t pgoff, struct mempolicy *policy)
+ {
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
+ struct vm_area_struct *area, *next;
+ int err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
++
++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
++#endif
++
+ /*
+ * We later require that vma->vm_flags == vm_flags,
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
+@@ -772,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = next->vm_next;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (prev)
++ prev_m = pax_find_mirror_vma(prev);
++ if (area)
++ area_m = pax_find_mirror_vma(area);
++ if (next)
++ next_m = pax_find_mirror_vma(next);
++#endif
++
+ /*
+ * Can it merge with the predecessor?
+ */
+@@ -791,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ /* cases 1, 6 */
+ err = vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL);
+- } else /* cases 2, 5, 7 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ next_m->vm_end, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 2, 5, 7 */
+ err = vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ end_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(prev);
+@@ -807,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+ mpol_equal(policy, vma_policy(next)) &&
+ can_vma_merge_before(next, vm_flags,
+ anon_vma, file, pgoff+pglen)) {
+- if (prev && addr < prev->vm_end) /* case 4 */
++ if (prev && addr < prev->vm_end) { /* case 4 */
+ err = vma_adjust(prev, prev->vm_start,
+ addr, prev->vm_pgoff, NULL);
+- else /* cases 3, 8 */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && prev_m)
++ err = vma_adjust(prev_m, prev_m->vm_start,
++ addr_m, prev_m->vm_pgoff, NULL);
++#endif
++
++ } else { /* cases 3, 8 */
+ err = vma_adjust(area, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && area_m)
++ err = vma_adjust(area_m, addr_m, next_m->vm_end,
++ next_m->vm_pgoff - pglen, NULL);
++#endif
++
++ }
+ if (err)
+ return NULL;
+ khugepaged_enter_vma_merge(area);
+@@ -921,15 +1002,22 @@ none:
+ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+ {
+- const unsigned long stack_flags
+- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
++ mm->total_vm += pages;
+
+ if (file) {
+ mm->shared_vm += pages;
+ if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
+ mm->exec_vm += pages;
+- } else if (flags & stack_flags)
++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
+ mm->stack_vm += pages;
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+ }
+@@ -955,7 +1043,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ * (the exception is when the underlying filesystem is noexec
+ * mounted, in which case we dont add PROT_EXEC.)
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
+
+@@ -981,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -992,6 +1080,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
++ mm->binfmt->handle_mmap)
++ mm->binfmt->handle_mmap(file);
++#endif
++
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
++ gr_log_rwxmmap(file);
++
++#ifdef CONFIG_PAX_EMUPLT
++ vm_flags &= ~VM_EXEC;
++#else
++ return -EPERM;
++#endif
++
++ }
++
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++#else
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
++ vm_flags &= ~VM_PAGEEXEC;
++#endif
++
+ if (flags & MAP_LOCKED)
+ if (!can_do_mlock())
+ return -EPERM;
+@@ -1003,6 +1128,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ return -EAGAIN;
+ }
+@@ -1073,6 +1199,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ if (error)
+ return error;
+
++ if (!gr_acl_handle_mmap(file, prot))
++ return -EACCES;
++
+ return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+ }
+ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1153,7 +1282,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+ vm_flags_t vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+@@ -1202,17 +1331,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long charged = 0;
+ struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ /* Clear old maps */
+ error = -ENOMEM;
+-munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limit. */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
++#endif
++
+ if (!may_expand_vm(mm, len >> PAGE_SHIFT))
+ return -ENOMEM;
+
+@@ -1258,6 +1402,16 @@ munmap_back:
+ goto unacct_error;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto free_vma;
++ }
++ }
++#endif
++
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+@@ -1266,8 +1420,9 @@ munmap_back:
+ vma->vm_pgoff = pgoff;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+
++ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
++
+ if (file) {
+- error = -EINVAL;
+ if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
+ goto free_vma;
+ if (vm_flags & VM_DENYWRITE) {
+@@ -1281,6 +1436,19 @@ munmap_back:
+ error = file->f_op->mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m && (vm_flags & VM_EXECUTABLE))
++ added_exe_file_vma(mm);
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
++ vma->vm_flags |= VM_PAGEEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ }
++#endif
++
+ if (vm_flags & VM_EXECUTABLE)
+ added_exe_file_vma(mm);
+
+@@ -1293,6 +1461,8 @@ munmap_back:
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
+ } else if (vm_flags & VM_SHARED) {
++ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
++ goto free_vma;
+ error = shmem_zero_setup(vma);
+ if (error)
+ goto free_vma;
+@@ -1316,14 +1486,19 @@ munmap_back:
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ file = vma->vm_file;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ /* Once vma denies write, undo our temporary denial count */
+ if (correct_wcount)
+ atomic_inc(&inode->i_writecount);
+ out:
+ perf_event_mmap(vma);
+
+- mm->total_vm += len >> PAGE_SHIFT;
+ vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
++ track_exec_limit(mm, addr, addr + len, vm_flags);
+ if (vm_flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+ mm->locked_vm += (len >> PAGE_SHIFT);
+@@ -1341,6 +1516,12 @@ unmap_and_free_vma:
+ unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+ charged = 0;
+ free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ kmem_cache_free(vm_area_cachep, vma_m);
++#endif
++
+ kmem_cache_free(vm_area_cachep, vma);
+ unacct_error:
+ if (charged)
+@@ -1348,6 +1529,73 @@ unacct_error:
+ return error;
+ }
+
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++ return ((random32() & 0xFF) + 1) << PAGE_SHIFT;
++
++ return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long *addr, unsigned long len, unsigned long offset)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (*addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, *addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (*addr + len > vma->vm_start)
++ return false;
++
++ if (offset) {
++ if (vma->vm_prev && *addr == vma->vm_prev->vm_end && (vma->vm_start - len - vma->vm_prev->vm_end >= offset)) {
++ *addr = vma->vm_prev->vm_end + offset;
++ return true;
++ }
++ return offset <= vma->vm_start - *addr - len;
++ } else if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - *addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
++ if (*addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap)
++ return true;
++ if (vma->vm_start - len - vma->vm_prev->vm_end >= sysctl_heap_stack_gap) {
++ *addr = vma->vm_start - len;
++ return true;
++ }
++ return false;
++ }
++#endif
++
++ return true;
++}
++
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
++{
++ if (vma->vm_start < len)
++ return -ENOMEM;
++
++ if (!(vma->vm_flags & VM_GROWSDOWN)) {
++ if (offset <= vma->vm_start - len)
++ return vma->vm_start - len - offset;
++ else
++ return -ENOMEM;
++ }
++
++ if (sysctl_heap_stack_gap <= vma->vm_start - len)
++ return vma->vm_start - len - sysctl_heap_stack_gap;
++ return -ENOMEM;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
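/*
 * Illustrative sketch, not part of the patch: the core test behind
 * check_heap_stack_gap() above.  A candidate mapping [addr, addr+len)
 * placed below a VM_GROWSDOWN vma must leave a hole of at least
 * sysctl_heap_stack_gap bytes (64KB by default, per the sysctl added
 * earlier in this patch); the stack vma start is an assumed value.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long sysctl_heap_stack_gap = 64 * 1024;

static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long stack_vma_start)
{
	if (addr + len > stack_vma_start)
		return false;	/* overlaps the stack vma outright */
	return sysctl_heap_stack_gap <= stack_vma_start - addr - len;
}

int main(void)
{
	unsigned long stack = 0xbf800000UL;	/* assumed stack vma start */

	printf("%d\n", gap_ok(stack - 0x20000, 0x10000, stack));	/* 1 */
	printf("%d\n", gap_ok(stack - 0x10000, 0x10000, stack));	/* 0 */
	return 0;
}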
+@@ -1367,6 +1615,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -1374,18 +1623,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -1396,34 +1650,40 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- addr = TASK_UNMAPPED_BASE;
+- start_addr = addr;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
+
+ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
+ mm->free_area_cache = addr;
+ mm->cached_hole_size = ~0UL;
+ }
+@@ -1441,7 +1701,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+- unsigned long addr = addr0;
++ unsigned long base = mm->mmap_base, addr = addr0;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+@@ -1450,13 +1711,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (flags & MAP_FIXED)
+ return addr;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1470,10 +1736,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+
+ if (mm->mmap_base < len)
+@@ -1488,7 +1755,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+@@ -1497,8 +1764,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -1507,13 +1774,21 @@ bottomup:
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
++ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
++ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+- mm->free_area_cache = mm->mmap_base;
++ mm->mmap_base = base;
++ mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+@@ -1522,6 +1797,12 @@ bottomup:
+
+ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
++ return;
++#endif
++
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+@@ -1529,8 +1810,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+ mm->free_area_cache = addr;
+
+ /* dont allow allocations above current base */
+- if (mm->free_area_cache > mm->mmap_base)
++ if (mm->free_area_cache > mm->mmap_base) {
+ mm->free_area_cache = mm->mmap_base;
++ mm->cached_hole_size = ~0UL;
++ }
+ }
+
+ unsigned long
+@@ -1603,40 +1886,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+
+ EXPORT_SYMBOL(find_vma);
+
+-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
++/*
++ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
++ */
+ struct vm_area_struct *
+ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+ struct vm_area_struct **pprev)
+ {
+- struct vm_area_struct *vma = NULL, *prev = NULL;
+- struct rb_node *rb_node;
+- if (!mm)
+- goto out;
+-
+- /* Guard against addr being lower than the first VMA */
+- vma = mm->mmap;
+-
+- /* Go through the RB tree quickly. */
+- rb_node = mm->mm_rb.rb_node;
+-
+- while (rb_node) {
+- struct vm_area_struct *vma_tmp;
+- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+-
+- if (addr < vma_tmp->vm_end) {
+- rb_node = rb_node->rb_left;
+- } else {
+- prev = vma_tmp;
+- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
+- break;
++ struct vm_area_struct *vma;
++
++ vma = find_vma(mm, addr);
++ if (vma) {
++ *pprev = vma->vm_prev;
++ } else {
++ struct rb_node *rb_node = mm->mm_rb.rb_node;
++ *pprev = NULL;
++ while (rb_node) {
++ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ rb_node = rb_node->rb_right;
+ }
+ }
++ return vma;
++}
++
++#ifdef CONFIG_PAX_SEGMEXEC
++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
++{
++ struct vm_area_struct *vma_m;
+
+-out:
+- *pprev = prev;
+- return prev ? prev->vm_next : vma;
++ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
++ BUG_ON(vma->vm_mirror);
++ return NULL;
++ }
++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
++ vma_m = vma->vm_mirror;
++ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
++ BUG_ON(vma->vm_file != vma_m->vm_file);
++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
++ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
++ return vma_m;
+ }
++#endif
+
+ /*
+ * Verify that the stack growth is acceptable and
+@@ -1654,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ return -ENOMEM;
+
+ /* Stack limit test */
++ gr_learn_resource(current, RLIMIT_STACK, size, 1);
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ return -ENOMEM;
+
+@@ -1664,6 +1958,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ locked = mm->locked_vm + grow;
+ limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+ limit >>= PAGE_SHIFT;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
+ if (locked > limit && !capable(CAP_IPC_LOCK))
+ return -ENOMEM;
+ }
+@@ -1682,7 +1977,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ return -ENOMEM;
+
+ /* Ok, everything looks good - let it rip */
+- mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ mm->locked_vm += grow;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+@@ -1694,37 +1988,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
+ */
++#ifndef CONFIG_IA64
++static
++#endif
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+ int error;
++ bool locknext;
+
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
++ /* Also guard against wrapping around to address 0. */
++ if (address < PAGE_ALIGN(address+1))
++ address = PAGE_ALIGN(address+1);
++ else
++ return -ENOMEM;
++
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+ */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
++ if (locknext && anon_vma_prepare(vma->vm_next))
++ return -ENOMEM;
+ vma_lock_anon_vma(vma);
++ if (locknext)
++ vma_lock_anon_vma(vma->vm_next);
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller
+ * is required to hold the mmap_sem in read mode. We need the
+- * anon_vma lock to serialize against concurrent expand_stacks.
+- * Also guard against wrapping around to address 0.
++ * anon_vma locks to serialize against concurrent expand_stacks
++ * and expand_upwards.
+ */
+- if (address < PAGE_ALIGN(address+4))
+- address = PAGE_ALIGN(address+4);
+- else {
+- vma_unlock_anon_vma(vma);
+- return -ENOMEM;
+- }
+ error = 0;
+
+ /* Somebody else might have raced and expanded it already */
+- if (address > vma->vm_end) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
+ unsigned long size, grow;
+
+ size = address - vma->vm_start;
+@@ -1739,6 +2044,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ }
+ }
+ }
++ if (locknext)
++ vma_unlock_anon_vma(vma->vm_next);
+ vma_unlock_anon_vma(vma);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+@@ -1752,6 +2059,8 @@ int expand_downwards(struct vm_area_struct *vma,
+ unsigned long address)
+ {
+ int error;
++ bool lockprev = false;
++ struct vm_area_struct *prev;
+
+ /*
+ * We must make sure the anon_vma is allocated
+@@ -1765,6 +2074,15 @@ int expand_downwards(struct vm_area_struct *vma,
+ if (error)
+ return error;
+
++ prev = vma->vm_prev;
++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
++ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
++#endif
++ if (lockprev && anon_vma_prepare(prev))
++ return -ENOMEM;
++ if (lockprev)
++ vma_lock_anon_vma(prev);
++
+ vma_lock_anon_vma(vma);
+
+ /*
+@@ -1774,9 +2092,17 @@ int expand_downwards(struct vm_area_struct *vma,
+ */
+
+ /* Somebody else might have raced and expanded it already */
+- if (address < vma->vm_start) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
+ unsigned long size, grow;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m;
++
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ size = vma->vm_end - address;
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+@@ -1786,18 +2112,48 @@ int expand_downwards(struct vm_area_struct *vma,
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ vma_m->vm_start -= grow << PAGE_SHIFT;
++ vma_m->vm_pgoff -= grow;
++ }
++#endif
++
+ perf_event_mmap(vma);
+ }
+ }
+ }
+ vma_unlock_anon_vma(vma);
++ if (lockprev)
++ vma_unlock_anon_vma(prev);
+ khugepaged_enter_vma_merge(vma);
+ return error;
+ }
+
++/*
++ * Note how expand_stack() refuses to expand the stack all the way to
++ * abut the next virtual mapping, *unless* that mapping itself is also
++ * a stack mapping. We want to leave room for a guard page, after all
++ * (the guard page itself is not added here, that is done by the
++ * actual page faulting logic)
++ *
++ * This matches the behavior of the guard page logic (see mm/memory.c:
++ * check_stack_guard_page()), which only allows the guard page to be
++ * removed under these circumstances.
++ */
+ #ifdef CONFIG_STACK_GROWSUP
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
++ struct vm_area_struct *next;
++
++ address &= PAGE_MASK;
++ next = vma->vm_next;
++ if (next && next->vm_start == address + PAGE_SIZE) {
++ if (!(next->vm_flags & VM_GROWSUP))
++ return -ENOMEM;
++ }
+ return expand_upwards(vma, address);
+ }
+
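/*
 * Illustrative sketch, not part of the patch: the rule stated by the
 * comment block added above.  A growing stack may only run flush into
 * its neighbour when the neighbour is itself a stack mapping growing
 * the same way; otherwise at least a guard page must survive.  The
 * VM_GROWSUP value matches 3.2-era <linux/mm.h>; the page size is
 * assumed to be 4096.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSUP	0x00000200UL
#define PAGE_SIZE	4096UL

static bool may_expand_up_to(unsigned long address, unsigned long next_start,
			     unsigned long next_flags)
{
	if (next_start == address + PAGE_SIZE)	/* would abut the next vma */
		return next_flags & VM_GROWSUP;
	return true;
}

int main(void)
{
	printf("%d\n", may_expand_up_to(0x1000000UL, 0x1001000UL, 0));		/* 0 */
	printf("%d\n", may_expand_up_to(0x1000000UL, 0x1001000UL, VM_GROWSUP));	/* 1 */
	return 0;
}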
+@@ -1820,6 +2176,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+ #else
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
++ struct vm_area_struct *prev;
++
++ address &= PAGE_MASK;
++ prev = vma->vm_prev;
++ if (prev && prev->vm_end == address) {
++ if (!(prev->vm_flags & VM_GROWSDOWN))
++ return -ENOMEM;
++ }
+ return expand_downwards(vma, address);
+ }
+
+@@ -1860,7 +2224,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+ do {
+ long nrpages = vma_pages(vma);
+
+- mm->total_vm -= nrpages;
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
++ vma = remove_vma(vma);
++ continue;
++ }
++#endif
++
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ vma = remove_vma(vma);
+ } while (vma);
+@@ -1905,6 +2275,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ vma->vm_prev = NULL;
+ do {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma->vm_mirror) {
++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
++ vma->vm_mirror->vm_mirror = NULL;
++ vma->vm_mirror->vm_flags &= ~VM_EXEC;
++ vma->vm_mirror = NULL;
++ }
++#endif
++
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
+ tail_vma = vma;
+@@ -1933,14 +2313,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ struct vm_area_struct *new;
+ int err = -ENOMEM;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m, *new_m = NULL;
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (is_vm_hugetlb_page(vma) && (addr &
+ ~(huge_page_mask(hstate_vma(vma)))))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ vma_m = pax_find_mirror_vma(vma);
++#endif
++
+ new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!new)
+ goto out_err;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
++ if (!new_m) {
++ kmem_cache_free(vm_area_cachep, new);
++ goto out_err;
++ }
++ }
++#endif
++
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
+
+@@ -1953,6 +2352,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m) {
++ *new_m = *vma_m;
++ INIT_LIST_HEAD(&new_m->anon_vma_chain);
++ new_m->vm_mirror = new;
++ new->vm_mirror = new_m;
++
++ if (new_below)
++ new_m->vm_end = addr_m;
++ else {
++ new_m->vm_start = addr_m;
++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
++ }
++ }
++#endif
++
+ pol = mpol_dup(vma_policy(vma));
+ if (IS_ERR(pol)) {
+ err = PTR_ERR(pol);
+@@ -1978,6 +2393,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ else
+ err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!err && vma_m) {
++ if (anon_vma_clone(new_m, vma_m))
++ goto out_free_mpol;
++
++ mpol_get(pol);
++ vma_set_policy(new_m, pol);
++
++ if (new_m->vm_file) {
++ get_file(new_m->vm_file);
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ added_exe_file_vma(mm);
++ }
++
++ if (new_m->vm_ops && new_m->vm_ops->open)
++ new_m->vm_ops->open(new_m);
++
++ if (new_below)
++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
++ else
++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
++
++ if (err) {
++ if (new_m->vm_ops && new_m->vm_ops->close)
++ new_m->vm_ops->close(new_m);
++ if (new_m->vm_file) {
++ if (vma_m->vm_flags & VM_EXECUTABLE)
++ removed_exe_file_vma(mm);
++ fput(new_m->vm_file);
++ }
++ mpol_put(pol);
++ }
++ }
++#endif
++
+ /* Success. */
+ if (!err)
+ return 0;
+@@ -1990,10 +2441,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ removed_exe_file_vma(mm);
+ fput(new->vm_file);
+ }
+- unlink_anon_vmas(new);
+ out_free_mpol:
+ mpol_put(pol);
+ out_free_vma:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (new_m) {
++ unlink_anon_vmas(new_m);
++ kmem_cache_free(vm_area_cachep, new_m);
++ }
++#endif
++
++ unlink_anon_vmas(new);
+ kmem_cache_free(vm_area_cachep, new);
+ out_err:
+ return err;
+@@ -2006,6 +2465,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+ {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
++ if (mm->map_count >= sysctl_max_map_count-1)
++ return -ENOMEM;
++ } else
++#endif
++
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+@@ -2017,11 +2485,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ * work. This now handles partial unmappings.
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
++#ifdef CONFIG_PAX_SEGMEXEC
+ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+ {
++ int ret = __do_munmap(mm, start, len);
++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
++ return ret;
++
++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
++}
++
++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#else
++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
++#endif
++{
+ unsigned long end;
+ struct vm_area_struct *vma, *prev, *last;
+
++ /*
++ * mm->mmap_sem is required to protect against another thread
++ * changing the mappings in case we sleep.
++ */
++ verify_mm_writelocked(mm);
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -2096,6 +2583,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+ /* Fix up all other VM information */
+ remove_vma_list(mm, vma);
+
++ track_exec_limit(mm, start, end, 0UL);
++
+ return 0;
+ }
+
+@@ -2108,22 +2597,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+
+ profile_munmap(addr);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ return ret;
+ }
+
+-static inline void verify_mm_writelocked(struct mm_struct *mm)
+-{
+-#ifdef CONFIG_DEBUG_VM
+- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+- WARN_ON(1);
+- up_read(&mm->mmap_sem);
+- }
+-#endif
+-}
+-
+ /*
+ * this is really a simplified "do_mmap". it only handles
+ * anonymous maps. eventually we may be able to do some
+@@ -2137,6 +2622,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+ struct rb_node ** rb_link, * rb_parent;
+ pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
++ unsigned long charged;
+
+ len = PAGE_ALIGN(len);
+ if (!len)
+@@ -2148,16 +2634,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (error & ~PAGE_MASK)
+ return error;
+
++ charged = len >> PAGE_SHIFT;
++
+ /*
+ * mlock MCL_FUTURE?
+ */
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+- locked = len >> PAGE_SHIFT;
++ locked = charged;
+ locked += mm->locked_vm;
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+@@ -2174,22 +2674,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+ /*
+ * Clear old maps. this also does some error checking for us
+ */
+- munmap_back:
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+ if (vma && vma->vm_start < addr + len) {
+ if (do_munmap(mm, addr, len))
+ return -ENOMEM;
+- goto munmap_back;
++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
++ BUG_ON(vma && vma->vm_start < addr + len);
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
+- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
++ if (!may_expand_vm(mm, charged))
+ return -ENOMEM;
+
+ if (mm->map_count > sysctl_max_map_count)
+ return -ENOMEM;
+
+- if (security_vm_enough_memory(len >> PAGE_SHIFT))
++ if (security_vm_enough_memory(charged))
+ return -ENOMEM;
+
+ /* Can we just expand an old private anonymous mapping? */
+@@ -2203,7 +2703,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+ */
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma) {
+- vm_unacct_memory(len >> PAGE_SHIFT);
++ vm_unacct_memory(charged);
+ return -ENOMEM;
+ }
+
+@@ -2217,11 +2717,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+ perf_event_mmap(vma);
+- mm->total_vm += len >> PAGE_SHIFT;
++ mm->total_vm += charged;
+ if (flags & VM_LOCKED) {
+ if (!mlock_vma_pages_range(vma, addr, addr + len))
+- mm->locked_vm += (len >> PAGE_SHIFT);
++ mm->locked_vm += charged;
+ }
++ track_exec_limit(mm, addr, addr + len, flags);
+ return addr;
+ }
+
+@@ -2268,8 +2769,10 @@ void exit_mmap(struct mm_struct *mm)
+ * Walk the list again, actually closing and freeing it,
+ * with preemption enabled, without holding any MM locks.
+ */
+- while (vma)
++ while (vma) {
++ vma->vm_mirror = NULL;
+ vma = remove_vma(vma);
++ }
+
+ BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ }
+@@ -2283,6 +2786,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++#endif
++
++ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
++ return -EPERM;
++
+ /*
+ * The vm_pgoff of a purely anonymous vma should be irrelevant
+ * until its first write fault, when page's anon_vma and index
+@@ -2305,7 +2815,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m)
++ return -ENOMEM;
++ }
++#endif
++
+ vma_link(mm, vma, prev, rb_link, rb_parent);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (vma_m)
++ BUG_ON(pax_mirror_vma(vma_m, vma));
++#endif
++
+ return 0;
+ }
+
+@@ -2323,6 +2848,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ struct rb_node **rb_link, *rb_parent;
+ struct mempolicy *pol;
+
++ BUG_ON(vma->vm_mirror);
++
+ /*
+ * If anonymous vma has not yet been faulted, update new pgoff
+ * to match new location, to increase its chance of merging.
+@@ -2373,6 +2900,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ return NULL;
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
++{
++ struct vm_area_struct *prev_m;
++ struct rb_node **rb_link_m, *rb_parent_m;
++ struct mempolicy *pol_m;
++
++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
++ *vma_m = *vma;
++ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
++ if (anon_vma_clone(vma_m, vma))
++ return -ENOMEM;
++ pol_m = vma_policy(vma_m);
++ mpol_get(pol_m);
++ vma_set_policy(vma_m, pol_m);
++ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
++ if (vma_m->vm_file)
++ get_file(vma_m->vm_file);
++ if (vma_m->vm_ops && vma_m->vm_ops->open)
++ vma_m->vm_ops->open(vma_m);
++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
++ vma_m->vm_mirror = vma;
++ vma->vm_mirror = vma_m;
++ return 0;
++}
++#endif
++
+ /*
+ * Return true if the calling process may expand its vm space by the passed
+ * number of pages
+@@ -2384,6 +2944,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+
+ lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+
++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
+ if (cur + npages > lim)
+ return 0;
+ return 1;
+@@ -2454,6 +3015,22 @@ int install_special_mapping(struct mm_struct *mm,
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->pax_flags & MF_PAX_MPROTECT) {
++#ifndef CONFIG_PAX_MPROTECT_COMPAT
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
++ return -EPERM;
++ if (!(vm_flags & VM_EXEC))
++ vm_flags &= ~VM_MAYEXEC;
++#else
++ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++#endif
++ else
++ vm_flags &= ~VM_MAYWRITE;
++ }
++#endif
++
+ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
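/*
 * Illustrative sketch, not part of the patch: the flag transform
 * pax_mirror_vma() above applies.  The mirror keeps VM_EXEC but sheds
 * the write, accounting and mlock bits, leaving the upper half an
 * execute-only alias of the data mapping.  Flag values are taken from
 * 3.2-era <linux/mm.h>.
 */
#include <stdio.h>

#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYWRITE	0x00000020UL
#define VM_LOCKED	0x00002000UL
#define VM_ACCOUNT	0x00100000UL

int main(void)
{
	unsigned long flags = VM_READ | VM_WRITE | VM_EXEC | VM_MAYWRITE | VM_ACCOUNT;
	unsigned long mirror = flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);

	printf("vma=%#lx mirror=%#lx\n", flags, mirror);	/* 0x100027 -> 0x5 */
	return 0;
}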
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 5a688a2..fffb9f6 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -23,10 +23,16 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/perf_event.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ #ifndef pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
+ flush_tlb_range(vma, start, end);
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing, except for stack expansion */
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ unsigned long oldlimit, newlimit = 0UL;
++
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
++ return;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end) {
++ struct vm_area_struct *vma = find_vma(mm, oldlimit);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ if (is_vm_hugetlb_page(vma))
++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++ else
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
++ }
++}
++#endif
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ int error;
+ int dirty_accountable = 0;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = NULL;
++ unsigned long start_m, end_m;
++
++ start_m = start + SEGMEXEC_TASK_SIZE;
++ end_m = end + SEGMEXEC_TASK_SIZE;
++#endif
++
+ if (newflags == oldflags) {
+ *pprev = vma;
+ return 0;
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
++ if (start != vma->vm_start) {
++ error = split_vma(mm, vma, start, 1);
++ if (error)
++ goto fail;
++ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
++ *pprev = (*pprev)->vm_next;
++ }
++
++ if (end != vma->vm_end) {
++ error = split_vma(mm, vma, end, 0);
++ if (error)
++ goto fail;
++ }
++
++ if (pax_find_mirror_vma(vma)) {
++ error = __do_munmap(mm, start_m, end_m - start_m);
++ if (error)
++ goto fail;
++ } else {
++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++ if (!vma_m) {
++ error = -ENOMEM;
++ goto fail;
++ }
++ vma->vm_flags = newflags;
++ error = pax_mirror_vma(vma_m, vma);
++ if (error) {
++ vma->vm_flags = oldflags;
++ goto fail;
++ }
++ }
++ }
++#endif
++
+ /*
+ * First try to merge with previous and/or next vma.
+ */
+@@ -204,9 +306,21 @@ success:
+ * vm_flags and vm_page_prot are protected by the mmap_sem
+ * held in write mode.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
++#endif
++
+ vma->vm_flags = newflags;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (mm->binfmt && mm->binfmt->handle_mprotect)
++ mm->binfmt->handle_mprotect(vma, newflags);
++#endif
++
+ vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+- vm_get_page_prot(newflags));
++ vm_get_page_prot(vma->vm_flags));
+
+ if (vma_wants_writenotify(vma)) {
+ vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
+@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ end = start + len;
+ if (end <= start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (!arch_validate_prot(prot))
+ return -EINVAL;
+
+@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC:
+ */
+- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ vm_flags = calc_vm_prot_bits(prot);
+@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ if (start > vma->vm_start)
+ prev = vma;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
++ current->mm->binfmt->handle_mprotect(vma, vm_flags);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned long newflags;
+
+@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+
+ /* newflags >> 4 shift VM_MAY% in place of VM_% */
+ if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
++ if (prot & (PROT_WRITE | PROT_EXEC))
++ gr_log_rwxmprotect(vma);
++
++ error = -EACCES;
++ goto out;
++ }
++
++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+ error = -EACCES;
+ goto out;
+ }
+@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
+ if (error)
+ goto out;
++
++ track_exec_limit(current->mm, nstart, tmp, vm_flags);
++
+ nstart = tmp;
+
+ if (nstart < prev->vm_end)
+diff --git a/mm/mremap.c b/mm/mremap.c
+index d6959cb..c9e1e45 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+ continue;
+ pte = ptep_get_and_clear(mm, old_addr, old_pte);
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
++ pte = pte_exprotect(pte);
++#endif
++
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
+
+@@ -251,7 +257,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
+ * If this were a serious issue, we'd add a flag to do_munmap().
+ */
+ hiwater_vm = mm->hiwater_vm;
+- mm->total_vm += new_len >> PAGE_SHIFT;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
+
+ if (do_munmap(mm, old_addr, old_len) < 0) {
+@@ -290,6 +295,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
+ if (is_vm_hugetlb_page(vma))
+ goto Einval;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (pax_find_mirror_vma(vma))
++ goto Einval;
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto Efault;
+@@ -346,20 +356,25 @@ static unsigned long mremap_to(unsigned long addr,
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
+ unsigned long map_flags;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+
+- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+- if ((new_addr <= addr) && (new_addr+new_len) > addr)
+- goto out;
+-
+- if ((addr <= new_addr) && (addr+old_len) > new_addr)
++ if (addr + old_len > new_addr && new_addr + new_len > addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+@@ -431,6 +446,7 @@ unsigned long do_mremap(unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
++ unsigned long pax_task_size = TASK_SIZE;
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ goto out;
+@@ -449,6 +465,17 @@ unsigned long do_mremap(unsigned long addr,
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ pax_task_size -= PAGE_SIZE;
++
++ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
++ old_len > pax_task_size || addr > pax_task_size-old_len)
++ goto out;
++
+ if (flags & MREMAP_FIXED) {
+ if (flags & MREMAP_MAYMOVE)
+ ret = mremap_to(addr, old_len, new_addr, new_len);
+@@ -490,7 +517,6 @@ unsigned long do_mremap(unsigned long addr,
+ goto out;
+ }
+
+- mm->total_vm += pages;
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
+ if (vma->vm_flags & VM_LOCKED) {
+ mm->locked_vm += pages;
+@@ -498,6 +524,7 @@ unsigned long do_mremap(unsigned long addr,
+ addr + new_len);
+ }
+ ret = addr;
++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+ goto out;
+ }
+ }
+@@ -524,7 +551,13 @@ unsigned long do_mremap(unsigned long addr,
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
++
++ map_flags = vma->vm_flags;
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
++ if (!(ret & ~PAGE_MASK)) {
++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++ }
+ }
+ out:
+ if (ret & ~PAGE_MASK)
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 07c08c4..8d4ad26 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -109,19 +109,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
+ unsigned long __init free_all_memory_core_early(int nodeid)
+ {
+ int i;
+- u64 start, end;
++ u64 start, end, startrange, endrange;
+ unsigned long count = 0;
+- struct range *range = NULL;
++ struct range *range = NULL, rangerange = { 0, 0 };
+ int nr_range;
+
+ nr_range = get_free_all_memory_range(&range, nodeid);
++ startrange = __pa(range) >> PAGE_SHIFT;
++ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
+
+ for (i = 0; i < nr_range; i++) {
+ start = range[i].start;
+ end = range[i].end;
++ if (start <= endrange && startrange < end) {
++ BUG_ON(rangerange.start | rangerange.end);
++ rangerange = range[i];
++ continue;
++ }
+ count += end - start;
+ __free_pages_memory(start, end);
+ }
++ start = rangerange.start;
++ end = rangerange.end;
++ count += end - start;
++ __free_pages_memory(start, end);
+
+ return count;
+ }
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 1db7971..5dba7b6 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ EXPORT_SYMBOL(find_vma);
+
+ /*
+- * find a VMA
+- * - we don't extend stack VMAs under NOMMU conditions
+- */
+-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+-{
+- return find_vma(mm, addr);
+-}
+-
+-/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
++ INIT_LIST_HEAD(&new->anon_vma_chain);
+ *region = *vma->vm_region;
+ new->vm_region = region;
+
+@@ -1971,8 +1962,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ }
+ EXPORT_SYMBOL(filemap_fault);
+
+-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+- unsigned long addr, void *buf, int len, int write)
++static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
++ unsigned long addr, void *buf, size_t len, int write)
+ {
+ struct vm_area_struct *vma;
+
+@@ -2013,8 +2004,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ *
+ * The caller must hold a reference on @mm.
+ */
+-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+- void *buf, int len, int write)
++ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
++ void *buf, size_t len, int write)
+ {
+ return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ }
+@@ -2023,7 +2014,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
++ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
+ {
+ struct mm_struct *mm;
+
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index ea3f83b..001a216 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -522,7 +522,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
+ * card's bdi_dirty may rush to many times higher than bdi_setpoint.
+ * - the bdi dirty thresh drops quickly due to change of JBOD workload
+ */
+-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
++static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
+ unsigned long thresh,
+ unsigned long bg_thresh,
+ unsigned long dirty,
+@@ -1380,7 +1380,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block __cpuinitdata ratelimit_nb = {
++static struct notifier_block ratelimit_nb = {
+ .notifier_call = ratelimit_handler,
+ .next = NULL,
+ };
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d8762b2..7c1d26d 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/random.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -341,7 +342,7 @@ out:
+ * This usage means that zero-order pages may not be compound.
+ */
+
+-static void free_compound_page(struct page *page)
++void free_compound_page(struct page *page)
+ {
+ __free_pages_ok(page, compound_order(page));
+ }
+@@ -654,6 +655,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
+ int i;
+ int bad = 0;
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ unsigned long index = 1UL << order;
++#endif
++
+ trace_mm_page_free_direct(page, order);
+ kmemcheck_free_shadow(page, order);
+
+@@ -669,6 +674,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ for (; index; --index)
++ sanitize_highpage(page + index - 1);
++#endif
++
+ arch_free_page(page, order);
+ kernel_map_pages(page, 1 << order, 0);
+
+@@ -692,6 +703,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+ local_irq_restore(flags);
+ }
+
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++bool __meminitdata extra_latent_entropy;
++
++static int __init setup_pax_extra_latent_entropy(char *str)
++{
++ extra_latent_entropy = true;
++ return 0;
++}
++early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
++
++volatile u64 latent_entropy __latent_entropy;
++EXPORT_SYMBOL(latent_entropy);
++#endif
++
+ /*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+@@ -715,6 +740,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+ set_page_count(p, 0);
+ }
+
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
++ u64 hash = 0;
++ size_t index, end = PAGE_SIZE * (1UL << order) / sizeof hash;
++ const u64 *data = lowmem_page_address(page);
++
++ for (index = 0; index < end; index++)
++ hash ^= hash + data[index];
++ latent_entropy ^= hash;
++ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
++ }
++#endif
++
+ set_page_refcounted(page);
+ __free_pages(page, order);
+ }
+@@ -784,8 +822,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+
++#ifndef CONFIG_PAX_MEMORY_SANITIZE
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
++#endif
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 5c29750..99f6386 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
+ static unsigned int pcpu_high_unit_cpu __read_mostly;
+
+ /* the address of the first chunk which starts with the kernel static area */
+-void *pcpu_base_addr __read_mostly;
++void *pcpu_base_addr __read_only;
+ EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+ static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index 70e814a..38e1f43 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -13,6 +13,7 @@
+ #include <linux/uio.h>
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
++#include <linux/security.h>
+ #include <linux/ptrace.h>
+ #include <linux/slab.h>
+ #include <linux/syscalls.h>
+@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
+ size_t iov_l_curr_offset = 0;
+ ssize_t iov_len;
+
++ return -ENOSYS; // PaX: until properly audited
++
+ /*
+ * Work out how many pages of struct pages we're going to need
+ * when eventually calling get_user_pages
+ */
+ for (i = 0; i < riovcnt; i++) {
+ iov_len = rvec[i].iov_len;
+- if (iov_len > 0) {
+- nr_pages_iov = ((unsigned long)rvec[i].iov_base
+- + iov_len)
+- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
+- / PAGE_SIZE + 1;
+- nr_pages = max(nr_pages, nr_pages_iov);
+- }
++ if (iov_len <= 0)
++ continue;
++ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
++ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
++ nr_pages = max(nr_pages, nr_pages_iov);
+ }
+
+ if (nr_pages == 0)
+@@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
+ goto free_proc_pages;
+ }
+
+- task_lock(task);
+- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+- task_unlock(task);
++ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
+ rc = -EPERM;
+ goto put_task_struct;
+ }
+- mm = task->mm;
+
+- if (!mm || (task->flags & PF_KTHREAD)) {
+- task_unlock(task);
+- rc = -EINVAL;
++ mm = mm_access(task, PTRACE_MODE_ATTACH);
++ if (!mm || IS_ERR(mm)) {
++ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
++ /*
++ * Explicitly map EACCES to EPERM as EPERM is a more
++ * appropriate error code for process_vm_readv/writev
++ */
++ if (rc == -EACCES)
++ rc = -EPERM;
+ goto put_task_struct;
+ }
+
+- atomic_inc(&mm->mm_users);
+- task_unlock(task);
+-
+ for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+ rc = process_vm_rw_single_vec(
+ (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
+diff --git a/mm/readahead.c b/mm/readahead.c
+index cbcbb02..dfdc1de 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -342,7 +342,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
+ * - length of the sequential read sequence, or
+ * - thrashing threshold in memory tight systems
+ */
+-static pgoff_t count_history_pages(struct address_space *mapping,
++static pgoff_t __intentional_overflow(-1) count_history_pages(struct address_space *mapping,
+ struct file_ra_state *ra,
+ pgoff_t offset, unsigned long max)
+ {
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 8685697..7c99a3c 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -153,6 +153,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct anon_vma_chain *avc;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct anon_vma_chain *avc_m = NULL;
++#endif
++
+ might_sleep();
+ if (unlikely(!anon_vma)) {
+ struct mm_struct *mm = vma->vm_mm;
+@@ -162,6 +166,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+ if (!avc)
+ goto out_enomem;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
++ if (!avc_m)
++ goto out_enomem_free_avc;
++#endif
++
+ anon_vma = find_mergeable_anon_vma(vma);
+ allocated = NULL;
+ if (!anon_vma) {
+@@ -175,6 +185,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+ /* page_table_lock to protect against threads */
+ spin_lock(&mm->page_table_lock);
+ if (likely(!vma->anon_vma)) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
++
++ if (vma_m) {
++ BUG_ON(vma_m->anon_vma);
++ vma_m->anon_vma = anon_vma;
++ avc_m->anon_vma = anon_vma;
++ avc_m->vma = vma;
++ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
++ list_add(&avc_m->same_anon_vma, &anon_vma->head);
++ avc_m = NULL;
++ }
++#endif
++
+ vma->anon_vma = anon_vma;
+ avc->anon_vma = anon_vma;
+ avc->vma = vma;
+@@ -188,12 +213,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
+
+ if (unlikely(allocated))
+ put_anon_vma(allocated);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (unlikely(avc_m))
++ anon_vma_chain_free(avc_m);
++#endif
++
+ if (unlikely(avc))
+ anon_vma_chain_free(avc);
+ }
+ return 0;
+
+ out_enomem_free_avc:
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (avc_m)
++ anon_vma_chain_free(avc_m);
++#endif
++
+ anon_vma_chain_free(avc);
+ out_enomem:
+ return -ENOMEM;
+@@ -244,7 +281,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
+ * Attach the anon_vmas from src to dst.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
+ {
+ struct anon_vma_chain *avc, *pavc;
+ struct anon_vma *root = NULL;
+@@ -277,7 +314,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ * the corresponding VMA in the parent process is attached to.
+ * Returns 0 on success, non-zero on failure.
+ */
+-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
+ {
+ struct anon_vma_chain *avc;
+ struct anon_vma *anon_vma;
+@@ -381,8 +418,10 @@ static void anon_vma_ctor(void *data)
+ void __init anon_vma_init(void)
+ {
+ anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
+- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
+- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
++ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
++ anon_vma_ctor);
++ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
++ SLAB_PANIC|SLAB_NO_SANITIZE);
+ }
+
+ /*
+@@ -581,7 +620,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
+ spinlock_t *ptl;
+
+ if (unlikely(PageHuge(page))) {
++ /* when pud is not present, pte will be NULL */
+ pte = huge_pte_offset(mm, address);
++ if (!pte)
++ return NULL;
++
+ ptl = &mm->page_table_lock;
+ goto check;
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index a78acf0..a31df98 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -31,7 +31,7 @@
+ #include <linux/export.h>
+ #include <linux/swap.h>
+
+-static struct vfsmount *shm_mnt;
++struct vfsmount *shm_mnt;
+
+ #ifdef CONFIG_SHMEM
+ /*
+@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
+ #define BOGO_DIRENT_SIZE 20
+
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+-#define SHORT_SYMLINK_LEN 128
++#define SHORT_SYMLINK_LEN 64
+
+ struct shmem_xattr {
+ struct list_head list; /* anchored by shmem_inode_info->xattr_list */
+@@ -1809,6 +1809,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+ struct { const char *prefix; size_t len; } arr[] = {
++
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
++#endif
++
+ { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ };
+@@ -1862,6 +1867,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+ if (err)
+ return err;
+
++#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++ return -EOPNOTSUPP;
++ if (size > 8)
++ return -EINVAL;
++ }
++#endif
++
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+
+@@ -2195,8 +2209,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+ int err = -ENOMEM;
+
+ /* Round up to L1_CACHE_BYTES to resist false sharing */
+- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
+- L1_CACHE_BYTES), GFP_KERNEL);
++ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
+ if (!sbinfo)
+ return -ENOMEM;
+
+diff --git a/mm/slab.c b/mm/slab.c
+index 4c3b671..884702c 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -151,7 +151,7 @@
+
+ /* Legal flag mask for kmem_cache_create(). */
+ #if DEBUG
+-# define CREATE_MASK (SLAB_RED_ZONE | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | SLAB_RED_ZONE | \
+ SLAB_POISON | SLAB_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
+ SLAB_STORE_USER | \
+@@ -159,8 +159,8 @@
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+ #else
+-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
+- SLAB_CACHE_DMA | \
++# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | \
++ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
+ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
+@@ -288,7 +288,7 @@ struct kmem_list3 {
+ * Need this for bootstrapping a per node allocator.
+ */
+ #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
++static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
+ #define CACHE_CACHE 0
+ #define SIZE_AC MAX_NUMNODES
+ #define SIZE_L3 (2 * MAX_NUMNODES)
+@@ -389,10 +389,12 @@ static void kmem_list3_init(struct kmem_list3 *parent)
+ if ((x)->max_freeable < i) \
+ (x)->max_freeable = i; \
+ } while (0)
+-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
+-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
+-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
+-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
++#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
++#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
++#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
++#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
++#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
++#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
+ #else
+ #define STATS_INC_ACTIVE(x) do { } while (0)
+ #define STATS_DEC_ACTIVE(x) do { } while (0)
+@@ -409,6 +411,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
+ #define STATS_INC_ALLOCMISS(x) do { } while (0)
+ #define STATS_INC_FREEHIT(x) do { } while (0)
+ #define STATS_INC_FREEMISS(x) do { } while (0)
++#define STATS_INC_SANITIZED(x) do { } while (0)
++#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
+ #endif
+
+ #if DEBUG
+@@ -538,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+- const struct slab *slab, void *obj)
++ const struct slab *slab, const void *obj)
+ {
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+@@ -559,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
+ struct cache_names {
+ char *name;
+ char *name_dma;
++ char *name_usercopy;
+ };
+
+ static struct cache_names __initdata cache_names[] = {
+-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
+ #include <linux/kmalloc_sizes.h>
+- {NULL,}
++ {NULL}
+ #undef CACHE
+ };
+
+@@ -752,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ if (unlikely(gfpflags & GFP_DMA))
+ return csizep->cs_dmacachep;
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (unlikely(gfpflags & GFP_USERCOPY))
++ return csizep->cs_usercopycachep;
++#endif
++
+ return csizep->cs_cachep;
+ }
+
+@@ -1370,7 +1381,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+ return notifier_from_errno(err);
+ }
+
+-static struct notifier_block __cpuinitdata cpucache_notifier = {
++static struct notifier_block cpucache_notifier = {
+ &cpuup_callback, NULL, 0
+ };
+
+@@ -1572,7 +1583,7 @@ void __init kmem_cache_init(void)
+ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+
+ if (INDEX_AC != INDEX_L3) {
+@@ -1580,7 +1591,7 @@ void __init kmem_cache_init(void)
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+
+@@ -1598,7 +1609,7 @@ void __init kmem_cache_init(void)
+ sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
+ NULL);
+ }
+ #ifdef CONFIG_ZONE_DMA
+@@ -1610,6 +1621,16 @@ void __init kmem_cache_init(void)
+ SLAB_PANIC,
+ NULL);
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ sizes->cs_usercopycachep = kmem_cache_create(
++ names->name_usercopy,
++ sizes->cs_size,
++ ARCH_KMALLOC_MINALIGN,
++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
++ NULL);
++#endif
++
+ sizes++;
+ names++;
+ }
+@@ -3662,6 +3683,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+ struct array_cache *ac = cpu_cache_get(cachep);
+
+ check_irq_off();
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab) {
++ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, obj_size(cachep));
++
++ if (cachep->ctor)
++ cachep->ctor(objp);
++
++ STATS_INC_SANITIZED(cachep);
++ } else
++ STATS_INC_NOT_SANITIZED(cachep);
++ }
++#endif
++
+ kmemleak_free_recursive(objp, cachep->flags);
+ objp = cache_free_debugcheck(cachep, objp, caller);
+
+@@ -3879,6 +3915,7 @@ void kfree(const void *objp)
+
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
+ return;
++ VM_BUG_ON(!virt_addr_valid(objp));
+ local_irq_save(flags);
+ kfree_debugcheck(objp);
+ c = virt_to_cache(objp);
+@@ -4216,6 +4253,9 @@ static void print_slabinfo_header(struct seq_file *m)
+ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+ "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ seq_puts(m, " : pax <sanitized> <not_sanitized>");
++#endif
+ #endif
+ seq_putc(m, '\n');
+ }
+@@ -4325,14 +4365,22 @@ static int s_show(struct seq_file *m, void *p)
+ }
+ /* cpu stats */
+ {
+- unsigned long allochit = atomic_read(&cachep->allochit);
+- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+- unsigned long freehit = atomic_read(&cachep->freehit);
+- unsigned long freemiss = atomic_read(&cachep->freemiss);
++ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
++ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
++ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
++ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
+
+ seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
+ allochit, allocmiss, freehit, freemiss);
+ }
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ {
++ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
++ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
++
++ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
++ }
++#endif
+ #endif
+ seq_putc(m, '\n');
+ return 0;
+@@ -4587,13 +4635,71 @@ static int __init slab_proc_init(void)
+ {
+ proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
+ #ifdef CONFIG_DEBUG_SLAB_LEAK
+- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
++ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
+ #endif
+ return 0;
+ }
+ module_init(slab_proc_init);
+ #endif
+
++bool is_usercopy_object(const void *ptr)
++{
++ struct page *page;
++ struct kmem_cache *cachep;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!slab_is_available())
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ cachep = page_get_cache(page);
++ return cachep->flags & SLAB_USERCOPY;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++ struct page *page;
++ struct kmem_cache *cachep;
++ struct slab *slabp;
++ unsigned int objnr;
++ unsigned long offset;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return "<null>";
++
++ if (!virt_addr_valid(ptr))
++ return NULL;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return NULL;
++
++ cachep = page_get_cache(page);
++ if (!(cachep->flags & SLAB_USERCOPY))
++ return cachep->name;
++
++ slabp = page_get_slab(page);
++ objnr = obj_to_index(cachep, slabp, ptr);
++ BUG_ON(objnr >= cachep->num);
++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
++ return NULL;
++
++ return cachep->name;
++}
++#endif
++
+ /**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+diff --git a/mm/slob.c b/mm/slob.c
+index 8105be4..8c1ce34 100644
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -29,7 +29,7 @@
+ * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
+ * alloc_pages() directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+- * allocation size in page->private so that it can be used to accurately
++ * allocation size in slob_page->size so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
+ *
+@@ -58,6 +58,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h> /* struct reclaim_state */
+@@ -100,9 +101,8 @@ struct slob_page {
+ union {
+ struct {
+ unsigned long flags; /* mandatory */
+- atomic_t _count; /* mandatory */
+ slobidx_t units; /* free units left in page */
+- unsigned long pad[2];
++ unsigned long size; /* size when >=PAGE_SIZE */
+ slob_t *free; /* first free slob_t in page */
+ struct list_head list; /* linked list of free pages */
+ };
+@@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
+ */
+ static inline int is_slob_page(struct slob_page *sp)
+ {
+- return PageSlab((struct page *)sp);
++ return PageSlab((struct page *)sp) && !sp->size;
+ }
+
+ static inline void set_slob_page(struct slob_page *sp)
+@@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
+
+ static inline struct slob_page *slob_page(const void *addr)
+ {
+- return (struct slob_page *)virt_to_page(addr);
++ return (struct slob_page *)virt_to_head_page(addr);
+ }
+
+ /*
+@@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+ /*
+ * Return the size of a slob block.
+ */
+-static slobidx_t slob_units(slob_t *s)
++static slobidx_t slob_units(const slob_t *s)
+ {
+ if (s->units > 0)
+ return s->units;
+@@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
+ /*
+ * Return the next free slob block pointer after this one.
+ */
+-static slob_t *slob_next(slob_t *s)
++static slob_t *slob_next(const slob_t *s)
+ {
+ slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
+ slobidx_t next;
+@@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
+ /*
+ * Returns true if s is the last free block in its page.
+ */
+-static int slob_last(slob_t *s)
++static int slob_last(const slob_t *s)
+ {
+ return !((unsigned long)slob_next(s) & ~PAGE_MASK);
+ }
+@@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+ if (!page)
+ return NULL;
+
++ set_slob_page(page);
+ return page_address(page);
+ }
+
+@@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+ if (!b)
+ return NULL;
+ sp = slob_page(b);
+- set_slob_page(sp);
+
+ spin_lock_irqsave(&slob_lock, flags);
+ sp->units = SLOB_UNITS(PAGE_SIZE);
+ sp->free = b;
++ sp->size = 0;
+ INIT_LIST_HEAD(&sp->list);
+ set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
+ set_slob_page_free(sp, slob_list);
+@@ -418,6 +419,11 @@ static void slob_free(void *block, int size)
+ return;
+ }
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab)
++ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
++#endif
++
+ if (!slob_page_free(sp)) {
+ /* This slob page is about to become partially free. Easy! */
+ sp->units = units;
+@@ -476,10 +482,9 @@ out:
+ * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
+ */
+
+-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
+ {
+- unsigned int *m;
+- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ slob_t *m;
+ void *ret;
+
+ gfp &= gfp_allowed_mask;
+@@ -494,7 +499,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+
+ if (!m)
+ return NULL;
+- *m = size;
++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
++ m[0].units = size;
++ m[1].units = align;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+@@ -506,16 +514,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+ gfp |= __GFP_COMP;
+ ret = slob_new_pages(gfp, order, node);
+ if (ret) {
+- struct page *page;
+- page = virt_to_page(ret);
+- page->private = size;
++ struct slob_page *sp;
++ sp = slob_page(ret);
++ sp->size = size;
+ }
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, PAGE_SIZE << order, gfp, node);
+ }
+
+- kmemleak_alloc(ret, size, 1, gfp);
++ return ret;
++}
++
++void *__kmalloc_node(size_t size, gfp_t gfp, int node)
++{
++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
++ void *ret = __kmalloc_node_align(size, gfp, node, align);
++
++ if (!ZERO_OR_NULL_PTR(ret))
++ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_node);
+@@ -530,16 +547,92 @@ void kfree(const void *block)
+ return;
+ kmemleak_free(block);
+
++ VM_BUG_ON(!virt_addr_valid(block));
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- slob_free(m, *m + align);
+- } else
++ slob_t *m = (slob_t *)(block - align);
++ slob_free(m, m[0].units + align);
++ } else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ put_page(&sp->page);
++ }
+ }
+ EXPORT_SYMBOL(kfree);
+
++bool is_usercopy_object(const void *ptr)
++{
++ if (!slab_is_available())
++ return false;
++
++ // PAX: TODO
++
++ return false;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++ struct slob_page *sp;
++ const slob_t *free;
++ const void *base;
++ unsigned long flags;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return "<null>";
++
++ if (!virt_addr_valid(ptr))
++ return NULL;
++
++ sp = slob_page(ptr);
++ if (!PageSlab((struct page *)sp))
++ return NULL;
++
++ if (sp->size) {
++ base = page_address(&sp->page);
++ if (base <= ptr && n <= sp->size - (ptr - base))
++ return NULL;
++ return "<slob>";
++ }
++
++ /* some tricky double walking to find the chunk */
++ spin_lock_irqsave(&slob_lock, flags);
++ base = (void *)((unsigned long)ptr & PAGE_MASK);
++ free = sp->free;
++
++ while ((void *)free <= ptr) {
++ base = free + slob_units(free);
++ free = slob_next(free);
++ }
++
++ while (base < (void *)free) {
++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
++ int size = SLOB_UNIT * SLOB_UNITS(m + align);
++ int offset;
++
++ if (ptr < base + align)
++ break;
++
++ offset = ptr - base - align;
++ if (offset >= m) {
++ base += size;
++ continue;
++ }
++
++ if (n > m - offset)
++ break;
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++ return NULL;
++ }
++
++ spin_unlock_irqrestore(&slob_lock, flags);
++ return "<slob>";
++}
++#endif
++
+ /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
+ size_t ksize(const void *block)
+ {
+@@ -552,10 +645,10 @@ size_t ksize(const void *block)
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+- unsigned int *m = (unsigned int *)(block - align);
+- return SLOB_UNITS(*m) * SLOB_UNIT;
++ slob_t *m = (slob_t *)(block - align);
++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
+ } else
+- return sp->page.private;
++ return sp->size;
+ }
+ EXPORT_SYMBOL(ksize);
+
+@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ {
+ struct kmem_cache *c;
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ c = __kmalloc_node_align(sizeof(struct kmem_cache),
++ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
++#else
+ c = slob_alloc(sizeof(struct kmem_cache),
+ GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
++#endif
+
+ if (c) {
+ c->name = name;
+@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+
+ lockdep_trace_alloc(flags);
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ b = __kmalloc_node_align(c->size, flags, node, c->align);
++#else
+ if (c->size < PAGE_SIZE) {
+ b = slob_alloc(c->size, flags, c->align, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
++ struct slob_page *sp;
++
+ b = slob_new_pages(flags, get_order(c->size), node);
++ sp = slob_page(b);
++ sp->size = c->size;
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
++#endif
+
+ if (c->ctor)
+ c->ctor(b);
+@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+
+ static void __kmem_cache_free(void *b, int size)
+ {
+- if (size < PAGE_SIZE)
++ struct slob_page *sp = slob_page(b);
++
++ if (is_slob_page(sp))
+ slob_free(b, size);
+- else
++ else {
++ clear_slob_page(sp);
++ free_slob_page(sp);
++ sp->size = 0;
+ slob_free_pages(b, get_order(size));
++ }
+ }
+
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+
+ void kmem_cache_free(struct kmem_cache *c, void *b)
+ {
++ int size = c->size;
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (size + c->align < PAGE_SIZE) {
++ size += c->align;
++ b -= c->align;
++ }
++#endif
++
+ kmemleak_free_recursive(b, c->flags);
+ if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slob_rcu *slob_rcu;
+- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+- slob_rcu->size = c->size;
++ slob_rcu = b + (size - sizeof(struct slob_rcu));
++ slob_rcu->size = size;
+ call_rcu(&slob_rcu->head, kmem_rcu_free);
+ } else {
+- __kmem_cache_free(b, c->size);
++ __kmem_cache_free(b, size);
+ }
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ trace_kfree(_RET_IP_, b);
++#else
+ trace_kmem_cache_free(_RET_IP_, b);
++#endif
++
+ }
+ EXPORT_SYMBOL(kmem_cache_free);
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 5710788..3d095c0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -186,7 +186,7 @@ static enum {
+ PARTIAL, /* Kmem_cache_node works */
+ UP, /* Everything works but does not show up in sysfs */
+ SYSFS /* Sysfs up */
+-} slab_state = DOWN;
++} slab_state __read_only = DOWN;
+
+ /* A list of all slab caches on the system */
+ static DECLARE_RWSEM(slub_lock);
+@@ -208,7 +208,7 @@ struct track {
+
+ enum track_item { TRACK_ALLOC, TRACK_FREE };
+
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *);
+ static int sysfs_slab_alias(struct kmem_cache *, const char *);
+ static void sysfs_slab_remove(struct kmem_cache *);
+@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
+ if (!t->addr)
+ return;
+
+- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
++ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
+ s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+ #ifdef CONFIG_STACKTRACE
+ {
+@@ -2537,6 +2537,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
+
+ slab_free_hook(s, x);
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
++ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->objsize);
++ if (s->ctor)
++ s->ctor(x);
++ }
++#endif
++
+ redo:
+ /*
+ * Determine the currently cpus per cpu slab.
+@@ -2572,6 +2580,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
+
+ page = virt_to_head_page(x);
+
++ BUG_ON(!PageSlab(page));
++
+ slab_free(s, page, x, _RET_IP_);
+
+ trace_kmem_cache_free(_RET_IP_, x);
+@@ -2605,7 +2615,7 @@ static int slub_min_objects;
+ * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
+ */
+-static int slub_nomerge;
++static int slub_nomerge = 1;
+
+ /*
+ * Calculate the order of allocation given an slab object size.
+@@ -3055,7 +3065,7 @@ static int kmem_cache_open(struct kmem_cache *s,
+ else
+ s->cpu_partial = 30;
+
+- s->refcount = 1;
++ atomic_set(&s->refcount, 1);
+ #ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+ #endif
+@@ -3159,8 +3169,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ down_write(&slub_lock);
+- s->refcount--;
+- if (!s->refcount) {
++ if (atomic_dec_and_test(&s->refcount)) {
+ list_del(&s->list);
+ up_write(&slub_lock);
+ if (kmem_cache_close(s)) {
+@@ -3189,6 +3198,10 @@ static struct kmem_cache *kmem_cache;
+ static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+ #endif
+
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
++#endif
++
+ static int __init setup_slub_min_order(char *str)
+ {
+ get_option(&str, &slub_min_order);
+@@ -3303,6 +3316,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+ return kmalloc_dma_caches[index];
+
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ if (flags & SLAB_USERCOPY)
++ return kmalloc_usercopy_caches[index];
++
++#endif
++
+ return kmalloc_caches[index];
+ }
+
+@@ -3371,6 +3391,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+ EXPORT_SYMBOL(__kmalloc_node);
+ #endif
+
++bool is_usercopy_object(const void *ptr)
++{
++ struct page *page;
++ struct kmem_cache *s;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return false;
++
++ if (!slab_is_available())
++ return false;
++
++ if (!virt_addr_valid(ptr))
++ return false;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return false;
++
++ s = page->slab;
++ return s->flags & SLAB_USERCOPY;
++}
++
++#ifdef CONFIG_PAX_USERCOPY
++const char *check_heap_object(const void *ptr, unsigned long n)
++{
++ struct page *page;
++ struct kmem_cache *s;
++ unsigned long offset;
++
++ if (ZERO_OR_NULL_PTR(ptr))
++ return "<null>";
++
++ if (!virt_addr_valid(ptr))
++ return NULL;
++
++ page = virt_to_head_page(ptr);
++
++ if (!PageSlab(page))
++ return NULL;
++
++ s = page->slab;
++ if (!(s->flags & SLAB_USERCOPY))
++ return s->name;
++
++ offset = (ptr - page_address(page)) % s->size;
++ if (offset <= s->objsize && n <= s->objsize - offset)
++ return NULL;
++
++ return s->name;
++}
++#endif
++
+ size_t ksize(const void *object)
+ {
+ struct page *page;
+@@ -3435,6 +3508,7 @@ void kfree(const void *x)
+ if (unlikely(ZERO_OR_NULL_PTR(x)))
+ return;
+
++ VM_BUG_ON(!virt_addr_valid(x));
+ page = virt_to_head_page(x);
+ if (unlikely(!PageSlab(page))) {
+ BUG_ON(!PageCompound(page));
+@@ -3645,7 +3719,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+ int node;
+
+ list_add(&s->list, &slab_caches);
+- s->refcount = -1;
++ atomic_set(&s->refcount, -1);
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n = get_node(s, node);
+@@ -3762,17 +3836,17 @@ void __init kmem_cache_init(void)
+
+ /* Caches that are not of the two-to-the-power-of size */
+ if (KMALLOC_MIN_SIZE <= 32) {
+- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
++ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
+ caches++;
+ }
+
+ if (KMALLOC_MIN_SIZE <= 64) {
+- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
++ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
+ caches++;
+ }
+
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
++ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
+ caches++;
+ }
+
+@@ -3814,6 +3888,22 @@ void __init kmem_cache_init(void)
+ }
+ }
+ #endif
++
++#ifdef CONFIG_PAX_USERCOPY_SLABS
++ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
++ struct kmem_cache *s = kmalloc_caches[i];
++
++ if (s && s->size) {
++ char *name = kasprintf(GFP_NOWAIT,
++ "usercopy-kmalloc-%d", s->objsize);
++
++ BUG_ON(!name);
++ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
++ s->objsize, SLAB_USERCOPY);
++ }
++ }
++#endif
++
+ printk(KERN_INFO
+ "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+ " CPUs=%d, Nodes=%d\n",
+@@ -3840,7 +3930,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+- if (s->refcount < 0)
++ if (atomic_read(&s->refcount) < 0)
+ return 1;
+
+ return 0;
+@@ -3899,7 +3989,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ down_write(&slub_lock);
+ s = find_mergeable(size, align, flags, name, ctor);
+ if (s) {
+- s->refcount++;
++ atomic_inc(&s->refcount);
+ /*
+ * Adjust the object sizes so that we clear
+ * the complete object on kzalloc.
+@@ -3908,7 +3998,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+
+ if (sysfs_slab_alias(s, name)) {
+- s->refcount--;
++ atomic_dec(&s->refcount);
+ goto err;
+ }
+ up_write(&slub_lock);
+@@ -3979,7 +4069,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata slab_notifier = {
++static struct notifier_block slab_notifier = {
+ .notifier_call = slab_cpuup_callback
+ };
+
+@@ -4037,7 +4127,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+ }
+ #endif
+
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int count_inuse(struct page *page)
+ {
+ return page->inuse;
+@@ -4424,12 +4514,12 @@ static void resiliency_test(void)
+ validate_slab_cache(kmalloc_caches[9]);
+ }
+ #else
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static void resiliency_test(void) {};
+ #endif
+ #endif
+
+-#ifdef CONFIG_SYSFS
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ enum slab_stat_type {
+ SL_ALL, /* All slabs */
+ SL_PARTIAL, /* Only partially allocated slabs */
+@@ -4670,7 +4760,7 @@ SLAB_ATTR_RO(ctor);
+
+ static ssize_t aliases_show(struct kmem_cache *s, char *buf)
+ {
+- return sprintf(buf, "%d\n", s->refcount - 1);
++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
+ }
+ SLAB_ATTR_RO(aliases);
+
+@@ -5237,6 +5327,7 @@ static char *create_unique_id(struct kmem_cache *s)
+ return name;
+ }
+
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_add(struct kmem_cache *s)
+ {
+ int err;
+@@ -5265,7 +5356,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
+ }
+
+ s->kobj.kset = slab_kset;
+- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
++ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+ if (err) {
+ kobject_put(&s->kobj);
+ return err;
+@@ -5299,6 +5390,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+ kobject_del(&s->kobj);
+ kobject_put(&s->kobj);
+ }
++#endif
+
+ /*
+ * Need to buffer aliases during bootup until sysfs becomes
+@@ -5312,6 +5404,7 @@ struct saved_alias {
+
+ static struct saved_alias *alias_list;
+
++#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
+ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+ {
+ struct saved_alias *al;
+@@ -5334,6 +5427,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+ alias_list = al;
+ return 0;
+ }
++#endif
+
+ static int __init slab_sysfs_init(void)
+ {
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index 1b7e22a..3fcd4f3 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+ void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!p)
+ return NULL;
+- pud_populate(&init_mm, pud, p);
++ pud_populate_kernel(&init_mm, pud, p);
+ }
+ return pud;
+ }
+@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+ void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!p)
+ return NULL;
+- pgd_populate(&init_mm, pgd, p);
++ pgd_populate_kernel(&init_mm, pgd, p);
+ }
+ return pgd;
+ }
+diff --git a/mm/swap.c b/mm/swap.c
+index 55b266d..a532537 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -31,6 +31,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+
+ #include "internal.h"
+
+@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
+
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
++ if (!PageHuge(page))
++ BUG_ON(dtor != free_compound_page);
+ (*dtor)(page);
+ }
+
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index fad1830..6aa3a7f 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
+
+ static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
+ /* Activity counter to indicate that a swapon or swapoff has occurred */
+-static atomic_t proc_poll_event = ATOMIC_INIT(0);
++static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
+
+ static inline unsigned char swap_count(unsigned char ent)
+ {
+@@ -1668,7 +1668,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ }
+ filp_close(swap_file, NULL);
+ err = 0;
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ out_dput:
+@@ -1684,8 +1684,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
+
+ poll_wait(file, &proc_poll_wait, wait);
+
+- if (seq->poll_event != atomic_read(&proc_poll_event)) {
+- seq->poll_event = atomic_read(&proc_poll_event);
++ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+ return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
+ }
+
+@@ -1783,7 +1783,7 @@ static int swaps_open(struct inode *inode, struct file *file)
+ return ret;
+
+ seq = file->private_data;
+- seq->poll_event = atomic_read(&proc_poll_event);
++ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
+ return 0;
+ }
+
+@@ -2117,7 +2117,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ (p->flags & SWP_DISCARDABLE) ? "D" : "");
+
+ mutex_unlock(&swapon_mutex);
+- atomic_inc(&proc_poll_event);
++ atomic_inc_unchecked(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+
+ if (S_ISREG(inode->i_mode))
+diff --git a/mm/util.c b/mm/util.c
+index 136ac4f..f917fa9 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ }
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index eeba3bb..a22618a 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
++ BUG_ON(!pte_exec(*pte));
++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
++ continue;
++ }
++#endif
++
++ {
++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
++ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
++ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+
+@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
++
++ pax_open_kernel();
+ do {
+ struct page *page = pages[*nr];
+
+- if (WARN_ON(!pte_none(*pte)))
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (pgprot_val(prot) & _PAGE_NX)
++#endif
++
++ if (!pte_none(*pte)) {
++ pax_close_kernel();
++ WARN_ON(1);
+ return -EBUSY;
+- if (WARN_ON(!page))
++ }
++ if (!page) {
++ pax_close_kernel();
++ WARN_ON(1);
+ return -ENOMEM;
++ }
+ set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+ (*nr)++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
++ pax_close_kernel();
+ return 0;
+ }
+
+@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+ pmd_t *pmd;
+ unsigned long next;
+
+- pmd = pmd_alloc(&init_mm, pud, addr);
++ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+ pud_t *pud;
+ unsigned long next;
+
+- pud = pud_alloc(&init_mm, pgd, addr);
++ pud = pud_alloc_kernel(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+@@ -196,6 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
+ if (addr >= MODULES_VADDR && addr < MODULES_END)
+ return 1;
+ #endif
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
++ return 1;
++#endif
++
+ return is_vmalloc_addr(x);
+ }
+
+@@ -216,8 +246,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
++#ifdef CONFIG_X86
++ if (!pud_large(*pud))
++#endif
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_X86
++ if (!pmd_large(*pmd))
++#endif
+ if (!pmd_none(*pmd)) {
+ pte_t *ptep, pte;
+
+@@ -1295,6 +1331,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+ struct vm_struct *area;
+
+ BUG_ON(in_interrupt());
++
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (flags & VM_KERNEXEC) {
++ if (start != VMALLOC_START || end != VMALLOC_END)
++ return NULL;
++ start = (unsigned long)MODULES_EXEC_VADDR;
++ end = (unsigned long)MODULES_EXEC_END;
++ }
++#endif
++
+ if (flags & VM_IOREMAP) {
+ int bit = fls(size);
+
+@@ -1527,6 +1573,11 @@ void *vmap(struct page **pages, unsigned int count,
+ if (count > totalram_pages)
+ return NULL;
+
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ flags |= VM_KERNEXEC;
++#endif
++
+ area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+ __builtin_return_address(0));
+ if (!area)
+@@ -1628,6 +1679,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+ goto fail;
+
++#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
++ if (!(pgprot_val(prot) & _PAGE_NX))
++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
++ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
++ else
++#endif
++
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+ start, end, node, gfp_mask, caller);
+ if (!area)
+@@ -1801,10 +1859,9 @@ EXPORT_SYMBOL(vzalloc_node);
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+-
+ void *vmalloc_exec(unsigned long size)
+ {
+- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
+ -1, __builtin_return_address(0));
+ }
+
+@@ -2099,6 +2156,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+
++ BUG_ON(vma->vm_mirror);
++
+ if ((PAGE_SIZE-1) & (unsigned long)addr)
+ return -EINVAL;
+
+@@ -2351,8 +2410,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ return NULL;
+ }
+
+- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
++ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
++ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
+ if (!vas || !vms)
+ goto err_free;
+
+@@ -2536,11 +2595,15 @@ static int s_show(struct seq_file *m, void *p)
+ {
+ struct vm_struct *v = p;
+
+- seq_printf(m, "0x%p-0x%p %7ld",
++ seq_printf(m, "0x%pP-0x%pP %7ld",
+ v->addr, v->addr + v->size, v->size);
+
+ if (v->caller)
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(m, " %pK", v->caller);
++#else
+ seq_printf(m, " %pS", v->caller);
++#endif
+
+ if (v->nr_pages)
+ seq_printf(m, " pages=%d", v->nr_pages);
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 8fd603b..495a5a1 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
+ *
+ * vm_stat contains the global counters
+ */
+-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+ EXPORT_SYMBOL(vm_stat);
+
+ #ifdef CONFIG_SMP
+@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
+ v = p->vm_stat_diff[i];
+ p->vm_stat_diff[i] = 0;
+ local_irq_restore(flags);
+- atomic_long_add(v, &zone->vm_stat[i]);
++ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
+ #ifdef CONFIG_NUMA
+ /* 3 seconds idle till flush */
+@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (global_diff[i])
+- atomic_long_add(global_diff[i], &vm_stat[i]);
++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
+ }
+
+ #endif
+@@ -1193,7 +1193,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __cpuinitdata vmstat_notifier =
++static struct notifier_block vmstat_notifier =
+ { &vmstat_cpuup_callback, NULL, 0 };
+ #endif
+
+@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
+ start_cpu_timer(cpu);
+ #endif
+ #ifdef CONFIG_PROC_FS
+- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
++ {
++ mode_t gr_mode = S_IRUGO;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ gr_mode = S_IRUSR;
++#endif
++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
++#else
++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
++#endif
++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
++ }
+ #endif
+ return 0;
+ }
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 963f285..3e3874d 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -513,7 +513,7 @@ out:
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block vlan_notifier_block __read_mostly = {
++static struct notifier_block vlan_notifier_block = {
+ .notifier_call = vlan_device_event,
+ };
+
+@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+- if ((args.u.name_type >= 0) &&
+- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+ struct vlan_net *vn;
+
+ vn = net_generic(net, vlan_net_id);
+diff --git a/net/9p/mod.c b/net/9p/mod.c
+index 2664d12..b2803fe 100644
+--- a/net/9p/mod.c
++++ b/net/9p/mod.c
+@@ -57,7 +57,7 @@ static LIST_HEAD(v9fs_trans_list);
+ void v9fs_register_trans(struct p9_trans_module *m)
+ {
+ spin_lock(&v9fs_trans_lock);
+- list_add_tail(&m->list, &v9fs_trans_list);
++ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
+ spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_register_trans);
+@@ -70,7 +70,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
+ void v9fs_unregister_trans(struct p9_trans_module *m)
+ {
+ spin_lock(&v9fs_trans_lock);
+- list_del_init(&m->list);
++ pax_list_del_init((struct list_head *)&m->list);
+ spin_unlock(&v9fs_trans_lock);
+ }
+ EXPORT_SYMBOL(v9fs_unregister_trans);
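+
+pax_list_add_tail()/pax_list_del_init() exist because the constify plugin
+makes objects like p9_trans_module read-only after init; the wrappers
+perform the plain list operation inside an open/close window. A minimal
+sketch of the idea:
+
+        static inline void pax_list_add_tail(struct list_head *new,
+                                             struct list_head *head)
+        {
+                pax_open_kernel();
+                list_add_tail(new, head);
+                pax_close_kernel();
+        }
+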
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index fdfdb57..38d368c 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
+ oldfs = get_fs();
+ set_fs(get_ds());
+ /* The cast to a user pointer is valid due to the set_fs() */
+- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
++ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
+ set_fs(oldfs);
+
+ if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
+index f41f026..fe76ea8 100644
+--- a/net/atm/atm_misc.c
++++ b/net/atm/atm_misc.c
+@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
+ if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
+ return 1;
+ atm_return(vcc, truesize);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return 0;
+ }
+ EXPORT_SYMBOL(atm_charge);
+@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
+ }
+ }
+ atm_return(vcc, guess);
+- atomic_inc(&vcc->stats->rx_drop);
++ atomic_inc_unchecked(&vcc->stats->rx_drop);
+ return NULL;
+ }
+ EXPORT_SYMBOL(atm_alloc_charge);
+@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
+
+ void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
+
+ void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+ __SONET_ITEMS
+ #undef __HANDLE_ITEM
+ }
+diff --git a/net/atm/lec.h b/net/atm/lec.h
+index dfc0719..47c5322 100644
+--- a/net/atm/lec.h
++++ b/net/atm/lec.h
+@@ -48,7 +48,7 @@ struct lane2_ops {
+ const u8 *tlvs, u32 sizeoftlvs);
+ void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
+ const u8 *tlvs, u32 sizeoftlvs);
+-};
++} __no_const;
+
+ /*
+ * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
+diff --git a/net/atm/proc.c b/net/atm/proc.c
+index 0d020de..011c7bb 100644
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
+ const struct k_atm_aal_stats *stats)
+ {
+ seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
+- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+- atomic_read(&stats->rx_drop));
++ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
++ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
++ atomic_read_unchecked(&stats->rx_drop));
+ }
+
+ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 23f45ce..c748f1a 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
+ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
+@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
+ static void subtract_aal_stats(struct k_atm_aal_stats *from,
+ struct atm_aal_stats *to)
+ {
+-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ }
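+
+The __HANDLE_ITEM/#undef idiom above is an X-macro: __AAL_STAT_ITEMS (and
+__SONET_ITEMS in atm_misc.c) is itself a list of __HANDLE_ITEM(field)
+invocations, so redefining __HANDLE_ITEM stamps out one statement per
+statistic. With hypothetical fields tx and rx, copy_aal_stats() expands
+to:
+
+        to->tx = atomic_read_unchecked(&from->tx);
+        to->rx = atomic_read_unchecked(&from->rx);
+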
+diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
+index ebe0ef3..d5b0a8e 100644
+--- a/net/ax25/sysctl_net_ax25.c
++++ b/net/ax25/sysctl_net_ax25.c
+@@ -31,7 +31,7 @@ static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
+
+ static struct ctl_table_header *ax25_table_header;
+
+-static ctl_table *ax25_table;
++static ctl_table_no_const *ax25_table;
+ static int ax25_table_size;
+
+ static struct ctl_path ax25_path[] = {
+@@ -174,7 +174,7 @@ void ax25_register_sysctl(void)
+ }
+
+ for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
+- struct ctl_table *child = kmemdup(ax25_param_table,
++ ctl_table_no_const *child = kmemdup(ax25_param_table,
+ sizeof(ax25_param_table),
+ GFP_ATOMIC);
+ if (!child) {
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 3512e25..2b33401 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+
+ /* change sequence number to network order */
+ batman_ogm_packet->seqno =
+- htonl((uint32_t)atomic_read(&hard_iface->seqno));
++ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
+
+ batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batman_ogm_packet->tt_crc = htons((uint16_t)
+@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+ else
+ batman_ogm_packet->gw_flags = NO_FLAGS;
+
+- atomic_inc(&hard_iface->seqno);
++ atomic_inc_unchecked(&hard_iface->seqno);
+
+ slide_own_bcast_window(hard_iface);
+ bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
+ return;
+
+ /* could be changed by schedule_own_packet() */
+- if_incoming_seqno = atomic_read(&if_incoming->seqno);
++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
+
+ has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 7704df4..beb4e16 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
+ hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+ dev_add_pack(&hard_iface->batman_adv_ptype);
+
+- atomic_set(&hard_iface->seqno, 1);
+- atomic_set(&hard_iface->frag_seqno, 1);
++ atomic_set_unchecked(&hard_iface->seqno, 1);
++ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
+ bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index f9cc957..efd9dae 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+
+ /* set broadcast sequence number */
+ bcast_packet->seqno =
+- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
++ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
+
+ add_bcast_packet_to_list(bat_priv, skb, 1);
+
+@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
+ atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+- atomic_set(&bat_priv->bcast_seqno, 1);
++ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->ttvn, 0);
+ atomic_set(&bat_priv->tt_local_changes, 0);
+ atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index ab8d0fe..ceba3fd 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -38,8 +38,8 @@ struct hard_iface {
+ int16_t if_num;
+ char if_status;
+ struct net_device *net_dev;
+- atomic_t seqno;
+- atomic_t frag_seqno;
++ atomic_unchecked_t seqno;
++ atomic_unchecked_t frag_seqno;
+ unsigned char *packet_buff;
+ int packet_len;
+ struct kobject *hardif_obj;
+@@ -154,7 +154,7 @@ struct bat_priv {
+ atomic_t orig_interval; /* uint */
+ atomic_t hop_penalty; /* uint */
+ atomic_t log_level; /* uint */
+- atomic_t bcast_seqno;
++ atomic_unchecked_t bcast_seqno;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
+ atomic_t ttvn; /* translation table version number */
+diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
+index 07d1c1d..7e9bea9 100644
+--- a/net/batman-adv/unicast.c
++++ b/net/batman-adv/unicast.c
+@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
+
+- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
++ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
+ frag1->seqno = htons(seqno - 1);
+ frag2->seqno = htons(seqno);
+
+diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
+index 9b67f3d..f6d7e5c 100644
+--- a/net/bluetooth/Makefile
++++ b/net/bluetooth/Makefile
+@@ -8,6 +8,6 @@ obj-$(CONFIG_BT_BNEP) += bnep/
+ obj-$(CONFIG_BT_CMTP) += cmtp/
+ obj-$(CONFIG_BT_HIDP) += hidp/
+
+-bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+-bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
++bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o smp.o
++bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
+ bluetooth-$(CONFIG_BT_SCO) += sco.o
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index aa12649..a22d595 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -235,7 +235,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+- memcpy(cp.ltk, ltk, sizeof(ltk));
++ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+ }
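+
+The hci_conn.c fix deserves a note: an array-typed parameter decays to a
+pointer, so inside hci_le_ltk_reply() sizeof(ltk) is sizeof(u8 *), i.e. 4
+or 8, never 16; bounding the memcpy by the destination field copies the
+whole key. Illustration (hypothetical names):
+
+        struct reply { u8 ltk[16]; } r;
+
+        static void demo(const u8 key[16])
+        {
+                memcpy(r.ltk, key, sizeof(key));   /* BUG: copies 4 or 8 bytes */
+                memcpy(r.ltk, key, sizeof(r.ltk)); /* correct: copies 16 bytes */
+        }
+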
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index bb78c75..a48650e 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -605,7 +605,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
+ uf.event_mask[1] = *((u32 *) f->event_mask + 1);
+ }
+
+- len = min_t(unsigned int, len, sizeof(uf));
++ len = min((size_t)len, sizeof(uf));
+ if (copy_from_user(&uf, optval, len)) {
+ err = -EFAULT;
+ break;
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 0274157..f8afbf3c7 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -945,9 +945,9 @@ static int hidp_setup_hid(struct hidp_session *session,
+ hid->version = req->version;
+ hid->country = req->country;
+
+- strncpy(hid->name, req->name, sizeof(req->name) - 1);
+- strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
+- strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
++ strncpy(hid->name, req->name, sizeof(hid->name) - 1);
++ strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), sizeof(hid->phys) - 1);
++ strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), sizeof(hid->uniq) - 1);
+
+ hid->dev.parent = hidp_get_device(session);
+ hid->ll_driver = &hidp_hid_driver;
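+
+The hidp change is the string flavour of the same mistake: the old code
+bounded strncpy() by the *source* size (or a bare 64) rather than the
+destination. The safe pattern bounds by the destination and terminates
+explicitly, since strncpy() does not guarantee a NUL:
+
+        char dst[64];
+
+        strncpy(dst, src, sizeof(dst) - 1);
+        dst[sizeof(dst) - 1] = '\0';
+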
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index dd7c019..9d19c31 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2181,8 +2181,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
+ break;
+
+ case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
+
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+ rfc.mode != chan->mode)
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 158887a..1b70c49 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -484,7 +484,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+- int len, err = 0;
++ int err = 0;
++ size_t len = optlen;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -506,7 +507,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = (__u16)chan->tx_win;
+
+- len = min_t(unsigned int, sizeof(opts), optlen);
++ len = min(sizeof(opts), len);
+ if (copy_from_user((char *) &opts, optval, len)) {
+ err = -EFAULT;
+ break;
+@@ -572,7 +573,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+ struct bt_security sec;
+ struct bt_power pwr;
+ struct l2cap_conn *conn;
+- int len, err = 0;
++ int err = 0;
++ size_t len = optlen;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -595,7 +597,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
++ len = min(sizeof(sec), len);
+ if (copy_from_user((char *) &sec, optval, len)) {
+ err = -EFAULT;
+ break;
+@@ -671,7 +673,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+
+ pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+- len = min_t(unsigned int, sizeof(pwr), optlen);
++ len = min(sizeof(pwr), len);
+ if (copy_from_user((char *) &pwr, optval, len)) {
+ err = -EFAULT;
+ break;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 14c4864..77ff888 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -684,7 +684,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+ struct sock *sk = sock->sk;
+ struct bt_security sec;
+ int err = 0;
+- size_t len;
++ size_t len = optlen;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+@@ -706,7 +706,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
+
+ sec.level = BT_SECURITY_LOW;
+
+- len = min_t(unsigned int, sizeof(sec), optlen);
++ len = min(sizeof(sec), len);
+ if (copy_from_user((char *) &sec, optval, len)) {
+ err = -EFAULT;
+ break;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index a06deca..2269299 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1410,7 +1410,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+ nexthdr = ip6h->nexthdr;
+ offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+
+- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
++ if (nexthdr != IPPROTO_ICMPV6)
+ return 0;
+
+ /* Okay, we found ICMPv6 header */
+diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
+index 5449294..c1d8d99 100644
+--- a/net/bridge/netfilter/ebt_ulog.c
++++ b/net/bridge/netfilter/ebt_ulog.c
+@@ -158,6 +158,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
+ ub->qlen++;
+
+ pm = NLMSG_DATA(nlh);
++ memset(pm, 0, sizeof(*pm));
+
+ /* Fill in the ulog data */
+ pm->version = EBT_ULOG_VERSION;
+@@ -170,8 +171,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
+ pm->hook = hooknr;
+ if (uloginfo->prefix != NULL)
+ strcpy(pm->prefix, uloginfo->prefix);
+- else
+- *(pm->prefix) = '\0';
+
+ if (in) {
+ strcpy(pm->physindev, in->name);
+@@ -181,16 +180,14 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
+ strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
+ else
+ strcpy(pm->indev, in->name);
+- } else
+- pm->indev[0] = pm->physindev[0] = '\0';
++ }
+
+ if (out) {
+ /* If out exists, then out is a bridge port */
+ strcpy(pm->physoutdev, out->name);
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
+- } else
+- pm->outdev[0] = pm->physoutdev[0] = '\0';
++ }
+
+ if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
+ BUG();
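+
+The ebt_ulog change swaps piecemeal field clearing for one memset() up
+front, so every byte of the netlink payload, including struct padding, is
+zeroed before the used fields are filled in. That is the standard defence
+against leaking uninitialized kernel memory to userspace (hypothetical
+struct for illustration):
+
+        struct report r;
+
+        memset(&r, 0, sizeof(r));       /* zeroes fields *and* padding */
+        r.version = 1;
+        strlcpy(r.dev, name, sizeof(r.dev));
+        if (copy_to_user(ubuf, &r, sizeof(r)))
+                return -EFAULT;
+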
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 5864cc4..6ddb362 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+ tmp.valid_hooks = t->table->valid_hooks;
+ }
+ mutex_unlock(&ebt_mutex);
+- if (copy_to_user(user, &tmp, *len) != 0){
++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
+ BUGPRINT("c2u Didn't work\n");
+ ret = -EFAULT;
+ break;
+@@ -2323,7 +2323,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+ goto out;
+ tmp.valid_hooks = t->valid_hooks;
+
+- if (copy_to_user(user, &tmp, *len) != 0) {
++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -2334,7 +2334,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+ tmp.entries_size = t->table->entries_size;
+ tmp.valid_hooks = t->table->valid_hooks;
+
+- if (copy_to_user(user, &tmp, *len) != 0) {
++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
+ ret = -EFAULT;
+ break;
+ }
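+
+The added *len > sizeof(tmp) checks in ebtables close a user-controlled
+over-read: *len arrives from userspace, so the copy must never exceed the
+kernel object backing it. Generic shape (hypothetical names):
+
+        if (ulen > sizeof(kobj) || copy_to_user(ubuf, &kobj, ulen))
+                return -EFAULT;
+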
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 7fac75f..5d8351b 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
+ #ifdef CONFIG_DEBUG_FS
+ struct debug_fs_counter {
+ atomic_t caif_nr_socks;
+- atomic_t caif_sock_create;
+- atomic_t num_connect_req;
+- atomic_t num_connect_resp;
+- atomic_t num_connect_fail_resp;
+- atomic_t num_disconnect;
+- atomic_t num_remote_shutdown_ind;
+- atomic_t num_tx_flow_off_ind;
+- atomic_t num_tx_flow_on_ind;
+- atomic_t num_rx_flow_off;
+- atomic_t num_rx_flow_on;
++ atomic_unchecked_t caif_sock_create;
++ atomic_unchecked_t num_connect_req;
++ atomic_unchecked_t num_connect_resp;
++ atomic_unchecked_t num_connect_fail_resp;
++ atomic_unchecked_t num_disconnect;
++ atomic_unchecked_t num_remote_shutdown_ind;
++ atomic_unchecked_t num_tx_flow_off_ind;
++ atomic_unchecked_t num_tx_flow_on_ind;
++ atomic_unchecked_t num_rx_flow_off;
++ atomic_unchecked_t num_rx_flow_on;
+ };
+ static struct debug_fs_counter cnt;
+ #define dbfs_atomic_inc(v) atomic_inc_return(v)
++#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
+ #define dbfs_atomic_dec(v) atomic_dec_return(v)
+ #else
+ #define dbfs_atomic_inc(v) 0
+@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
+ set_rx_flow_off(cf_sk);
+- dbfs_atomic_inc(&cnt.num_rx_flow_off);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+
+@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ set_rx_flow_off(cf_sk);
+ if (net_ratelimit())
+ pr_debug("sending flow OFF due to rmem_schedule\n");
+- dbfs_atomic_inc(&cnt.num_rx_flow_off);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+ skb->dev = NULL;
+@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* OK from modem to start sending again */
+- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asks us to shut up */
+- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
+ set_tx_flow_off(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
+ /* We're now connected */
+ caif_client_register_refcnt(&cf_sk->layer,
+ cfsk_hold, cfsk_put);
+- dbfs_atomic_inc(&cnt.num_connect_resp);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
+ cf_sk->sk.sk_state = CAIF_CONNECTED;
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Connect request failed */
+- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
+ cf_sk->sk.sk_err = ECONNREFUSED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Modem has closed this connection, or device is down. */
+- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
++ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ cf_sk->sk.sk_err = ECONNRESET;
+ set_rx_flow_on(cf_sk);
+@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
+ return;
+
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
+- dbfs_atomic_inc(&cnt.num_rx_flow_on);
++ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
+ set_rx_flow_on(cf_sk);
+ caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
+ }
+@@ -852,7 +853,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
+ /*ifindex = id of the interface.*/
+ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
+- dbfs_atomic_inc(&cnt.num_connect_req);
++ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
+ cf_sk->layer.receive = caif_sktrecv_cb;
+
+ err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
+@@ -941,7 +942,7 @@ static int caif_release(struct socket *sock)
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ sock->sk = NULL;
+
+- dbfs_atomic_inc(&cnt.num_disconnect);
++ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
+
+ WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
+ if (cf_sk->debugfs_socket_dir != NULL)
+@@ -1120,7 +1121,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
+ cf_sk->conn_req.protocol = protocol;
+ /* Increase the number of sockets created. */
+ dbfs_atomic_inc(&cnt.caif_nr_socks);
+- num = dbfs_atomic_inc(&cnt.caif_sock_create);
++ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
+ #ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR(debugfsdir)) {
+
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 84efbe4..51d47bc 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -9,6 +9,7 @@
+ #include <linux/stddef.h>
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
++#include <linux/sched.h>
+ #include <net/caif/caif_layer.h>
+ #include <net/caif/cfpkt.h>
+ #include <net/caif/cfctrl.h>
+@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.id = 0xff;
+ cfsrvl_init(&this->serv, 0, &dev_info, false);
+- atomic_set(&this->req_seq_no, 1);
+- atomic_set(&this->rsp_seq_no, 1);
++ atomic_set_unchecked(&this->req_seq_no, 1);
++ atomic_set_unchecked(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+ {
+ spin_lock_bh(&ctrl->info_list_lock);
+- atomic_inc(&ctrl->req_seq_no);
+- req->sequence_no = atomic_read(&ctrl->req_seq_no);
++ atomic_inc_unchecked(&ctrl->req_seq_no);
++ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
+ list_add_tail(&req->list, &ctrl->list);
+ spin_unlock_bh(&ctrl->info_list_lock);
+ }
+@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+ if (p != first)
+ pr_warn("Requests are not received in order\n");
+
+- atomic_set(&ctrl->rsp_seq_no,
++ atomic_set_unchecked(&ctrl->rsp_seq_no,
+ p->sequence_no);
+ list_del(&p->list);
+ goto out;
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
+index 8656909..a2ae45d 100644
+--- a/net/caif/chnl_net.c
++++ b/net/caif/chnl_net.c
+@@ -74,7 +74,6 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+ struct sk_buff *skb;
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ int pktlen;
+- int err = 0;
+ const u8 *ip_version;
+ u8 buf;
+
+@@ -95,8 +94,11 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+
+ /* check the version of IP */
+ ip_version = skb_header_pointer(skb, 0, 1, &buf);
+- if (!ip_version)
++ if (!ip_version) {
++ kfree_skb(skb);
+ return -EINVAL;
++ }
++
+ switch (*ip_version >> 4) {
+ case 4:
+ skb->protocol = htons(ETH_P_IP);
+@@ -105,6 +107,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
++ kfree_skb(skb);
++ priv->netdev->stats.rx_errors++;
+ return -EINVAL;
+ }
+
+@@ -123,7 +127,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+ priv->netdev->stats.rx_packets++;
+ priv->netdev->stats.rx_bytes += pktlen;
+
+- return err;
++ return 0;
+ }
+
+ static int delete_device(struct chnl_net *dev)
+@@ -221,12 +225,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ if (skb->len > priv->netdev->mtu) {
+ pr_warn("Size of skb exceeded MTU\n");
+- return -ENOSPC;
++ kfree_skb(skb);
++ dev->stats.tx_errors++;
++ return NETDEV_TX_OK;
+ }
+
+ if (!priv->flowenabled) {
+ pr_debug("dropping packets flow off\n");
+- return NETDEV_TX_BUSY;
++ kfree_skb(skb);
++ dev->stats.tx_dropped++;
++ return NETDEV_TX_OK;
+ }
+
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+@@ -240,9 +248,8 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* Send the packet down the stack. */
+ result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
+ if (result) {
+- if (result == -EAGAIN)
+- result = NETDEV_TX_BUSY;
+- return result;
++ dev->stats.tx_dropped++;
++ return NETDEV_TX_OK;
+ }
+
+ /* Update statistics. */
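+
+The chnl_net.c error paths follow the ndo_start_xmit contract: returning
+NETDEV_TX_BUSY (or a negative error) makes the core requeue and retry the
+same skb, which livelocks when the condition is persistent. For
+unrecoverable per-packet errors the driver must count the drop, free the
+skb itself, and report success. Canonical shape (sketch):
+
+        static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
+        {
+                if (skb->len > dev->mtu) {
+                        dev->stats.tx_dropped++;
+                        kfree_skb(skb);      /* the driver consumed it */
+                        return NETDEV_TX_OK; /* not a condition the core can fix */
+                }
+                /* ... hand the packet to the hardware ... */
+                return NETDEV_TX_OK;
+        }
+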
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 0ce2ad0..cb92a90 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -818,7 +818,7 @@ static const struct net_proto_family can_family_ops = {
+ };
+
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+ .notifier_call = can_notifier,
+ };
+
+diff --git a/net/can/gw.c b/net/can/gw.c
+index f78f898..d7aa843 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+ MODULE_ALIAS("can-gw");
+
+ HLIST_HEAD(cgw_list);
+-static struct notifier_block notifier;
+
+ static struct kmem_cache *cgw_cache __read_mostly;
+
+@@ -911,6 +910,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+ return err;
+ }
+
++static struct notifier_block notifier = {
++ .notifier_call = cgw_notifier
++};
++
+ static __init int cgw_module_init(void)
+ {
+ printk(banner);
+@@ -922,7 +925,6 @@ static __init int cgw_module_init(void)
+ return -ENOMEM;
+
+ /* set notifier */
+- notifier.notifier_call = cgw_notifier;
+ register_netdevice_notifier(&notifier);
+
+ if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
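+
+Initializing the notifier_block statically, instead of assigning
+.notifier_call at runtime, lets the constify plugin place it in read-only
+memory; the same motivation is behind dropping __read_mostly from notifier
+blocks elsewhere in this patch, since that attribute would pin the object
+in a writable section. The pattern:
+
+        static int demo_notifier(struct notifier_block *nb,
+                                 unsigned long event, void *ptr)
+        {
+                return NOTIFY_DONE;
+        }
+
+        static struct notifier_block demo_nb = {
+                .notifier_call = demo_notifier, /* fixed at build time */
+        };
+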
+diff --git a/net/compat.c b/net/compat.c
+index 41724c9..630f046 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+- kmsg->msg_name = compat_ptr(tmp1);
+- kmsg->msg_iov = compat_ptr(tmp2);
+- kmsg->msg_control = compat_ptr(tmp3);
++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
+ return 0;
+ }
+
+@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+
+ if (kern_msg->msg_namelen) {
+ if (mode == VERIFY_READ) {
+- int err = move_addr_to_kernel(kern_msg->msg_name,
++ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
+ kern_msg->msg_namelen,
+ kern_address);
+ if (err < 0)
+@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ kern_msg->msg_name = NULL;
+
+ tot_len = iov_from_user_compat_to_kern(kern_iov,
+- (struct compat_iovec __user *)kern_msg->msg_iov,
++ (struct compat_iovec __force_user *)kern_msg->msg_iov,
+ kern_msg->msg_iovlen);
+ if (tot_len >= 0)
+ kern_msg->msg_iov = kern_iov;
+@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+
+ #define CMSG_COMPAT_FIRSTHDR(msg) \
+ (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
+- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
++ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
+ (struct compat_cmsghdr __user *)NULL)
+
+ #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
+ ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
+ (ucmlen) <= (unsigned long) \
+ ((mhdr)->msg_controllen - \
+- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
++ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
+
+ static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
+ struct compat_cmsghdr __user *cmsg, int cmsg_len)
+ {
+ char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
+- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
++ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
+ msg->msg_controllen)
+ return NULL;
+ return (struct compat_cmsghdr __user *)ptr;
+@@ -224,7 +224,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+ {
+ struct compat_timeval ctv;
+ struct compat_timespec cts[3];
+- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+ struct compat_cmsghdr cmhdr;
+ int cmlen;
+
+@@ -276,7 +276,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+
+ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ {
+- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
++ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
+ int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
+ int fdnum = scm->fp->count;
+ struct file **fp = scm->fp->fp;
+@@ -329,14 +329,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
+ __scm_destroy(scm);
+ }
+
+-/*
+- * A struct sock_filter is architecture independent.
+- */
+-struct compat_sock_fprog {
+- u16 len;
+- compat_uptr_t filter; /* struct sock_filter * */
+-};
+-
+ static int do_set_attach_filter(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+ {
+@@ -373,7 +365,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
+ return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
++ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
+ set_fs(old_fs);
+
+ return err;
+@@ -434,7 +426,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
+ len = sizeof(ktime);
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
++ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
+ set_fs(old_fs);
+
+ if (!err) {
+@@ -569,7 +561,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ {
+- struct compat_group_req __user *gr32 = (void *)optval;
++ struct compat_group_req __user *gr32 = (void __user *)optval;
+ struct group_req __user *kgr =
+ compat_alloc_user_space(sizeof(struct group_req));
+ u32 interface;
+@@ -590,7 +582,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ {
+- struct compat_group_source_req __user *gsr32 = (void *)optval;
++ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
+ struct group_source_req __user *kgsr = compat_alloc_user_space(
+ sizeof(struct group_source_req));
+ u32 interface;
+@@ -611,7 +603,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+ }
+ case MCAST_MSFILTER:
+ {
+- struct compat_group_filter __user *gf32 = (void *)optval;
++ struct compat_group_filter __user *gf32 = (void __user *)optval;
+ struct group_filter __user *kgf;
+ u32 interface, fmode, numsrc;
+
+@@ -649,7 +641,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
+ char __user *optval, int __user *optlen,
+ int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
+ {
+- struct compat_group_filter __user *gf32 = (void *)optval;
++ struct compat_group_filter __user *gf32 = (void __user *)optval;
+ struct group_filter __user *kgf;
+ int __user *koptlen;
+ u32 interface, fmode, numsrc;
+@@ -802,7 +794,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
+
+ if (call < SYS_SOCKET || call > SYS_SENDMMSG)
+ return -EINVAL;
+- if (copy_from_user(a, args, nas[call]))
++ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
+ return -EFAULT;
+ a0 = a[0];
+ a1 = a[1];
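+
+The __force_user/__force_kernel casts through net/compat.c are sparse
+address-space annotations: the compat paths legitimately smuggle user
+pointers through kernel-typed msghdr fields under set_fs(KERNEL_DS), and
+these macros record the direction of each cast so the checker keeps
+validating every other user/kernel crossing. Their definitions are
+roughly (sketch; the real ones are in the patched compiler.h):
+
+        #ifdef __CHECKER__
+        # define __force_user   __force __user
+        # define __force_kernel __force __kernel
+        #else
+        # define __force_user
+        # define __force_kernel
+        #endif
+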
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 68bbf9f..5ef0d12 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+ }
+
+ kfree_skb(skb);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ sk_mem_reclaim_partial(sk);
+
+ return err;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7bcf37d..15d6bb8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1142,10 +1142,14 @@ void dev_load(struct net *net, const char *name)
+ if (no_module && capable(CAP_NET_ADMIN))
+ no_module = request_module("netdev-%s", name);
+ if (no_module && capable(CAP_SYS_MODULE)) {
++#ifdef CONFIG_GRKERNSEC_MODHARDEN
++ ___request_module(true, "grsec_modharden_netdev", "%s", name);
++#else
+ if (!request_module("%s", name))
+ pr_err("Loading kernel module for a network device "
+ "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
+ "instead\n", name);
++#endif
+ }
+ }
+ EXPORT_SYMBOL(dev_load);
+@@ -1597,7 +1601,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+- atomic_long_inc(&dev->rx_dropped);
++ atomic_long_inc_unchecked(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -1607,7 +1611,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ nf_reset(skb);
+
+ if (unlikely(!is_skb_forwardable(dev, skb))) {
+- atomic_long_inc(&dev->rx_dropped);
++ atomic_long_inc_unchecked(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -2047,7 +2051,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+
+ struct dev_gso_cb {
+ void (*destructor)(struct sk_buff *skb);
+-};
++} __no_const;
+
+ #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+@@ -2969,7 +2973,7 @@ enqueue:
+
+ local_irq_restore(flags);
+
+- atomic_long_inc(&skb->dev->rx_dropped);
++ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -3043,7 +3047,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
+-static void net_tx_action(struct softirq_action *h)
++static __latent_entropy void net_tx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+@@ -3342,7 +3346,7 @@ ncls:
+ if (pt_prev) {
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ } else {
+- atomic_long_inc(&skb->dev->rx_dropped);
++ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ /* Jamal, now you will not able to escape explaining
+ * me how you were going to use this. :-)
+@@ -3907,7 +3911,7 @@ void netif_napi_del(struct napi_struct *napi)
+ }
+ EXPORT_SYMBOL(netif_napi_del);
+
+-static void net_rx_action(struct softirq_action *h)
++static __latent_entropy void net_rx_action(void)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ unsigned long time_limit = jiffies + 2;
+@@ -4377,8 +4381,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+ else
+ seq_printf(seq, "%04x", ntohs(pt->type));
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ seq_printf(seq, " %-8s %p\n",
++ pt->dev ? pt->dev->name : "", NULL);
++#else
+ seq_printf(seq, " %-8s %pF\n",
+ pt->dev ? pt->dev->name : "", pt->func);
++#endif
+ }
+
+ return 0;
+@@ -4440,7 +4449,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
+ proc_net_remove(net, "dev");
+ }
+
+-static struct pernet_operations __net_initdata dev_proc_ops = {
++static struct pernet_operations __net_initconst dev_proc_ops = {
+ .init = dev_proc_net_init,
+ .exit = dev_proc_net_exit,
+ };
+@@ -5935,7 +5944,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ } else {
+ netdev_stats_to_stats64(storage, &dev->stats);
+ }
+- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
++ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
+ return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
+@@ -6514,7 +6523,7 @@ static void __net_exit netdev_exit(struct net *net)
+ kfree(net->dev_index_head);
+ }
+
+-static struct pernet_operations __net_initdata netdev_net_ops = {
++static struct pernet_operations __net_initconst netdev_net_ops = {
+ .init = netdev_init,
+ .exit = netdev_exit,
+ };
+@@ -6576,7 +6585,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+ rtnl_unlock();
+ }
+
+-static struct pernet_operations __net_initdata default_device_ops = {
++static struct pernet_operations __net_initconst default_device_ops = {
+ .exit = default_device_exit,
+ .exit_batch = default_device_exit_batch,
+ };
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index cd09414..d070f83 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -723,7 +723,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
+ proc_net_remove(net, "dev_mcast");
+ }
+
+-static struct pernet_operations __net_initdata dev_mc_net_ops = {
++static struct pernet_operations __net_initconst dev_mc_net_ops = {
+ .init = dev_mc_net_init,
+ .exit = dev_mc_net_exit,
+ };
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 2367246..4a0a677 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1612,10 +1612,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
+ if (ret)
+ return ret;
+
+- len = (tmp.len > dump.len) ? dump.len : tmp.len;
++ len = min(tmp.len, dump.len);
+ if (!len)
+ return -EFAULT;
+
++ /* Don't ever let the driver think there's more space available
++ * than it requested with .get_dump_flag().
++ */
++ dump.len = len;
++
++ /* Always allocate enough space to hold the whole thing so that the
++ * driver does not need to check the length and bother with partial
++ * dumping.
++ */
+ data = vzalloc(tmp.len);
+ if (!data)
+ return -ENOMEM;
+@@ -1623,6 +1632,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
+ if (ret)
+ goto out;
+
++ /* There are two sane possibilities:
++ * 1. The driver's .get_dump_data() does not touch dump.len.
++ * 2. Or it may set dump.len to how much it really writes, which
++ * should be tmp.len (or len if it can do a partial dump).
++ * In any case respond to userspace with the actual length of data
++ * it's receiving.
++ */
++ WARN_ON(dump.len != len && dump.len != tmp.len);
++ dump.len = len;
++
+ if (copy_to_user(useraddr, &dump, sizeof(dump))) {
+ ret = -EFAULT;
+ goto out;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 5dea452..d775edc 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -39,6 +39,7 @@
+ #include <linux/filter.h>
+ #include <linux/reciprocal_div.h>
+ #include <linux/ratelimit.h>
++#include <linux/seccomp.h>
+
+ /* No hurry in this branch */
+ static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
+@@ -350,6 +351,11 @@ load_b:
+ A = 0;
+ continue;
+ }
++#ifdef CONFIG_SECCOMP_FILTER
++ case BPF_S_ANC_SECCOMP_LD_W:
++ A = seccomp_bpf_load(fentry->k);
++ continue;
++#endif
+ default:
+ WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
+ fentry->code, fentry->jt,
+diff --git a/net/core/flow.c b/net/core/flow.c
+index e318c7e..168b1d0 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -61,7 +61,7 @@ struct flow_cache {
+ struct timer_list rnd_timer;
+ };
+
+-atomic_t flow_cache_genid = ATOMIC_INIT(0);
++atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
+ EXPORT_SYMBOL(flow_cache_genid);
+ static struct flow_cache flow_cache_global;
+ static struct kmem_cache *flow_cachep __read_mostly;
+@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
+
+ static int flow_entry_valid(struct flow_cache_entry *fle)
+ {
+- if (atomic_read(&flow_cache_genid) != fle->genid)
++ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
+ return 0;
+ if (fle->object && !fle->object->ops->check(fle->object))
+ return 0;
+@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
+ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
+ }
+- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
++ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
+ flo = fle->object;
+ if (!flo)
+ goto ret_object;
+@@ -280,7 +280,7 @@ nocache:
+ }
+ flo = resolver(net, key, family, dir, flo, ctx);
+ if (fle) {
+- fle->genid = atomic_read(&flow_cache_genid);
++ fle->genid = atomic_read_unchecked(&flow_cache_genid);
+ if (!IS_ERR(flo))
+ fle->object = flo;
+ else
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index 139ef93..7afaa2f 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
+ if (m->msg_namelen) {
+ if (mode == VERIFY_READ) {
+ void __user *namep;
+- namep = (void __user __force *) m->msg_name;
++ namep = (void __force_user *) m->msg_name;
+ err = move_addr_to_kernel(namep, m->msg_namelen,
+ address);
+ if (err < 0)
+@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
+ }
+
+ size = m->msg_iovlen * sizeof(struct iovec);
+- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
++ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
+ return -EFAULT;
+
+ m->msg_iov = iov;
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 0329404..ab4e13a 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1334,7 +1334,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
+ }
+ EXPORT_SYMBOL(netdev_class_remove_file);
+
+-int netdev_kobject_init(void)
++int __init netdev_kobject_init(void)
+ {
+ kobj_ns_type_register(&net_ns_type_operations);
+ return class_register(&net_class);
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index dd00b71..74d1779 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -422,7 +422,7 @@ static int __register_pernet_operations(struct list_head *list,
+ int error;
+ LIST_HEAD(net_exit_list);
+
+- list_add_tail(&ops->list, list);
++ pax_list_add_tail((struct list_head *)&ops->list, list);
+ if (ops->init || (ops->id && ops->size)) {
+ for_each_net(net) {
+ error = ops_init(ops, net);
+@@ -435,7 +435,7 @@ static int __register_pernet_operations(struct list_head *list,
+
+ out_undo:
+ /* If I have an error cleanup all namespaces I initialized */
+- list_del(&ops->list);
++ pax_list_del((struct list_head *)&ops->list);
+ ops_exit_list(ops, &net_exit_list);
+ ops_free_list(ops, &net_exit_list);
+ return error;
+@@ -446,7 +446,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
+ struct net *net;
+ LIST_HEAD(net_exit_list);
+
+- list_del(&ops->list);
++ pax_list_del((struct list_head *)&ops->list);
+ for_each_net(net)
+ list_add_tail(&net->exit_list, &net_exit_list);
+ ops_exit_list(ops, &net_exit_list);
+@@ -580,7 +580,7 @@ int register_pernet_device(struct pernet_operations *ops)
+ mutex_lock(&net_mutex);
+ error = register_pernet_operations(&pernet_list, ops);
+ if (!error && (first_device == &pernet_list))
+- first_device = &ops->list;
++ first_device = (struct list_head *)&ops->list;
+ mutex_unlock(&net_mutex);
+ return error;
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5b7d5f2..ecb9676 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -57,7 +57,7 @@ struct rtnl_link {
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ rtnl_calcit_func calcit;
+-};
++} __no_const;
+
+ static DEFINE_MUTEX(rtnl_mutex);
+
+@@ -284,10 +284,13 @@ static LIST_HEAD(link_ops);
+ */
+ int __rtnl_link_register(struct rtnl_link_ops *ops)
+ {
+- if (!ops->dellink)
+- ops->dellink = unregister_netdevice_queue;
++ if (!ops->dellink) {
++ pax_open_kernel();
++ *(void **)&ops->dellink = unregister_netdevice_queue;
++ pax_close_kernel();
++ }
+
+- list_add_tail(&ops->list, &link_ops);
++ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_register);
+@@ -334,7 +337,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+ for_each_net(net) {
+ __rtnl_kill_links(net, ops);
+ }
+- list_del(&ops->list);
++ pax_list_del((struct list_head *)&ops->list);
+ }
+ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+
+diff --git a/net/core/scm.c b/net/core/scm.c
+index ff52ad0..aff1c0f 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
+ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ {
+ struct cmsghdr __user *cm
+- = (__force struct cmsghdr __user *)msg->msg_control;
++ = (struct cmsghdr __force_user *)msg->msg_control;
+ struct cmsghdr cmhdr;
+ int cmlen = CMSG_LEN(len);
+ int err;
+@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ err = -EFAULT;
+ if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
+ goto out;
+- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
++ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
+ goto out;
+ cmlen = CMSG_SPACE(len);
+ if (msg->msg_controllen < cmlen)
+@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
+ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ {
+ struct cmsghdr __user *cm
+- = (__force struct cmsghdr __user*)msg->msg_control;
++ = (struct cmsghdr __force_user *)msg->msg_control;
+
+ int fdmax = 0;
+ int fdnum = scm->fp->count;
+@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ if (fdnum < fdmax)
+ fdmax = fdnum;
+
+- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
++ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
+ i++, cmfptr++)
+ {
+ int new_fd;
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 925991a..209a505 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -12,12 +12,10 @@
+
+ static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+-static int __init net_secret_init(void)
++void net_secret_init(void)
+ {
+ get_random_bytes(net_secret, sizeof(net_secret));
+- return 0;
+ }
+-late_initcall(net_secret_init);
+
+ #ifdef CONFIG_INET
+ static u32 seq_scale(u32 seq)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index af9c3c6..76914a3 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2902,13 +2902,15 @@ void __init skb_init(void)
+ skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+ sizeof(struct sk_buff),
+ 0,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++ SLAB_NO_SANITIZE,
+ NULL);
+ skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+ (2*sizeof(struct sk_buff)) +
+ sizeof(atomic_t),
+ 0,
+- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
++ SLAB_NO_SANITIZE,
+ NULL);
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 8a2c2dd..3ba3cf1 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ trace_sock_rcvqueue_full(sk, skb);
+ return -ENOMEM;
+ }
+@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ return err;
+
+ if (!sk_rmem_schedule(sk, skb->truesize)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ return -ENOBUFS;
+ }
+
+@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ skb_dst_force(skb);
+
+ spin_lock_irqsave(&list->lock, flags);
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+ skb->dev = NULL;
+
+ if (sk_rcvqueues_full(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+ if (nested)
+@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ } else if (sk_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
+@@ -406,7 +406,7 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+ }
+ EXPORT_SYMBOL(sk_dst_check);
+
+-static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
++static int sock_bindtodevice(struct sock *sk, char __user *optval, unsigned int optlen)
+ {
+ int ret = -ENOPROTOOPT;
+ #ifdef CONFIG_NETDEVICES
+@@ -420,7 +420,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+ goto out;
+
+ ret = -EINVAL;
+- if (optlen < 0)
++ if (optlen > INT_MAX)
+ goto out;
+
+ /* Bind this socket to a particular device like "eth0",
+@@ -786,12 +786,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ struct timeval tm;
+ } v;
+
+- int lv = sizeof(int);
+- int len;
++ unsigned int lv = sizeof(int);
++ unsigned int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+- if (len < 0)
++ if (len > INT_MAX)
+ return -EINVAL;
+
+ memset(&v, 0, sizeof(v));
+@@ -932,18 +932,18 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ if (len > sizeof(peercred))
+ len = sizeof(peercred);
+ cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+- if (copy_to_user(optval, &peercred, len))
++ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
+ return -EFAULT;
+ goto lenout;
+ }
+
+ case SO_PEERNAME:
+ {
+- char address[128];
++ char address[_K_SS_MAXSIZE];
+
+ if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+ return -ENOTCONN;
+- if (lv < len)
++ if (lv < len || sizeof address < len)
+ return -EINVAL;
+ if (copy_to_user(optval, address, len))
+ return -EFAULT;
+@@ -978,7 +978,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+
+ if (len > lv)
+ len = lv;
+- if (copy_to_user(optval, &v, len))
++ if (len > sizeof(v) || copy_to_user(optval, &v, len))
+ return -EFAULT;
+ lenout:
+ if (put_user(len, optlen))
+@@ -2027,7 +2027,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ */
+ smp_wmb();
+ atomic_set(&sk->sk_refcnt, 1);
+- atomic_set(&sk->sk_drops, 0);
++ atomic_set_unchecked(&sk->sk_drops, 0);
+ }
+ EXPORT_SYMBOL(sock_init_data);
+
+@@ -2564,7 +2564,7 @@ static __net_exit void proto_exit_net(struct net *net)
+ }
+
+
+-static __net_initdata struct pernet_operations proto_net_ops = {
++static __net_initconst struct pernet_operations proto_net_ops = {
+ .init = proto_init_net,
+ .exit = proto_exit_net,
+ };
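
Three recurring patterns land in the sock.c hunk: sk_drops moves to the *_unchecked atomic family (under PAX_REFCOUNT, plain atomic_t traps on overflow, so a statistics counter that may legitimately wrap is exempted), the getsockopt lengths become unsigned with the old "< 0" test replaced by "> INT_MAX", and each copy_to_user() gains an explicit bound against the source object. The signedness point, as a compilable miniature:

    #include <limits.h>
    #include <stdio.h>

    /* once len is unsigned, a negative value from userspace shows up as a
     * huge positive one, so the "len < 0" test becomes "len > INT_MAX" */
    static int check_len(unsigned int len)
    {
        if (len > INT_MAX)
            return -1;                         /* -EINVAL in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_len((unsigned int)-1)); /* rejected: wraps to 4294967295 */
        printf("%d\n", check_len(128u));             /* accepted */
        return 0;
    }
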
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index f0bdd36..957fc06 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ {
+ unsigned int orig_size, size;
+ int ret, i;
+- ctl_table tmp = {
++ ctl_table_no_const tmp = {
+ .data = &size,
+ .maxlen = sizeof(size),
+ .mode = table->mode
+@@ -210,29 +210,27 @@ __net_initdata struct ctl_path net_core_path[] = {
+
+ static __net_init int sysctl_core_net_init(struct net *net)
+ {
+- struct ctl_table *tbl;
++ ctl_table_no_const *tbl = NULL;
+
+ net->core.sysctl_somaxconn = SOMAXCONN;
+
+- tbl = netns_core_table;
+ if (!net_eq(net, &init_net)) {
+- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
++ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
+
+ tbl[0].data = &net->core.sysctl_somaxconn;
+- }
++ net->core.sysctl_hdr = register_net_sysctl_table(net, net_core_path, tbl);
++ } else
++ net->core.sysctl_hdr = register_net_sysctl_table(net, net_core_path, netns_core_table);
+
+- net->core.sysctl_hdr = register_net_sysctl_table(net,
+- net_core_path, tbl);
+ if (net->core.sysctl_hdr == NULL)
+ goto err_reg;
+
+ return 0;
+
+ err_reg:
+- if (tbl != netns_core_table)
+- kfree(tbl);
++ kfree(tbl);
+ err_dup:
+ return -ENOMEM;
+ }
+@@ -247,7 +245,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
+ kfree(tbl);
+ }
+
+-static __net_initdata struct pernet_operations sysctl_core_ops = {
++static __net_initconst struct pernet_operations sysctl_core_ops = {
+ .init = sysctl_core_net_init,
+ .exit = sysctl_core_net_exit,
+ };
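
The sysctl rework above follows the PaX constify pattern seen throughout this patch: the template table becomes read-only after boot, only non-init namespaces receive a writable kmemdup()'d copy whose .data pointers are retargeted, and register_net_sysctl_table() is called with either the const template or the copy, never a pointer that aliases both. A compressed userspace sketch of the same split (types and names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct ctl { const char *name; void *data; };

    static const struct ctl tmpl[] = { { "somaxconn", NULL }, { NULL, NULL } };

    static struct ctl *dup_for_netns(int *per_ns_value)
    {
        struct ctl *tbl = malloc(sizeof(tmpl));    /* writable per-namespace copy */
        if (!tbl)
            return NULL;
        memcpy(tbl, tmpl, sizeof(tmpl));
        tbl[0].data = per_ns_value;                /* point at this namespace's knob */
        return tbl;
    }

    int main(void)
    {
        int somaxconn = 128;
        free(dup_for_netns(&somaxconn));
        return 0;
    }
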
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 16fbf8c..ff4b0fc 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -469,6 +469,7 @@ static struct proto dn_proto = {
+ .sysctl_rmem = sysctl_decnet_rmem,
+ .max_header = DN_MAX_NSP_DATA_HEADER + 64,
+ .obj_size = sizeof(struct dn_sock),
++ .slab_flags = SLAB_USERCOPY,
+ };
+
+ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
+diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
+index d50a13c..1f612ff 100644
+--- a/net/decnet/sysctl_net_decnet.c
++++ b/net/decnet/sysctl_net_decnet.c
+@@ -175,7 +175,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, addr, len))
++ if (len > sizeof addr || copy_to_user(buffer, addr, len))
+ return -EFAULT;
+
+ *lenp = len;
+@@ -238,7 +238,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
+
+ if (len > *lenp) len = *lenp;
+
+- if (copy_to_user(buffer, devname, len))
++ if (len > sizeof devname || copy_to_user(buffer, devname, len))
+ return -EFAULT;
+
+ *lenp = len;
+diff --git a/net/econet/Kconfig b/net/econet/Kconfig
+index 39a2d29..f39c0fe 100644
+--- a/net/econet/Kconfig
++++ b/net/econet/Kconfig
+@@ -4,7 +4,7 @@
+
+ config ECONET
+ tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
+- depends on EXPERIMENTAL && INET
++ depends on EXPERIMENTAL && INET && BROKEN
+ ---help---
+ Econet is a fairly old and slow networking protocol mainly used by
+ Acorn computers to access file and print servers. It uses native
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 5d42df2..10638af 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -329,7 +329,7 @@ static int lowpan_header_create(struct sk_buff *skb,
+ hc06_ptr += 3;
+ } else {
+ /* compress nothing */
+- memcpy(hc06_ptr, &hdr, 4);
++ memcpy(hc06_ptr, hdr, 4);
+ /* replace the top byte with new ECN | DSCP format */
+ *hc06_ptr = tmp;
+ hc06_ptr += 4;
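
The one-character 6lowpan change above fixes a genuine bug: hdr is already a pointer to the IPv6 header, so memcpy(hc06_ptr, &hdr, 4) copied four bytes of the pointer itself rather than the header. A self-contained demonstration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char hdr_bytes[4] = { 0x60, 0x01, 0x02, 0x03 };
        unsigned char *hdr = hdr_bytes;        /* like the header pointer in the hunk */
        unsigned char out[4];

        memcpy(out, &hdr, 4);                  /* bug: copies bytes of the pointer value */
        memcpy(out, hdr, 4);                   /* fix: copies the header bytes */

        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
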
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 5d228de..91bdee5 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -114,6 +114,7 @@
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+ #include <net/net_namespace.h>
++#include <net/secure_seq.h>
+ #ifdef CONFIG_IP_MROUTE
+ #include <linux/mroute.h>
+ #endif
+@@ -241,8 +242,10 @@ void build_ehash_secret(void)
+ get_random_bytes(&rnd, sizeof(rnd));
+ } while (rnd == 0);
+
+- if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
+ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
++ net_secret_init();
++ }
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+
+@@ -1612,7 +1615,7 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
+ snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+ }
+
+-static __net_initdata struct pernet_operations ipv4_mib_ops = {
++static __net_initconst struct pernet_operations ipv4_mib_ops = {
+ .init = ipv4_mib_init_net,
+ .exit = ipv4_mib_exit_net,
+ };
+@@ -1646,13 +1649,9 @@ static int __init inet_init(void)
+
+ BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
+
+- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+- if (!sysctl_local_reserved_ports)
+- goto out;
+-
+ rc = proto_register(&tcp_prot, 1);
+ if (rc)
+- goto out_free_reserved_ports;
++ goto out;
+
+ rc = proto_register(&udp_prot, 1);
+ if (rc)
+@@ -1759,8 +1758,6 @@ out_unregister_udp_proto:
+ proto_unregister(&udp_prot);
+ out_unregister_tcp_proto:
+ proto_unregister(&tcp_prot);
+-out_free_reserved_ports:
+- kfree(sysctl_local_reserved_ports);
+ goto out;
+ }
+
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 59a7041..060976d 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -945,24 +945,25 @@ static void parp_redo(struct sk_buff *skb)
+ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+ {
+- struct arphdr *arp;
++ const struct arphdr *arp;
+
+- /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
+- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
+- goto freeskb;
+-
+- arp = arp_hdr(skb);
+- if (arp->ar_hln != dev->addr_len ||
+- dev->flags & IFF_NOARP ||
++ if (dev->flags & IFF_NOARP ||
+ skb->pkt_type == PACKET_OTHERHOST ||
+- skb->pkt_type == PACKET_LOOPBACK ||
+- arp->ar_pln != 4)
++ skb->pkt_type == PACKET_LOOPBACK)
+ goto freeskb;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+- if (skb == NULL)
++ if (!skb)
+ goto out_of_mem;
+
++ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
++ if (!pskb_may_pull(skb, arp_hdr_len(dev)))
++ goto freeskb;
++
++ arp = arp_hdr(skb);
++ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
++ goto freeskb;
++
+ memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
+
+ return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
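
The arp_rcv() rewrite is an ordering fix: pskb_may_pull() may reallocate the skb header, which is only legal on an unshared buffer, so the pull and the header reads that depend on it now run after skb_share_check(), while checks touching only device flags and packet metadata stay in front. A toy analogue of the invariant (names are mine, not the kernel's):

    #include <stdio.h>

    struct pkt { int users; int pulled; };

    /* may modify the packet, so it is only legal on a private copy */
    static int may_pull(struct pkt *p)
    {
        if (p->users != 1)
            return 0;                          /* would corrupt other holders */
        p->pulled = 1;
        return 1;
    }

    static struct pkt *share_check(struct pkt *p)
    {
        p->users = 1;                          /* stands in for cloning a shared skb */
        return p;
    }

    int main(void)
    {
        struct pkt p = { 2, 0 };               /* shared: two holders */
        printf("%d\n", may_pull(&p));          /* 0: the old code's mistake */
        printf("%d\n", may_pull(share_check(&p))); /* 1: unshare first, as patched */
        return 0;
    }
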
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index e41c40f..fbed7a7 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -827,9 +827,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+ if (!ifa) {
+ ret = -ENOBUFS;
+ ifa = inet_alloc_ifa();
++ if (!ifa)
++ break;
+ INIT_HLIST_NODE(&ifa->hash);
+- if (!ifa)
+- break;
+ if (colon)
+ memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
+ else
+@@ -1584,7 +1584,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+ #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
+ DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
+
+-static struct devinet_sysctl_table {
++static const struct devinet_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
+ char *dev_name;
+@@ -1729,7 +1729,7 @@ static __net_init int devinet_init_net(struct net *net)
+ int err;
+ struct ipv4_devconf *all, *dflt;
+ #ifdef CONFIG_SYSCTL
+- struct ctl_table *tbl = ctl_forward_entry;
++ ctl_table_no_const *tbl = NULL;
+ struct ctl_table_header *forw_hdr;
+ #endif
+
+@@ -1747,7 +1747,7 @@ static __net_init int devinet_init_net(struct net *net)
+ goto err_alloc_dflt;
+
+ #ifdef CONFIG_SYSCTL
+- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
++ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_alloc_ctl;
+
+@@ -1767,7 +1767,10 @@ static __net_init int devinet_init_net(struct net *net)
+ goto err_reg_dflt;
+
+ err = -ENOMEM;
+- forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
++ if (!net_eq(net, &init_net))
++ forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
++ else
++ forw_hdr = register_net_sysctl_table(net, net_ipv4_path, ctl_forward_entry);
+ if (forw_hdr == NULL)
+ goto err_reg_ctl;
+ net->ipv4.forw_hdr = forw_hdr;
+@@ -1783,8 +1786,7 @@ err_reg_ctl:
+ err_reg_dflt:
+ __devinet_sysctl_unregister(all);
+ err_reg_all:
+- if (tbl != ctl_forward_entry)
+- kfree(tbl);
++ kfree(tbl);
+ err_alloc_ctl:
+ #endif
+ if (dflt != &ipv4_devconf_dflt)
+@@ -1811,7 +1813,7 @@ static __net_exit void devinet_exit_net(struct net *net)
+ kfree(net->ipv4.devconf_all);
+ }
+
+-static __net_initdata struct pernet_operations devinet_ops = {
++static __net_initconst struct pernet_operations devinet_ops = {
+ .init = devinet_init_net,
+ .exit = devinet_exit_net,
+ };
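
Buried in the devinet.c hunk is an ordering fix independent of the constify churn: INIT_HLIST_NODE(&ifa->hash) used to run before the allocation was checked, dereferencing a possibly-NULL pointer. The corrected shape, as a runnable miniature:

    #include <stdlib.h>

    struct ifa { int hash; };

    int main(void)
    {
        struct ifa *ifa = malloc(sizeof(*ifa));
        if (!ifa)                  /* check the allocation first ...         */
            return 1;
        ifa->hash = 0;             /* ... then initialize; the old order did
                                    * this dereference before the NULL test  */
        free(ifa);
        return 0;
    }
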
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 238fc3b..4455673 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -472,7 +472,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
+ }
+
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+- net_adj) & ~(align - 1)) + (net_adj - 2);
++ net_adj) & ~(align - 1)) + net_adj - 2;
+ }
+
+ static void esp4_err(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 92fc5f6..b790d91 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+ fib_del_ifaddr(ifa, NULL);
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ if (ifa->ifa_dev->ifa_list == NULL) {
+ /* Last address was deleted from this interface.
+ * Disable IP.
+@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ fib_sync_up(dev);
+ #endif
+- atomic_inc(&net->ipv4.dev_addr_genid);
++ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
+ rt_cache_flush(dev_net(dev), -1);
+ break;
+ case NETDEV_DOWN:
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d01f9c6..284c56c 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
+ nh->nh_saddr = inet_select_addr(nh->nh_dev,
+ nh->nh_gw,
+ nh->nh_parent->fib_scope);
+- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
++ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
+
+ return nh->nh_saddr;
+ }
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index ab188ae..662585c 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -1195,7 +1195,7 @@ fail:
+ return err;
+ }
+
+-static struct pernet_operations __net_initdata icmp_sk_ops = {
++static struct pernet_operations __net_initconst icmp_sk_ops = {
+ .init = icmp_sk_init,
+ .exit = icmp_sk_exit,
+ };
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 907ef2c..eba7111 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
+ .range = { 32768, 61000 },
+ };
+
+-unsigned long *sysctl_local_reserved_ports;
++unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
+ EXPORT_SYMBOL(sysctl_local_reserved_ports);
+
+ void inet_get_local_port_range(int *low, int *high)
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index ccee270..2b3d4de 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -114,11 +114,21 @@ static int inet_csk_diag_fill(struct sock *sk,
+ r->idiag_retrans = 0;
+
+ r->id.idiag_if = sk->sk_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
++#endif
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = inet->inet_rcv_saddr;
+ r->id.idiag_dst[0] = inet->inet_daddr;
+
+@@ -209,13 +219,26 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+
+ r->idiag_family = tw->tw_family;
+ r->idiag_retrans = 0;
++
+ r->id.idiag_if = tw->tw_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
++#endif
++
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+ r->id.idiag_dst[0] = tw->tw_daddr;
++
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -294,12 +317,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
+ if (sk == NULL)
+ goto unlock;
+
++#ifndef CONFIG_GRKERNSEC_HIDESYM
+ err = -ESTALE;
+ if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
+ req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
+ ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
+ (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+ goto out;
++#endif
+
+ err = -ENOMEM;
+ rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+@@ -589,8 +614,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+ r->idiag_retrans = req->retrans;
+
+ r->id.idiag_if = sk->sk_bound_dev_if;
++
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ r->id.idiag_cookie[0] = 0;
++ r->id.idiag_cookie[1] = 0;
++#else
+ r->id.idiag_cookie[0] = (u32)(unsigned long)req;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
++#endif
+
+ tmo = req->expires - jiffies;
+ if (tmo < 0)
+@@ -598,8 +629,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = ireq->rmt_port;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = ireq->loc_addr;
+ r->id.idiag_dst[0] = ireq->rmt_addr;
++
+ r->idiag_expires = jiffies_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 4afcf31..392d206 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -18,12 +18,15 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/wait.h>
++#include <linux/security.h>
+
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/secure_seq.h>
+ #include <net/ip.h>
+
++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++
+ /*
+ * Allocate and initialize a new local port bind bucket.
+ * The bindhash mutex for snum's hash chain must be held here.
+@@ -530,6 +533,8 @@ ok:
+ twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+ spin_unlock(&head->lock);
+
++ gr_update_task_in_ip_table(current, inet_sk(sk));
++
+ if (tw) {
+ inet_twsk_deschedule(tw, death_row);
+ while (twrefcnt) {
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 58c4e696..4f025f0 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -487,8 +487,8 @@ relookup:
+ if (p) {
+ p->daddr = *daddr;
+ atomic_set(&p->refcnt, 1);
+- atomic_set(&p->rid, 0);
+- atomic_set(&p->ip_id_count,
++ atomic_set_unchecked(&p->rid, 0);
++ atomic_set_unchecked(&p->ip_id_count,
+ (daddr->family == AF_INET) ?
+ secure_ip_id(daddr->addr.a4) :
+ secure_ipv6_id(daddr->addr.a6));
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 8f441b2..a56d38e 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
+ return 0;
+
+ start = qp->rid;
+- end = atomic_inc_return(&peer->rid);
++ end = atomic_inc_return_unchecked(&peer->rid);
+ qp->rid = end;
+
+ rc = qp->q.fragments && (end - start) > max;
+@@ -773,21 +773,21 @@ static struct ctl_table ip4_frags_ctl_table[] = {
+
+ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table = NULL;
+ struct ctl_table_header *hdr;
+
+- table = ip4_frags_ns_ctl_table;
+ if (!net_eq(net, &init_net)) {
+- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
++ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
+
+ table[0].data = &net->ipv4.frags.high_thresh;
+ table[1].data = &net->ipv4.frags.low_thresh;
+ table[2].data = &net->ipv4.frags.timeout;
+- }
++ hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
++ } else
++ hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, ip4_frags_ns_ctl_table);
+
+- hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+ if (hdr == NULL)
+ goto err_reg;
+
+@@ -795,8 +795,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+ return 0;
+
+ err_reg:
+- if (!net_eq(net, &init_net))
+- kfree(table);
++ kfree(table);
+ err_alloc:
+ return -ENOMEM;
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 5f28fab..ebd7a97 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -118,7 +118,7 @@
+ Alexey Kuznetsov.
+ */
+
+-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
++static struct rtnl_link_ops ipgre_link_ops;
+ static int ipgre_tunnel_init(struct net_device *dev);
+ static void ipgre_tunnel_setup(struct net_device *dev);
+ static int ipgre_tunnel_bind_dev(struct net_device *dev);
+@@ -1669,7 +1669,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
+ [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
+ };
+
+-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
++static struct rtnl_link_ops ipgre_link_ops = {
+ .kind = "gre",
+ .maxtype = IFLA_GRE_MAX,
+ .policy = ipgre_policy,
+@@ -1682,7 +1682,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
+ .fill_info = ipgre_fill_info,
+ };
+
+-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
++static struct rtnl_link_ops ipgre_tap_ops = {
+ .kind = "gretap",
+ .maxtype = IFLA_GRE_MAX,
+ .policy = ipgre_policy,
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 542a9c1..5b792eb 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1121,7 +1121,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ len = min_t(unsigned int, len, opt->optlen);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, opt->__data, len))
++ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
++ copy_to_user(optval, opt->__data, len))
+ return -EFAULT;
+ return 0;
+ }
+@@ -1249,7 +1250,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ if (sk->sk_type != SOCK_STREAM)
+ return -ENOPROTOOPT;
+
+- msg.msg_control = optval;
++ msg.msg_control = (void __force_kernel *)optval;
+ msg.msg_controllen = len;
+ msg.msg_flags = flags;
+
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index 99ec116..c5628fe 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
++ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
+
+ mm_segment_t oldfs = get_fs();
+ set_fs(get_ds());
+- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
++ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
+ set_fs(oldfs);
+ return res;
+ }
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b5e64e4..4a9a5c4 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1320,6 +1320,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
+ if (get_user(v, (u32 __user *)optval))
+ return -EFAULT;
+
++ /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
++ if (v != RT_TABLE_DEFAULT && v >= 1000000000)
++ return -EINVAL;
++
+ rtnl_lock();
+ ret = 0;
+ if (sk == rtnl_dereference(mrt->mroute_sk)) {
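
The new bound in ip_mroute_setsockopt() exists because the table id is later formatted into a device name as "pimreg%u": with ten decimal digits the result needs 17 bytes, one more than IFNAMSIZ allows. A quick check of the arithmetic:

    #include <stdio.h>

    #define IFNAMSIZ 16

    int main(void)
    {
        char name[IFNAMSIZ];
        unsigned int v = 1000000000;           /* first ten-digit value */
        int n = snprintf(name, sizeof(name), "pimreg%u", v);
        printf("needs %d bytes, IFNAMSIZ is %d\n", n + 1, IFNAMSIZ);
        return 0;
    }
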
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index fd7a3f6..21e76da 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+
+ static int get_info(struct net *net, void __user *user,
+- const int *len, int compat)
++ int len, int compat)
+ {
+ char name[XT_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct arpt_getinfo)) {
+- duprintf("length %u != %Zu\n", *len,
++ if (len != sizeof(struct arpt_getinfo)) {
++ duprintf("length %u != %Zu\n", len,
+ sizeof(struct arpt_getinfo));
+ return -EINVAL;
+ }
+@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
+ info.size = private->size;
+ strcpy(info.name, name);
+
+- if (copy_to_user(user, &info, *len) != 0)
++ if (copy_to_user(user, &info, len) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
+
+ switch (cmd) {
+ case ARPT_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 1);
++ ret = get_info(sock_net(sk), user, *len, 1);
+ break;
+ case ARPT_SO_GET_ENTRIES:
+ ret = compat_get_entries(sock_net(sk), user, len);
+@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
+
+ switch (cmd) {
+ case ARPT_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 0);
++ ret = get_info(sock_net(sk), user, *len, 0);
+ break;
+
+ case ARPT_SO_GET_ENTRIES:
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 24e556e..f6918b4 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1069,14 +1069,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+
+ static int get_info(struct net *net, void __user *user,
+- const int *len, int compat)
++ int len, int compat)
+ {
+ char name[XT_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct ipt_getinfo)) {
+- duprintf("length %u != %zu\n", *len,
++ if (len != sizeof(struct ipt_getinfo)) {
++ duprintf("length %u != %zu\n", len,
+ sizeof(struct ipt_getinfo));
+ return -EINVAL;
+ }
+@@ -1113,7 +1113,7 @@ static int get_info(struct net *net, void __user *user,
+ info.size = private->size;
+ strcpy(info.name, name);
+
+- if (copy_to_user(user, &info, *len) != 0)
++ if (copy_to_user(user, &info, len) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+@@ -1967,7 +1967,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+
+ switch (cmd) {
+ case IPT_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 1);
++ ret = get_info(sock_net(sk), user, *len, 1);
+ break;
+ case IPT_SO_GET_ENTRIES:
+ ret = compat_get_entries(sock_net(sk), user, len);
+@@ -2014,7 +2014,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+
+ switch (cmd) {
+ case IPT_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 0);
++ ret = get_info(sock_net(sk), user, *len, 0);
+ break;
+
+ case IPT_SO_GET_ENTRIES:
+diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
+index b550815..c3b44d5 100644
+--- a/net/ipv4/netfilter/ipt_ULOG.c
++++ b/net/ipv4/netfilter/ipt_ULOG.c
+@@ -202,6 +202,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
+ ub->qlen++;
+
+ pm = NLMSG_DATA(nlh);
++ memset(pm, 0, sizeof(*pm));
+
+ /* We might not have a timestamp, get one */
+ if (skb->tstamp.tv64 == 0)
+@@ -218,8 +219,6 @@ static void ipt_ulog_packet(unsigned int hooknum,
+ strncpy(pm->prefix, prefix, sizeof(pm->prefix));
+ else if (loginfo->prefix[0] != '\0')
+ strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
+- else
+- *(pm->prefix) = '\0';
+
+ if (in && in->hard_header_len > 0 &&
+ skb->mac_header != skb->network_header &&
+@@ -231,13 +230,9 @@ static void ipt_ulog_packet(unsigned int hooknum,
+
+ if (in)
+ strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+- else
+- pm->indev_name[0] = '\0';
+
+ if (out)
+ strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+- else
+- pm->outdev_name[0] = '\0';
+
+ /* copy_len <= skb->len, so can't fail. */
+ if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
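
The ULOG change swaps three field-by-field '\0' writes for a single memset() over the whole message. The message is kernel memory handed to userspace over netlink, so every byte not explicitly filled, including struct padding, must be cleared or it leaks stale heap contents. A runnable miniature of the pattern:

    #include <stdio.h>
    #include <string.h>

    struct ulog_msg { char prefix[32]; char indev[16]; char outdev[16]; };

    int main(void)
    {
        struct ulog_msg m;
        memset(&m, 0, sizeof(m));   /* one wipe covers every field and all padding */
        strncpy(m.prefix, "grsec:", sizeof(m.prefix) - 1);
        /* indev/outdev stay zeroed when no device is set, instead of holding
         * whatever the allocator left behind */
        printf("%s\n", m.prefix);
        return 0;
    }
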
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 00975b6..e922b06 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -835,7 +835,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+ atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops), len);
++ atomic_read_unchecked(&sp->sk_drops), len);
+ }
+
+ static int ping_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index f7fdbe9..63740b7 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -487,7 +487,7 @@ static __net_exit void ip_proc_exit_net(struct net *net)
+ proc_net_remove(net, "sockstat");
+ }
+
+-static __net_initdata struct pernet_operations ip_proc_ops = {
++static __net_initconst struct pernet_operations ip_proc_ops = {
+ .init = ip_proc_init_net,
+ .exit = ip_proc_exit_net,
+ };
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index cfded93..7b72cc0 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -305,7 +305,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
+ int raw_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
+
+ static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+ {
++ struct icmp_filter filter;
++
+ if (optlen > sizeof(struct icmp_filter))
+ optlen = sizeof(struct icmp_filter);
+- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ raw_sk(sk)->filter = filter;
+ return 0;
+ }
+
+ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
+ {
+ int len, ret = -EFAULT;
++ struct icmp_filter filter;
+
+ if (get_user(len, optlen))
+ goto out;
+@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
+ if (len > sizeof(struct icmp_filter))
+ len = sizeof(struct icmp_filter);
+ ret = -EFAULT;
+- if (put_user(len, optlen) ||
+- copy_to_user(optval, &raw_sk(sk)->filter, len))
++ filter = raw_sk(sk)->filter;
++ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
+ goto out;
+ ret = 0;
+ out: return ret;
+@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+ sk_wmem_alloc_get(sp),
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw_seq_show(struct seq_file *seq, void *v)
+@@ -1049,7 +1059,7 @@ static __net_exit void raw_exit_net(struct net *net)
+ proc_net_remove(net, "raw");
+ }
+
+-static __net_initdata struct pernet_operations raw_net_ops = {
++static __net_initconst struct pernet_operations raw_net_ops = {
+ .init = raw_init_net,
+ .exit = raw_exit_net,
+ };
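
The ICMP filter accessors in raw.c now go through a stack-local struct icmp_filter instead of copying user data directly into (or out of) the sock object; combined with the slab usercopy checks elsewhere in this patch, only stack addresses are ever handed to copy_from_user()/copy_to_user(), and the getter additionally refuses len > sizeof filter. The setter's shape, as a compilable miniature with memcpy standing in for copy_from_user() (the temporary is zeroed here for a clean short copy; the kernel code leaves it uninitialized):

    #include <string.h>

    struct icmp_filter { unsigned int data; };
    struct raw_sock { struct icmp_filter filter; };

    static int set_filter(struct raw_sock *rs, const void *optval, size_t optlen)
    {
        struct icmp_filter tmp = { 0 };        /* stack bounce buffer */
        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);
        memcpy(&tmp, optval, optlen);          /* stands in for copy_from_user() */
        rs->filter = tmp;                      /* plain assignment into the sock */
        return 0;
    }

    int main(void)
    {
        struct raw_sock rs = { { 0 } };
        unsigned int v = 0xdeadbeef;
        return set_filter(&rs, &v, sizeof(v));
    }
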
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6768ce2..c682a62 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
+
+ static inline int rt_genid(struct net *net)
+ {
+- return atomic_read(&net->ipv4.rt_genid);
++ return atomic_read_unchecked(&net->ipv4.rt_genid);
+ }
+
+ #ifdef CONFIG_PROC_FS
+@@ -641,7 +641,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
+ #endif
+ }
+
+-static struct pernet_operations ip_rt_proc_ops __net_initdata = {
++static struct pernet_operations ip_rt_proc_ops __net_initconst = {
+ .init = ip_rt_do_proc_init,
+ .exit = ip_rt_do_proc_exit,
+ };
+@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
+ unsigned char shuffle;
+
+ get_random_bytes(&shuffle, sizeof(shuffle));
+- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
++ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
+ }
+@@ -3023,7 +3023,7 @@ static int rt_fill_info(struct net *net,
+ error = rt->dst.error;
+ if (peer) {
+ inet_peer_refcheck(rt->peer);
+- id = atomic_read(&peer->ip_id_count) & 0xffff;
++ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
+ if (peer->tcp_ts_stamp) {
+ ts = peer->tcp_ts;
+ tsage = get_seconds() - peer->tcp_ts_stamp;
+@@ -3222,7 +3222,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
+ {
+ if (write) {
+ int flush_delay;
+- ctl_table ctl;
++ ctl_table_no_const ctl;
+ struct net *net;
+
+ memcpy(&ctl, __ctl, sizeof(ctl));
+@@ -3371,6 +3371,7 @@ static struct ctl_table ipv4_route_flush_table[] = {
+ .maxlen = sizeof(int),
+ .mode = 0200,
+ .proc_handler = ipv4_sysctl_rtcache_flush,
++ .extra1 = &init_net,
+ },
+ { },
+ };
+@@ -3384,25 +3385,23 @@ static __net_initdata struct ctl_path ipv4_route_path[] = {
+
+ static __net_init int sysctl_route_net_init(struct net *net)
+ {
+- struct ctl_table *tbl;
++ ctl_table_no_const *tbl = NULL;
+
+- tbl = ipv4_route_flush_table;
+ if (!net_eq(net, &init_net)) {
+- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
++ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
+- }
+- tbl[0].extra1 = net;
+
+- net->ipv4.route_hdr =
+- register_net_sysctl_table(net, ipv4_route_path, tbl);
++ net->ipv4.route_hdr = register_net_sysctl_table(net, ipv4_route_path, tbl);
++ } else
++ net->ipv4.route_hdr = register_net_sysctl_table(net, ipv4_route_path, ipv4_route_flush_table);
++
+ if (net->ipv4.route_hdr == NULL)
+ goto err_reg;
+ return 0;
+
+ err_reg:
+- if (tbl != ipv4_route_flush_table)
+- kfree(tbl);
++ kfree(tbl);
+ err_dup:
+ return -ENOMEM;
+ }
+@@ -3417,7 +3416,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
+ kfree(tbl);
+ }
+
+-static __net_initdata struct pernet_operations sysctl_route_ops = {
++static __net_initconst struct pernet_operations sysctl_route_ops = {
+ .init = sysctl_route_net_init,
+ .exit = sysctl_route_net_exit,
+ };
+@@ -3432,7 +3431,7 @@ static __net_init int rt_genid_init(struct net *net)
+ return 0;
+ }
+
+-static __net_initdata struct pernet_operations rt_genid_ops = {
++static __net_initconst struct pernet_operations rt_genid_ops = {
+ .init = rt_genid_init,
+ };
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 739b073..7ac6591 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -53,7 +53,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
+ {
+ int ret;
+ int range[2];
+- ctl_table tmp = {
++ ctl_table_no_const tmp = {
+ .data = &range,
+ .maxlen = sizeof(range),
+ .mode = table->mode,
+@@ -104,7 +104,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
+ {
+ int ret;
+ gid_t range[2];
+- ctl_table tmp = {
++ ctl_table_no_const tmp = {
+ .data = &range,
+ .maxlen = sizeof(range),
+ .mode = table->mode,
+@@ -125,7 +125,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ char val[TCP_CA_NAME_MAX];
+- ctl_table tbl = {
++ ctl_table_no_const tbl = {
+ .data = val,
+ .maxlen = TCP_CA_NAME_MAX,
+ };
+@@ -144,7 +144,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+ {
+- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
++ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
+ int ret;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+@@ -161,7 +161,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+ {
+- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
++ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
+ int ret;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+@@ -361,7 +361,7 @@ static struct ctl_table ipv4_table[] = {
+ },
+ {
+ .procname = "ip_local_reserved_ports",
+- .data = NULL, /* initialized in sysctl_ipv4_init */
++ .data = sysctl_local_reserved_ports,
+ .maxlen = 65536,
+ .mode = 0644,
+ .proc_handler = proc_do_large_bitmap,
+@@ -744,11 +744,10 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
+
+ static __net_init int ipv4_sysctl_init_net(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table = NULL;
+
+- table = ipv4_net_table;
+ if (!net_eq(net, &init_net)) {
+- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
++ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
+
+@@ -780,16 +779,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
+
+ net->ipv4.sysctl_rt_cache_rebuild_count = 4;
+
+- net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
+- net_ipv4_ctl_path, table);
++ if (!net_eq(net, &init_net))
++ net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
++ else
++ net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, ipv4_net_table);
+ if (net->ipv4.ipv4_hdr == NULL)
+ goto err_reg;
+
+ return 0;
+
+ err_reg:
+- if (!net_eq(net, &init_net))
+- kfree(table);
++ kfree(table);
+ err_alloc:
+ return -ENOMEM;
+ }
+@@ -803,7 +803,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
+ kfree(table);
+ }
+
+-static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
++static __net_initconst struct pernet_operations ipv4_sysctl_ops = {
+ .init = ipv4_sysctl_init_net,
+ .exit = ipv4_sysctl_exit_net,
+ };
+@@ -811,16 +811,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
+ static __init int sysctl_ipv4_init(void)
+ {
+ struct ctl_table_header *hdr;
+- struct ctl_table *i;
+-
+- for (i = ipv4_table; i->procname; i++) {
+- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
+- i->data = sysctl_local_reserved_ports;
+- break;
+- }
+- }
+- if (!i->procname)
+- return -EINVAL;
+
+ hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
+ if (hdr == NULL)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c1ed01e..bb914c3 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4739,7 +4739,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+ * simplifies code)
+ */
+ static void
+-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
++__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+ struct sk_buff *head, struct sk_buff *tail,
+ u32 start, u32 end)
+ {
+@@ -5554,6 +5554,9 @@ slow_path:
+ if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
+ goto csum_error;
+
++ if (!th->ack && !th->rst)
++ goto discard;
++
+ /*
+ * Standard slow path.
+ */
+@@ -5562,8 +5565,7 @@ slow_path:
+ return 0;
+
+ step5:
+- if (th->ack &&
+- tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
++ if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
+ goto discard;
+
+ tcp_rcv_rtt_measure_ts(sk, skb);
+@@ -5794,6 +5796,7 @@ discard:
+ tcp_paws_reject(&tp->rx_opt, 0))
+ goto discard_and_undo;
+
++#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
+ if (th->syn) {
+ /* We see SYN without ACK. It is attempt of
+ * simultaneous connect with crossed SYNs.
+@@ -5842,6 +5845,7 @@ discard:
+ goto discard;
+ #endif
+ }
++#endif
+ /* "fifth, if neither of the SYN or RST bits is set then
+ * drop the segment and return."
+ */
+@@ -5885,7 +5889,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+
+ if (th->syn) {
+- if (th->fin)
++ if (th->fin || th->urg || th->psh)
+ goto discard;
+ if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+ return 1;
+@@ -5924,11 +5928,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ return 0;
+ }
+
++ if (!th->ack && !th->rst)
++ goto discard;
++
+ if (!tcp_validate_incoming(sk, skb, th, 0))
+ return 0;
+
+ /* step 5: check the ACK field */
+- if (th->ack) {
++ if (true) {
+ int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+ FLAG_UPDATE_TS_RECENT) > 0;
+
+@@ -6034,8 +6041,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ }
+ break;
+ }
+- } else
+- goto discard;
++ }
+
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
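
Two related changes run through the tcp_input.c hunks: segments carrying neither ACK nor RST are discarded up front (RFC 793's rule that a segment with the ACK bit off is dropped in synchronized states), which lets the later ack-processing branch become unconditional, and under GRKERNSEC_NO_SIMULT_CONNECT the SYN-without-ACK simultaneous-open path is compiled out entirely. The hoisted predicate, runnable:

    #include <stdbool.h>
    #include <stdio.h>

    struct flags { bool ack, rst; };

    /* hoisted check: in synchronized states a segment with neither ACK nor
     * RST set carries nothing actionable and is dropped before validation */
    static bool discard_early(struct flags th)
    {
        return !th.ack && !th.rst;
    }

    int main(void)
    {
        printf("%d\n", discard_early((struct flags){ false, false })); /* 1: drop */
        printf("%d\n", discard_early((struct flags){ true,  false })); /* 0: process */
        return 0;
    }
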
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 92d7138..df6f00f 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
+ int sysctl_tcp_low_latency __read_mostly;
+ EXPORT_SYMBOL(sysctl_tcp_low_latency);
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
+
+ #ifdef CONFIG_TCP_MD5SIG
+ static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
+@@ -1636,6 +1639,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ tcp_v4_send_reset(rsk, skb);
+ discard:
+ kfree_skb(skb);
+@@ -1698,12 +1704,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 1;
++#endif
+ goto no_tcp_socket;
+-
++ }
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 2;
++#endif
+ goto do_time_wait;
++ }
+
+ if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -1753,6 +1766,10 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+ tcp_v4_send_reset(NULL, skb);
+ }
+
+@@ -2413,7 +2430,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
+ 0, /* non standard timer */
+ 0, /* open_requests have no inode */
+ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
+ req,
++#endif
+ len);
+ }
+
+@@ -2463,7 +2484,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
+ sock_i_uid(sk),
+ icsk->icsk_probes_out,
+ sock_i_ino(sk),
+- atomic_read(&sk->sk_refcnt), sk,
++ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sk,
++#endif
+ jiffies_to_clock_t(icsk->icsk_rto),
+ jiffies_to_clock_t(icsk->icsk_ack.ato),
+ (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
+@@ -2491,7 +2517,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
+ i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+ 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+- atomic_read(&tw->tw_refcnt), tw, len);
++ atomic_read(&tw->tw_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ tw,
++#endif
++ len);
+ }
+
+ #define TMPSZ 150
+@@ -2662,7 +2694,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+ inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
+ }
+
+-static struct pernet_operations __net_initdata tcp_sk_ops = {
++static struct pernet_operations __net_initconst tcp_sk_ops = {
+ .init = tcp_sk_init,
+ .exit = tcp_sk_exit,
+ .exit_batch = tcp_sk_exit_batch,
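
The GRKERNSEC_BLACKHOLE hooks in tcp_v4_rcv() make the stack silent toward port scanners: when no socket matches (ret == 1) or the match is a TIME_WAIT stub (ret == 2), the RST that would normally advertise the closed port is suppressed unless the sysctl is off or the lookup miss happened on loopback. The decision, condensed into a runnable predicate (names are mine; the condition mirrors the hunk's):

    #include <stdbool.h>
    #include <stdio.h>

    static bool grsec_enable_blackhole = true;   /* sysctl-backed in the real feature */

    static bool send_reset(bool lookup_failed, bool on_loopback)
    {
        /* mirrors: !grsec_enable_blackhole ||
         *          (ret == 1 && (skb->dev->flags & IFF_LOOPBACK)) */
        return !grsec_enable_blackhole || (lookup_failed && on_loopback);
    }

    int main(void)
    {
        printf("%d\n", send_reset(true, false));  /* 0: external scan, stay quiet */
        printf("%d\n", send_reset(true, true));   /* 1: local tools still get RSTs */
        return 0;
    }
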
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 66363b6..b0654a3 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -27,6 +27,10 @@
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int sysctl_tcp_syncookies __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_syncookies);
+
+@@ -751,6 +755,10 @@ listen_overflow:
+
+ embryonic_reset:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
++
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ if (!(flg & TCP_FLAG_RST))
+ req->rsk_ops->send_reset(sk, skb);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0d5a118..5f6f0e6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1319,7 +1319,7 @@ static void tcp_cwnd_validate(struct sock *sk)
+ * modulo only when the receiver window alone is the limiting factor or
+ * when we would be allowed to send the split-due-to-Nagle skb fully.
+ */
+-static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
++static unsigned int __intentional_overflow(0) tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
+ unsigned int mss_now, unsigned int max_segs)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
+index 85ee7eb..53277ab 100644
+--- a/net/ipv4/tcp_probe.c
++++ b/net/ipv4/tcp_probe.c
+@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
+ if (cnt + width >= len)
+ break;
+
+- if (copy_to_user(buf + cnt, tbuf, width))
++ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
+ return -EFAULT;
+ cnt += width;
+ }
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 2e0f0af..e2948bf 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -22,6 +22,10 @@
+ #include <linux/gfp.h>
+ #include <net/tcp.h>
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_lastack_retries;
++#endif
++
+ int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
+ int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
+ int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
+@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
+ }
+ }
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if ((sk->sk_state == TCP_LAST_ACK) &&
++ (grsec_lastack_retries > 0) &&
++ (grsec_lastack_retries < retry_until))
++ retry_until = grsec_lastack_retries;
++#endif
++
+ if (retransmits_timed_out(sk, retry_until,
+ syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
+ /* Has it gone just too far? */
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 8c2e259..076bc5b 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -86,6 +86,7 @@
+ #include <linux/types.h>
+ #include <linux/fcntl.h>
+ #include <linux/module.h>
++#include <linux/security.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
+@@ -108,6 +109,10 @@
+ #include <trace/events/udp.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ struct udp_table udp_table __read_mostly;
+ EXPORT_SYMBOL(udp_table);
+
+@@ -565,6 +570,9 @@ found:
+ return s;
+ }
+
++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
++
+ /*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ dport = usin->sin_port;
+ if (dport == 0)
+ return -EINVAL;
++
++ err = gr_search_udp_sendmsg(sk, usin);
++ if (err)
++ return err;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
++
++ err = gr_search_udp_sendmsg(sk, NULL);
++ if (err)
++ return err;
++
+ daddr = inet->inet_daddr;
+ dport = inet->inet_dport;
+ /* Open fast path for connected socket.
+@@ -1103,7 +1120,7 @@ static unsigned int first_packet_length(struct sock *sk)
+ udp_lib_checksum_complete(skb)) {
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ __skb_unlink(skb, rcvq);
+ __skb_queue_tail(&list_kill, skb);
+ }
+@@ -1183,6 +1200,10 @@ try_again:
+ if (!skb)
+ goto out;
+
++ err = gr_search_udp_recvmsg(sk, skb);
++ if (err)
++ goto out_free;
++
+ ulen = skb->len - sizeof(struct udphdr);
+ copied = len;
+ if (copied > ulen)
+@@ -1486,7 +1507,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+
+ drop:
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return -1;
+ }
+@@ -1505,7 +1526,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+ if (!skb1) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+@@ -1674,6 +1695,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ goto csum_error;
+
+ UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ /*
+@@ -2097,8 +2121,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+ sk_wmem_alloc_get(sp),
+ sk_rmem_alloc_get(sp),
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops), len);
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops), len);
+ }
+
+ int udp4_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index a0b4c5d..a5818a1 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -190,11 +190,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ fl4->flowi4_tos = iph->tos;
+ }
+
+-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
++static int xfrm4_garbage_collect(struct dst_ops *ops)
+ {
+ struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
+
+- xfrm4_policy_afinfo.garbage_collect(net);
++ xfrm_garbage_collect_deferred(net);
+ return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
+ }
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 5d41293..19a815f 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2157,7 +2157,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
+ p.iph.ihl = 5;
+ p.iph.protocol = IPPROTO_IPV6;
+ p.iph.ttl = 64;
+- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
++ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
+
+ if (ops->ndo_do_ioctl) {
+ mm_segment_t oldfs = get_fs();
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 65dd543..e6c6e6d 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -164,8 +164,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+ struct esp_data *esp = x->data;
+
+ /* skb is pure payload to encrypt */
+- err = -ENOMEM;
+-
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+
+@@ -200,8 +198,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+@@ -419,7 +419,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
+ net_adj = 0;
+
+ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
+- net_adj) & ~(align - 1)) + (net_adj - 2);
++ net_adj) & ~(align - 1)) + net_adj - 2;
+ }
+
+ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index d505453..ff99535 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -969,7 +969,7 @@ ctl_table ipv6_icmp_table_template[] = {
+
+ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(ipv6_icmp_table_template,
+ sizeof(ipv6_icmp_table_template),
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index 1567fb1..29af910 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
+ #ifdef CONFIG_XFRM
+ {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
++ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
+ }
+ #endif
+ }
+@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
+ #ifdef CONFIG_XFRM
+ if (dst) {
+ struct rt6_info *rt = (struct rt6_info *)dst;
+- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
++ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
+ __sk_dst_reset(sk);
+ dst = NULL;
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index d3fde7e..f526e49 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -600,8 +600,8 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ {
+- static atomic_t ipv6_fragmentation_id;
+- int old, new;
++ static atomic_unchecked_t ipv6_fragmentation_id;
++ int id;
+
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
+ struct inet_peer *peer;
+@@ -614,13 +614,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ return;
+ }
+ }
+- do {
+- old = atomic_read(&ipv6_fragmentation_id);
+- new = old + 1;
+- if (!new)
+- new = 1;
+- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+- fhdr->identification = htonl(new);
++ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
++ if (!id)
++ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
++ fhdr->identification = htonl(id);
+ }
+
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
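
ipv6_select_ident() above drops the cmpxchg retry loop in favour of atomic_inc_return_unchecked(), re-incrementing once if the counter lands on the reserved value 0; the _unchecked type also exempts this deliberately wrapping counter from PAX_REFCOUNT's overflow trap. A userspace rendering with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint frag_counter;

    static unsigned int next_frag_id(void)
    {
        unsigned int id = atomic_fetch_add(&frag_counter, 1) + 1;
        if (id == 0)                           /* 0 is reserved; take the next one */
            id = atomic_fetch_add(&frag_counter, 1) + 1;
        return id;
    }

    int main(void)
    {
        printf("%u %u\n", next_frag_id(), next_frag_id());
        return 0;
    }
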
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index b204df8..8f274f4 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -961,7 +961,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ if (sk->sk_type != SOCK_STREAM)
+ return -ENOPROTOOPT;
+
+- msg.msg_control = optval;
++ msg.msg_control = (void __force_kernel *)optval;
+ msg.msg_controllen = len;
+ msg.msg_flags = flags;
+
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 94874b0..a47969c 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1091,14 +1091,14 @@ static int compat_table_info(const struct xt_table_info *info,
+ #endif
+
+ static int get_info(struct net *net, void __user *user,
+- const int *len, int compat)
++ int len, int compat)
+ {
+ char name[XT_TABLE_MAXNAMELEN];
+ struct xt_table *t;
+ int ret;
+
+- if (*len != sizeof(struct ip6t_getinfo)) {
+- duprintf("length %u != %zu\n", *len,
++ if (len != sizeof(struct ip6t_getinfo)) {
++ duprintf("length %u != %zu\n", len,
+ sizeof(struct ip6t_getinfo));
+ return -EINVAL;
+ }
+@@ -1135,7 +1135,7 @@ static int get_info(struct net *net, void __user *user,
+ info.size = private->size;
+ strcpy(info.name, name);
+
+- if (copy_to_user(user, &info, *len) != 0)
++ if (copy_to_user(user, &info, len) != 0)
+ ret = -EFAULT;
+ else
+ ret = 0;
+@@ -1989,7 +1989,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+
+ switch (cmd) {
+ case IP6T_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 1);
++ ret = get_info(sock_net(sk), user, *len, 1);
+ break;
+ case IP6T_SO_GET_ENTRIES:
+ ret = compat_get_entries(sock_net(sk), user, len);
+@@ -2036,7 +2036,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+
+ switch (cmd) {
+ case IP6T_SO_GET_INFO:
+- ret = get_info(sock_net(sk), user, len, 0);
++ ret = get_info(sock_net(sk), user, *len, 0);
+ break;
+
+ case IP6T_SO_GET_ENTRIES:
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 9ecbc84..7dd6ff7 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -109,7 +109,7 @@ found:
+ */
+ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- struct icmp6hdr *_hdr;
++ struct icmp6hdr _hdr;
+ const struct icmp6hdr *hdr;
+
+ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ {
+ if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
+ skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+ struct raw6_sock *rp = raw6_sk(sk);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+
+ if (inet->hdrincl) {
+ if (skb_checksum_complete(skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+@@ -598,7 +598,7 @@ out:
+ return err;
+ }
+
+-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
+ struct flowi6 *fl6, struct dst_entry **dstp,
+ unsigned int flags)
+ {
+@@ -906,12 +906,15 @@ do_confirm:
+ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+ char __user *optval, int optlen)
+ {
++ struct icmp6_filter filter;
++
+ switch (optname) {
+ case ICMPV6_FILTER:
+ if (optlen > sizeof(struct icmp6_filter))
+ optlen = sizeof(struct icmp6_filter);
+- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
++ if (copy_from_user(&filter, optval, optlen))
+ return -EFAULT;
++ raw6_sk(sk)->filter = filter;
+ return 0;
+ default:
+ return -ENOPROTOOPT;
+@@ -924,6 +927,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+ {
+ int len;
++ struct icmp6_filter filter;
+
+ switch (optname) {
+ case ICMPV6_FILTER:
+@@ -935,7 +939,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+ len = sizeof(struct icmp6_filter);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
++ filter = raw6_sk(sk)->filter;
++ if (len > sizeof filter || copy_to_user(optval, &filter, len))
+ return -EFAULT;
+ return 0;
+ default:
+@@ -1242,7 +1247,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+ 0, 0L, 0,
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ static int raw6_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index eba5deb..61e026f 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -651,21 +651,21 @@ static struct ctl_table ip6_frags_ctl_table[] = {
+
+ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table = NULL;
+ struct ctl_table_header *hdr;
+
+- table = ip6_frags_ns_ctl_table;
+ if (!net_eq(net, &init_net)) {
+- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
++ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
+
+ table[0].data = &net->ipv6.frags.high_thresh;
+ table[1].data = &net->ipv6.frags.low_thresh;
+ table[2].data = &net->ipv6.frags.timeout;
+- }
++ hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
++ } else
++ hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, ip6_frags_ns_ctl_table);
+
+- hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
+ if (hdr == NULL)
+ goto err_reg;
+
+@@ -673,8 +673,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
+ return 0;
+
+ err_reg:
+- if (!net_eq(net, &init_net))
+- kfree(table);
++ kfree(table);
+ err_alloc:
+ return -ENOMEM;
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1768238..b28b21a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2812,7 +2812,7 @@ ctl_table ipv6_route_table_template[] = {
+
+ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(ipv6_route_table_template,
+ sizeof(ipv6_route_table_template),
+diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
+index 166a57c..dc4e6b8 100644
+--- a/net/ipv6/sysctl_net_ipv6.c
++++ b/net/ipv6/sysctl_net_ipv6.c
+@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
+
+ static int __net_init ipv6_sysctl_net_init(struct net *net)
+ {
+- struct ctl_table *ipv6_table;
++ ctl_table_no_const *ipv6_table;
+ struct ctl_table *ipv6_route_table;
+ struct ctl_table *ipv6_icmp_table;
+ int err;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c69358c..d1e5855 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+ }
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ static void tcp_v6_hash(struct sock *sk)
+ {
+ if (sk->sk_state != TCP_CLOSE) {
+@@ -1657,6 +1661,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole)
++#endif
+ tcp_v6_send_reset(sk, skb);
+ discard:
+ if (opt_skb)
+@@ -1736,12 +1743,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 1;
++#endif
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ ret = 2;
++#endif
+ goto do_time_wait;
++ }
+
+ if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+@@ -1789,6 +1804,10 @@ no_tcp_socket:
+ bad_packet:
+ TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
+ tcp_v6_send_reset(NULL, skb);
+ }
+
+@@ -2049,7 +2068,13 @@ static void get_openreq6(struct seq_file *seq,
+ uid,
+ 0, /* non standard timer */
+ 0, /* open_requests have no inode */
+- 0, req);
++ 0,
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL
++#else
++ req
++#endif
++ );
+ }
+
+ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+@@ -2099,7 +2124,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+ sock_i_uid(sp),
+ icsk->icsk_probes_out,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
+ jiffies_to_clock_t(icsk->icsk_rto),
+ jiffies_to_clock_t(icsk->icsk_ack.ato),
+ (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
+@@ -2134,7 +2164,13 @@ static void get_timewait6_sock(struct seq_file *seq,
+ dest->s6_addr32[2], dest->s6_addr32[3], destp,
+ tw->tw_substate, 0, 0,
+ 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+- atomic_read(&tw->tw_refcnt), tw);
++ atomic_read(&tw->tw_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL
++#else
++ tw
++#endif
++ );
+ }
+
+ static int tcp6_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index f8bec1e..8628321 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -50,6 +50,10 @@
+ #include <linux/seq_file.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++extern int grsec_enable_blackhole;
++#endif
++
+ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
+ {
+ const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
+@@ -546,7 +550,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+
+ return 0;
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ drop_no_sk_drops_inc:
+ UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+ kfree_skb(skb);
+@@ -622,7 +626,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+ continue;
+ }
+ drop:
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+@@ -777,6 +781,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
+ proto == IPPROTO_UDPLITE);
+
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+@@ -793,7 +800,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb);
+ else if (sk_add_backlog(sk, skb)) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ goto discard;
+@@ -1409,8 +1416,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
+ 0, 0L, 0,
+ sock_i_uid(sp), 0,
+ sock_i_ino(sp),
+- atomic_read(&sp->sk_refcnt), sp,
+- atomic_read(&sp->sk_drops));
++ atomic_read(&sp->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sp,
++#endif
++ atomic_read_unchecked(&sp->sk_drops));
+ }
+
+ int udp6_seq_show(struct seq_file *seq, void *v)
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index db78e7d..c88f974 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -202,11 +202,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ }
+ }
+
+-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
++static int xfrm6_garbage_collect(struct dst_ops *ops)
+ {
+ struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
+
+- xfrm6_policy_afinfo.garbage_collect(net);
++ xfrm_garbage_collect_deferred(net);
+ return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
+ }
+
+diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
+index 253695d..9481ce8 100644
+--- a/net/irda/ircomm/ircomm_tty.c
++++ b/net/irda/ircomm/ircomm_tty.c
+@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+ add_wait_queue(&self->open_wait, &wait);
+
+ IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ /* As far as I can see, we protect open_count - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+- self->open_count--;
++ local_dec(&self->open_count);
+ }
+ spin_unlock_irqrestore(&self->spinlock, flags);
+- self->blocked_open++;
++ local_inc(&self->blocked_open);
+
+ while (1) {
+ if (tty->termios->c_cflag & CBAUD) {
+@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+ }
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count );
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
+
+ schedule();
+ }
+@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
+ if (extra_count) {
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+ }
+- self->blocked_open--;
++ local_dec(&self->blocked_open);
+
+ IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
+- __FILE__,__LINE__, tty->driver->name, self->open_count);
++ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
+
+ if (!retval)
+ self->flags |= ASYNC_NORMAL_ACTIVE;
+@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
+ }
+ /* ++ is not atomic, so this should be protected - Jean II */
+ spin_lock_irqsave(&self->spinlock, flags);
+- self->open_count++;
++ local_inc(&self->open_count);
+
+ tty->driver_data = self;
+ self->tty = tty;
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
+- self->line, self->open_count);
++ self->line, local_read(&self->open_count));
+
+ /* Not really used by us, but lets do it anyway */
+ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
+ return;
+ }
+
+- if ((tty->count == 1) && (self->open_count != 1)) {
++ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. state->count should always
+@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
+ */
+ IRDA_DEBUG(0, "%s(), bad serial port count; "
+ "tty->count is 1, state->count is %d\n", __func__ ,
+- self->open_count);
+- self->open_count = 1;
++ local_read(&self->open_count));
++ local_set(&self->open_count, 1);
+ }
+
+- if (--self->open_count < 0) {
++ if (local_dec_return(&self->open_count) < 0) {
+ IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
+- __func__, self->line, self->open_count);
+- self->open_count = 0;
++ __func__, self->line, local_read(&self->open_count));
++ local_set(&self->open_count, 0);
+ }
+- if (self->open_count) {
++ if (local_read(&self->open_count)) {
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
+@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
+ tty->closing = 0;
+ self->tty = NULL;
+
+- if (self->blocked_open) {
++ if (local_read(&self->blocked_open)) {
+ if (self->close_delay)
+ schedule_timeout_interruptible(self->close_delay);
+ wake_up_interruptible(&self->open_wait);
+@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
+ spin_lock_irqsave(&self->spinlock, flags);
+ self->flags &= ~ASYNC_NORMAL_ACTIVE;
+ self->tty = NULL;
+- self->open_count = 0;
++ local_set(&self->open_count, 0);
+ spin_unlock_irqrestore(&self->spinlock, flags);
+
+ wake_up_interruptible(&self->open_wait);
+@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
+ seq_putc(m, '\n');
+
+ seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
+- seq_printf(m, "Open count: %d\n", self->open_count);
++ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
+ seq_printf(m, "Max data size: %d\n", self->max_data_size);
+ seq_printf(m, "Max header size: %d\n", self->max_header_size);
+
+diff --git a/net/irda/iriap.c b/net/irda/iriap.c
+index e71e85b..29340a9 100644
+--- a/net/irda/iriap.c
++++ b/net/irda/iriap.c
+@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
+ /* case CS_ISO_8859_9: */
+ /* case CS_UNICODE: */
+ default:
+- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
+- __func__, ias_charset_types[charset]);
++ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
++ __func__, charset,
++ charset < ARRAY_SIZE(ias_charset_types) ?
++ ias_charset_types[charset] :
++ "(unknown)");
+
+ /* Aborting, close connection! */
+ iriap_disconnect_request(self);
+diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
+index 8c00416..9ea0c93 100644
+--- a/net/irda/irlap_frame.c
++++ b/net/irda/irlap_frame.c
+@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
+ /*
+ * We now have some discovery info to deliver!
+ */
+- discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
++ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
+ if (!discovery) {
+ IRDA_WARNING("%s: unable to malloc!\n", __func__);
+ return;
+diff --git a/net/irda/irttp.c b/net/irda/irttp.c
+index 32e3bb0..a4e5eb8 100644
+--- a/net/irda/irttp.c
++++ b/net/irda/irttp.c
+@@ -441,6 +441,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
+ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0);
+ if (lsap == NULL) {
+ IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__);
++ __irttp_close_tsap(self);
+ return NULL;
+ }
+
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index cf98d62..7bf2972 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -786,10 +786,10 @@ static int iucv_sock_autobind(struct sock *sk)
+
+ write_lock_bh(&iucv_sk_list.lock);
+
+- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
++ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+- atomic_inc_return(&iucv_sk_list.autobind_name));
++ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
+ }
+
+ write_unlock_bh(&iucv_sk_list.lock);
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index 403be43..87f09da 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata iucv_cpu_notifier = {
++static struct notifier_block iucv_cpu_notifier = {
+ .notifier_call = iucv_cpu_notify,
+ };
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index dc8d7ef..9d37285 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1097,7 +1097,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+
+ x->id.proto = proto;
+ x->id.spi = sa->sadb_sa_spi;
+- x->props.replay_window = sa->sadb_sa_replay;
++ x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
++ (sizeof(x->replay.bitmap) * 8));
+ if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
+ x->props.flags |= XFRM_STATE_NOECN;
+ if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
+@@ -1924,6 +1925,9 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
+ int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
+ struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
+
++ if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
++ return -EINVAL;
++
+ while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+ if ((err = parse_ipsecrequest(xp, rq)) < 0)
+ return err;
+@@ -3020,10 +3024,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
+ static u32 get_acqseq(void)
+ {
+ u32 res;
+- static atomic_t acqseq;
++ static atomic_unchecked_t acqseq;
+
+ do {
+- res = atomic_inc_return(&acqseq);
++ res = atomic_inc_return_unchecked(&acqseq);
+ } while (!res);
+ return res;
+ }
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 93a41a0..d4b4edb 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -78,8 +78,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &l2tp_nl_family, 0, L2TP_CMD_NOOP);
+- if (IS_ERR(hdr)) {
+- ret = PTR_ERR(hdr);
++ if (!hdr) {
++ ret = -EMSGSIZE;
+ goto err_out;
+ }
+
+@@ -228,8 +228,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
+ L2TP_CMD_TUNNEL_GET);
+- if (IS_ERR(hdr))
+- return PTR_ERR(hdr);
++ if (!hdr)
++ return -EMSGSIZE;
+
+ NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+@@ -560,8 +560,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
+ sk = tunnel->sock;
+
+ hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+- if (IS_ERR(hdr))
+- return PTR_ERR(hdr);
++ if (!hdr)
++ return -EMSGSIZE;
+
+ NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
+ NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index a9cf593..b04a2d5 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -27,6 +27,7 @@
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+ #include <net/mac80211.h>
++#include <asm/local.h>
+ #include "key.h"
+ #include "sta_info.h"
+
+@@ -767,7 +768,7 @@ struct ieee80211_local {
+ /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
+ spinlock_t queue_stop_reason_lock;
+
+- int open_count;
++ local_t open_count;
+ int monitors, cooked_mntrs;
+ /* number of interfaces with corresponding FIF_ flags */
+ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 8260cd5..f13516d 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+ break;
+ }
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ return -EADDRNOTAVAIL;
+ }
+@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+ mutex_unlock(&local->mtx);
+
+ if (coming_up)
+- local->open_count++;
++ local_inc(&local->open_count);
+
+ if (hw_reconf_flags) {
+ ieee80211_hw_config(local, hw_reconf_flags);
+@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+ err_del_interface:
+ drv_remove_interface(local, &sdata->vif);
+ err_stop:
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ }
+
+ if (going_down)
+- local->open_count--;
++ local_dec(&local->open_count);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+@@ -543,7 +543,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+
+ ieee80211_recalc_ps(local, -1);
+
+- if (local->open_count == 0) {
++ if (local_read(&local->open_count) == 0) {
+ if (local->ops->napi_poll)
+ napi_disable(&local->napi);
+ ieee80211_clear_tx_pending(local);
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 7d9b21d..0687004 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ local->hw.conf.power_level = power;
+ }
+
+- if (changed && local->open_count) {
++ if (changed && local_read(&local->open_count)) {
+ ret = drv_config(local, changed);
+ /*
+ * Goal:
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index 9ee7164..56c5061 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta;
+
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ goto suspend;
+
+ ieee80211_scan_cancel(local);
+@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ cancel_work_sync(&local->dynamic_ps_enable_work);
+ del_timer_sync(&local->dynamic_ps_timer);
+
+- local->wowlan = wowlan && local->open_count;
++ local->wowlan = wowlan && local_read(&local->open_count);
+ if (local->wowlan) {
+ int err = drv_suspend(local, wowlan);
+ if (err < 0) {
+@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ }
+
+ /* stop hardware - this must stop RX */
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ ieee80211_stop_device(local);
+
+ suspend:
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 7d84b87..6a69cd9 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
+
+ ASSERT_RTNL();
+
+- if (local->open_count)
++ if (local_read(&local->open_count))
+ return -EBUSY;
+
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
+index c97a065..ff61928 100644
+--- a/net/mac80211/rc80211_pid_debugfs.c
++++ b/net/mac80211/rc80211_pid_debugfs.c
+@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
+
+ spin_unlock_irqrestore(&events->lock, status);
+
+- if (copy_to_user(buf, pb, p))
++ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
+ return -EFAULT;
+
+ return p;
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 7095ae5..85ba5e9 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ drv_set_coverage_class(local, hw->wiphy->coverage_class);
+
+ /* everything else happens only if HW was up & running */
+- if (!local->open_count)
++ if (!local_read(&local->open_count))
+ goto wake_up;
+
+ /*
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index d5597b7..ab6d39c 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_MATCH_GRADM
++ tristate '"gradm" match support'
++ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
++ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
++ ---help---
++	  The gradm match allows matching on whether the grsecurity RBAC
++	  system is enabled.  It is useful when iptables rules are applied
++	  early during boot to block connections to the machine (except
++	  from a trusted host) while the RBAC system is still disabled.
++
+ config NETFILTER_XT_MATCH_HASHLIMIT
+ tristate '"hashlimit" match support'
+ depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
+index 1a02853..5d8c22e 100644
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
++obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
+ obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 86137b5..c12e721 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1679,7 +1679,7 @@ done:
+ return ret;
+ }
+
+-static struct nf_sockopt_ops so_set __read_mostly = {
++static struct nf_sockopt_ops so_set = {
+ .pf = PF_INET,
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
+index e13095d..6617217 100644
+--- a/net/netfilter/ipset/ip_set_hash_netiface.c
++++ b/net/netfilter/ipset/ip_set_hash_netiface.c
+@@ -761,7 +761,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING,
+- .len = IPSET_MAXNAMELEN - 1 },
++ .len = IFNAMSIZ - 1 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index 29fa5ba..8debc79 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
+ /* Increase the refcnt counter of the dest */
+ atomic_inc(&dest->refcnt);
+
+- conn_flags = atomic_read(&dest->conn_flags);
++ conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ if (cp->protocol != IPPROTO_UDP)
+ conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+ /* Bind with the destination and its corresponding transmitter */
+@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
+ atomic_set(&cp->refcnt, 1);
+
+ atomic_set(&cp->n_control, 0);
+- atomic_set(&cp->in_pkts, 0);
++ atomic_set_unchecked(&cp->in_pkts, 0);
+
+ atomic_inc(&ipvs->conn_count);
+ if (flags & IP_VS_CONN_F_NO_CPORT)
+@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
+
+ /* Don't drop the entry if its number of incoming packets is not
+ located in [0, 8] */
+- i = atomic_read(&cp->in_pkts);
++ i = atomic_read_unchecked(&cp->in_pkts);
+ if (i > 8 || i < 0) return 0;
+
+ if (!todrop_rate[i]) return 0;
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 6dc7d7d..e45913a 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+ ret = cp->packet_xmit(skb, cp, pd->pp);
+ /* do not touch skb anymore */
+
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ ip_vs_conn_put(cp);
+ return ret;
+ }
+@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = sysctl_sync_threshold(ipvs);
+ else
+- pkts = atomic_add_return(1, &cp->in_pkts);
++ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
+ cp->protocol == IPPROTO_SCTP) {
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 72f4253..c9a3f57 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
+ }
+- atomic_set(&dest->conn_flags, conn_flags);
++ atomic_set_unchecked(&dest->conn_flags, conn_flags);
+
+ /* bind the service */
+ if (!dest->svc) {
+@@ -1666,7 +1666,7 @@ proc_do_sync_mode(ctl_table *table, int write,
+ * align with netns init in ip_vs_control_net_init()
+ */
+
+-static struct ctl_table vs_vars[] = {
++static ctl_table_no_const vs_vars[] __read_only = {
+ {
+ .procname = "amemthresh",
+ .maxlen = sizeof(int),
+@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+ " %-7s %-6d %-10d %-10d\n",
+ &dest->addr.in6,
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+ "%-7s %-6d %-10d %-10d\n",
+ ntohl(dest->addr.ip),
+ ntohs(dest->port),
+- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
++ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
+ atomic_read(&dest->weight),
+ atomic_read(&dest->activeconns),
+ atomic_read(&dest->inactconns));
+@@ -2503,13 +2503,14 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
+ struct ip_vs_dest *dest;
+ struct ip_vs_dest_entry entry;
+
++ memset(&entry, 0, sizeof(entry));
+ list_for_each_entry(dest, &svc->destinations, n_list) {
+ if (count >= get->num_dests)
+ break;
+
+ entry.addr = dest->addr.ip;
+ entry.port = dest->port;
+- entry.conn_flags = atomic_read(&dest->conn_flags);
++ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
+ entry.weight = atomic_read(&dest->weight);
+ entry.u_threshold = dest->u_threshold;
+ entry.l_threshold = dest->l_threshold;
+@@ -3043,7 +3044,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
+ NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
+
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
++ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
+ NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
+@@ -3626,7 +3627,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+ {
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+- struct ctl_table *tbl;
++ ctl_table_no_const *tbl;
+
+ atomic_set(&ipvs->dropentry, 0);
+ spin_lock_init(&ipvs->dropentry_lock);
+diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
+index 0f16283..9ec4d21 100644
+--- a/net/netfilter/ipvs/ip_vs_lblc.c
++++ b/net/netfilter/ipvs/ip_vs_lblc.c
+@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
+ * IPVS LBLC sysctl table
+ */
+ #ifdef CONFIG_SYSCTL
+-static ctl_table vs_vars_table[] = {
++static ctl_table_no_const vs_vars_table[] __read_only = {
+ {
+ .procname = "lblc_expiration",
+ .data = NULL,
+diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
+index eec797f..6f3ec56 100644
+--- a/net/netfilter/ipvs/ip_vs_lblcr.c
++++ b/net/netfilter/ipvs/ip_vs_lblcr.c
+@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
+ * IPVS LBLCR sysctl table
+ */
+
+-static ctl_table vs_vars_table[] = {
++static ctl_table_no_const vs_vars_table[] __read_only = {
+ {
+ .procname = "lblcr_expiration",
+ .data = NULL,
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 2b6678c0..aaa41fc 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -649,7 +649,7 @@ control:
+ * i.e only increment in_pkts for Templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+- int pkts = atomic_add_return(1, &cp->in_pkts);
++ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
+
+ if (pkts % sysctl_sync_period(ipvs) != 1)
+ return;
+@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
++ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 38c0813..a29519d 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+ else
+ rc = NF_ACCEPT;
+ /* do not touch skb anymore */
+- atomic_inc(&cp->in_pkts);
++ atomic_inc_unchecked(&cp->in_pkts);
+ goto out;
+ }
+
+diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
+index 369df3f..b660190 100644
+--- a/net/netfilter/nf_conntrack_acct.c
++++ b/net/netfilter/nf_conntrack_acct.c
+@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_acct_init_sysctl(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
+ GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 7489bd3..5f4df88 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1491,6 +1491,10 @@ err_proto:
+ #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
+ #define DYING_NULLS_VAL ((1<<30)+1)
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
++#endif
++
+ static int nf_conntrack_init_net(struct net *net)
+ {
+ int ret;
+@@ -1504,7 +1508,11 @@ static int nf_conntrack_init_net(struct net *net)
+ goto err_stat;
+ }
+
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
++#else
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++#endif
+ if (!net->ct.slabname) {
+ ret = -ENOMEM;
+ goto err_slabname;
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 14af632..9914188 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -185,7 +185,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_event_init_sysctl(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
+ GFP_KERNEL);
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index 2e664a6..c854e4a 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -391,7 +391,7 @@ struct dccp_net {
+ unsigned int dccp_timeout[CT_DCCP_MAX + 1];
+ #ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_header;
+- struct ctl_table *sysctl_table;
++ ctl_table_no_const *sysctl_table;
+ #endif
+ };
+
+@@ -431,7 +431,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const char *msg;
+ u_int8_t state;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+
+ state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
+@@ -459,7 +459,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+
+ out_invalid:
+ if (LOG_INVALID(net, IPPROTO_DCCP))
+- nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg);
++ nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "%s", msg);
+ return false;
+ }
+
+@@ -483,7 +483,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
+ u_int8_t type, old_state, new_state;
+ enum ct_dccp_roles role;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+ type = dh->dccph_type;
+
+@@ -575,7 +575,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ unsigned int cscov;
+ const char *msg;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ if (dh == NULL) {
+ msg = "nf_ct_dccp: short packet ";
+ goto out_invalid;
+@@ -612,7 +612,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+
+ out_invalid:
+ if (LOG_INVALID(net, IPPROTO_DCCP))
+- nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg);
++ nf_log_packet(pf, 0, skb, NULL, NULL, NULL, "%s", msg);
+ return -NF_ACCEPT;
+ }
+
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 57ad466..e53ab60 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -519,7 +519,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+ __u32 seq, ack, sack, end, win, swin;
+ s16 receiver_offset;
+- bool res;
++ bool res, in_recv_win;
+
+ /*
+ * Get the required data from the packet.
+@@ -642,14 +642,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+
++ /* Is the ending sequence in the receive window (if available)? */
++ in_recv_win = !receiver->td_maxwin ||
++ after(end, sender->td_end - receiver->td_maxwin - 1);
++
+ pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
+ before(seq, sender->td_maxend + 1),
+- after(end, sender->td_end - receiver->td_maxwin - 1),
++ (in_recv_win ? 1 : 0),
+ before(sack, receiver->td_end + 1),
+ after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
+
+ if (before(seq, sender->td_maxend + 1) &&
+- after(end, sender->td_end - receiver->td_maxwin - 1) &&
++ in_recv_win &&
+ before(sack, receiver->td_end + 1) &&
+ after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
+ /*
+@@ -718,7 +722,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: %s ",
+ before(seq, sender->td_maxend + 1) ?
+- after(end, sender->td_end - receiver->td_maxwin - 1) ?
++ in_recv_win ?
+ before(sack, receiver->td_end + 1) ?
+ after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
+ : "ACK is under the lower bound (possible overly delayed ACK)"
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 05e9feb..3b519f3 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -475,7 +475,7 @@ static struct ctl_path nf_ct_path[] = {
+
+ static int nf_conntrack_standalone_init_sysctl(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ if (net_eq(net, &init_net)) {
+ nf_ct_netfilter_header =
+diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
+index af7dd31..7535cd7 100644
+--- a/net/netfilter/nf_conntrack_timestamp.c
++++ b/net/netfilter/nf_conntrack_timestamp.c
+@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+ #ifdef CONFIG_SYSCTL
+ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+ GFP_KERNEL);
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 957374a..dcbce7a 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -222,7 +222,7 @@ static struct ctl_path nf_log_sysctl_path[] = {
+ };
+
+ static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
+-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
++static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
+ static struct ctl_table_header *nf_log_dir_header;
+
+ static int nf_log_proc_dostring(ctl_table *table, int write,
+@@ -253,14 +253,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
+ rcu_assign_pointer(nf_loggers[tindex], logger);
+ mutex_unlock(&nf_log_mutex);
+ } else {
++ ctl_table_no_const nf_log_table = *table;
++
+ mutex_lock(&nf_log_mutex);
+ logger = rcu_dereference_protected(nf_loggers[tindex],
+ lockdep_is_held(&nf_log_mutex));
+ if (!logger)
+- table->data = "NONE";
++ nf_log_table.data = "NONE";
+ else
+- table->data = logger->name;
+- r = proc_dostring(table, write, buffer, lenp, ppos);
++ nf_log_table.data = logger->name;
++ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
+ mutex_unlock(&nf_log_mutex);
+ }
+
+diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
+index f042ae5..30ea486 100644
+--- a/net/netfilter/nf_sockopt.c
++++ b/net/netfilter/nf_sockopt.c
+@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
+ }
+ }
+
+- list_add(&reg->list, &nf_sockopts);
++ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
+ out:
+ mutex_unlock(&nf_sockopt_mutex);
+ return ret;
+@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
+ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
+ {
+ mutex_lock(&nf_sockopt_mutex);
+- list_del(&reg->list);
++ pax_list_del((struct list_head *)&reg->list);
+ mutex_unlock(&nf_sockopt_mutex);
+ }
+ EXPORT_SYMBOL(nf_unregister_sockopt);
+diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
+index 66b2c54..4ea49be 100644
+--- a/net/netfilter/nfnetlink_log.c
++++ b/net/netfilter/nfnetlink_log.c
+@@ -70,7 +70,7 @@ struct nfulnl_instance {
+ };
+
+ static DEFINE_SPINLOCK(instances_lock);
+-static atomic_t global_seq;
++static atomic_unchecked_t global_seq;
+
+ #define INSTANCE_BUCKETS 16
+ static struct hlist_head instance_table[INSTANCE_BUCKETS];
+@@ -388,6 +388,7 @@ __build_packet_message(struct nfulnl_instance *inst,
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(inst->group_num);
+
++ memset(&pmsg, 0, sizeof(pmsg));
+ pmsg.hw_protocol = skb->protocol;
+ pmsg.hook = hooknum;
+
+@@ -456,7 +457,10 @@ __build_packet_message(struct nfulnl_instance *inst,
+ if (indev && skb->dev &&
+ skb->mac_header != skb->network_header) {
+ struct nfulnl_msg_packet_hw phw;
+- int len = dev_parse_header(skb, phw.hw_addr);
++ int len;
++
++ memset(&phw, 0, sizeof(phw));
++ len = dev_parse_header(skb, phw.hw_addr);
+ if (len > 0) {
+ phw.hw_addrlen = htons(len);
+ NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
+@@ -502,7 +506,7 @@ __build_packet_message(struct nfulnl_instance *inst,
+ /* global sequence number */
+ if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
+ NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
+- htonl(atomic_inc_return(&global_seq)));
++ htonl(atomic_inc_return_unchecked(&global_seq)));
+
+ if (data_len) {
+ struct nlattr *nla;
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index a80b0cb..f7e08e7 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -344,7 +344,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
+ if (indev && entskb->dev &&
+ entskb->mac_header != entskb->network_header) {
+ struct nfqnl_msg_packet_hw phw;
+- int len = dev_parse_header(entskb, phw.hw_addr);
++ int len;
++
++ memset(&phw, 0, sizeof(phw));
++ len = dev_parse_header(entskb, phw.hw_addr);
+ if (len) {
+ phw.hw_addrlen = htons(len);
+ NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
+diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
+index 9e63b43..a61bc90 100644
+--- a/net/netfilter/xt_TCPMSS.c
++++ b/net/netfilter/xt_TCPMSS.c
+@@ -50,7 +50,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ unsigned int minlen)
+ {
+ struct tcphdr *tcph;
+- unsigned int tcplen, i;
++ int len, tcp_hdrlen;
++ unsigned int i;
+ __be16 oldval;
+ u16 newmss;
+ u8 *opt;
+@@ -58,11 +59,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ if (!skb_make_writable(skb, skb->len))
+ return -1;
+
+- tcplen = skb->len - tcphoff;
++ len = skb->len - tcphoff;
++ if (len < (int)sizeof(struct tcphdr))
++ return -1;
++
+ tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
++ tcp_hdrlen = tcph->doff * 4;
+
+- /* Header cannot be larger than the packet */
+- if (tcplen < tcph->doff*4)
++ if (len < tcp_hdrlen)
+ return -1;
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+@@ -83,9 +87,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ newmss = info->mss;
+
+ opt = (u_int8_t *)tcph;
+- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
+- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
+- opt[i+1] == TCPOLEN_MSS) {
++ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
++ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
+ u_int16_t oldmss;
+
+ oldmss = (opt[i+2] << 8) | opt[i+3];
+@@ -108,9 +111,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ }
+
+ /* There is data after the header so the option can't be added
+- without moving it, and doing so may make the SYN packet
+- itself too large. Accept the packet unmodified instead. */
+- if (tcplen > tcph->doff*4)
++ * without moving it, and doing so may make the SYN packet
++ * itself too large. Accept the packet unmodified instead.
++ */
++ if (len > tcp_hdrlen)
+ return 0;
+
+ /*
+@@ -127,10 +131,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ skb_put(skb, TCPOLEN_MSS);
+
+ opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
++ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
+
+ inet_proto_csum_replace2(&tcph->check, skb,
+- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
++ htons(len), htons(len + TCPOLEN_MSS), 1);
+ opt[0] = TCPOPT_MSS;
+ opt[1] = TCPOLEN_MSS;
+ opt[2] = (newmss & 0xff00) >> 8;
+diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
+new file mode 100644
+index 0000000..c566332
+--- /dev/null
++++ b/net/netfilter/xt_gradm.c
+@@ -0,0 +1,51 @@
++/*
++ * gradm match for netfilter
++ * Copyright © Zbigniew Krzystolik, 2010
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License; either version
++ * 2 or 3 as published by the Free Software Foundation.
++ */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/grsecurity.h>
++#include <linux/netfilter/xt_gradm.h>
++
++static bool
++gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
++{
++ const struct xt_gradm_mtinfo *info = par->matchinfo;
++ bool retval = false;
++ if (gr_acl_is_enabled())
++ retval = true;
++ return retval ^ info->invflags;
++}
++
++static struct xt_match gradm_mt_reg __read_mostly = {
++ .name = "gradm",
++ .revision = 0,
++ .family = NFPROTO_UNSPEC,
++ .match = gradm_mt,
++ .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
++ .me = THIS_MODULE,
++};
++
++static int __init gradm_mt_init(void)
++{
++ return xt_register_match(&gradm_mt_reg);
++}
++
++static void __exit gradm_mt_exit(void)
++{
++ xt_unregister_match(&gradm_mt_reg);
++}
++
++module_init(gradm_mt_init);
++module_exit(gradm_mt_exit);
++MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
++MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_gradm");
++MODULE_ALIAS("ip6t_gradm");
+diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
+index 4fe4fb4..87a89e5 100644
+--- a/net/netfilter/xt_statistic.c
++++ b/net/netfilter/xt_statistic.c
+@@ -19,7 +19,7 @@
+ #include <linux/module.h>
+
+ struct xt_statistic_priv {
+- atomic_t count;
++ atomic_unchecked_t count;
+ } ____cacheline_aligned_in_smp;
+
+ MODULE_LICENSE("GPL");
+@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ break;
+ case XT_STATISTIC_MODE_NTH:
+ do {
+- oval = atomic_read(&info->master->count);
++ oval = atomic_read_unchecked(&info->master->count);
+ nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
++ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
+ if (nval == 0)
+ ret = !ret;
+ break;
+@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
+ info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
+ if (info->master == NULL)
+ return -ENOMEM;
+- atomic_set(&info->master->count, info->u.nth.count);
++ atomic_set_unchecked(&info->master->count, info->u.nth.count);
+
+ return 0;
+ }
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 2369e96..3c3f7de 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -753,7 +753,7 @@ static void netlink_overrun(struct sock *sk)
+ sk->sk_error_report(sk);
+ }
+ }
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ }
+
+ static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
+@@ -2011,7 +2011,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+ sk_wmem_alloc_get(s),
+ nlk->cb,
+ atomic_read(&s->sk_refcnt),
+- atomic_read(&s->sk_drops),
++ atomic_read_unchecked(&s->sk_drops),
+ sock_i_ino(s)
+ );
+
+@@ -2118,7 +2118,7 @@ static void __init netlink_add_usersock_entry(void)
+ netlink_table_ungrab();
+ }
+
+-static struct pernet_operations __net_initdata netlink_net_ops = {
++static struct pernet_operations __net_initconst netlink_net_ops = {
+ .init = netlink_net_init,
+ .exit = netlink_net_exit,
+ };
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 874f8ff..339bb58 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -288,18 +288,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
+ goto errout;
+ }
+
++ pax_open_kernel();
+ if (ops->dumpit)
+- ops->flags |= GENL_CMD_CAP_DUMP;
++ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DUMP;
+ if (ops->doit)
+- ops->flags |= GENL_CMD_CAP_DO;
++ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DO;
+ if (ops->policy)
+- ops->flags |= GENL_CMD_CAP_HASPOL;
++ *(unsigned int *)&ops->flags |= GENL_CMD_CAP_HASPOL;
++ pax_close_kernel();
+
+ genl_lock();
+- list_add_tail(&ops->ops_list, &family->ops_list);
++ pax_list_add_tail((struct list_head *)&ops->ops_list, &family->ops_list);
+ genl_unlock();
+
+- genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
++ genl_ctrl_event(CTRL_CMD_NEWOPS, (void *)ops);
+ err = 0;
+ errout:
+ return err;
+@@ -329,9 +331,9 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
+ genl_lock();
+ list_for_each_entry(rc, &family->ops_list, ops_list) {
+ if (rc == ops) {
+- list_del(&ops->ops_list);
++ pax_list_del((struct list_head *)&ops->ops_list);
+ genl_unlock();
+- genl_ctrl_event(CTRL_CMD_DELOPS, ops);
++ genl_ctrl_event(CTRL_CMD_DELOPS, (void *)ops);
+ return 0;
+ }
+ }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index b4d889b..bb33240 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sock *sk = sock->sk;
+ struct nr_sock *nr = nr_sk(sk);
+
++ memset(sax, 0, sizeof(*sax));
+ lock_sock(sk);
+ if (peer != 0) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
+@@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
+ *uaddr_len = sizeof(struct full_sockaddr_ax25);
+ } else {
+ sax->fsa_ax25.sax25_family = AF_NETROM;
+- sax->fsa_ax25.sax25_ndigis = 0;
+ sax->fsa_ax25.sax25_call = nr->source_addr;
+ *uaddr_len = sizeof(struct sockaddr_ax25);
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 4f19bf2..5770069 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1678,7 +1678,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_packets++;
+- skb->dropcount = atomic_read(&sk->sk_drops);
++ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk, skb->len);
+@@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ drop_n_acct:
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_drops++;
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ drop_n_restore:
+@@ -2623,6 +2623,7 @@ out:
+
+ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ {
++ struct sock_extended_err ee;
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+@@ -2644,8 +2645,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
++ ee = serr->ee;
+ put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+- sizeof(serr->ee), &serr->ee);
++ sizeof ee, &ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+@@ -3273,7 +3275,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ case PACKET_HDRLEN:
+ if (len > sizeof(int))
+ len = sizeof(int);
+- if (copy_from_user(&val, optval, len))
++ if (len > sizeof(val) || copy_from_user(&val, optval, len))
+ return -EFAULT;
+ switch (val) {
+ case TPACKET_V1:
+@@ -3323,7 +3325,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, data, len))
++
++ if ((data == &val && len > sizeof(val)) ||
++ (data == &st_u.stats3 && len > sizeof(st_u.stats3)) ||
++ (data == &st && len > sizeof(st)) ||
++ copy_to_user(optval, data, len))
+ return -EFAULT;
+ return 0;
+ }
+diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
+index d65f699..855d175 100644
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
+ {
+ int err = 0;
+
+- if (protocol >= PHONET_NPROTO)
++ if (protocol < 0 || protocol >= PHONET_NPROTO)
+ return -EINVAL;
+
+ err = proto_register(pp->prot, 1);
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index 007546d..9a8e5c6 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
+
+ case PNS_PEP_CTRL_REQ:
+ if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ break;
+ }
+ __skb_pull(skb, 4);
+@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = -ENOBUFS;
+ break;
+ }
+@@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
+ }
+
+ if (pn->rx_credits == 0) {
+- atomic_inc(&sk->sk_drops);
++ atomic_inc_unchecked(&sk->sk_drops);
+ err = NET_RX_DROP;
+ break;
+ }
+diff --git a/net/phonet/socket.c b/net/phonet/socket.c
+index 4c7eff3..59c727f 100644
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
+ pn->resource, sk->sk_state,
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
+ sock_i_uid(sk), sock_i_ino(sk),
+- atomic_read(&sk->sk_refcnt), sk,
+- atomic_read(&sk->sk_drops), &len);
++ atomic_read(&sk->sk_refcnt),
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ NULL,
++#else
++ sk,
++#endif
++ atomic_read_unchecked(&sk->sk_drops), &len);
+ }
+ seq_printf(seq, "%*s\n", 127 - len, "");
+ return 0;
+diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
+index cea1c7d..e74ee16 100644
+--- a/net/phonet/sysctl.c
++++ b/net/phonet/sysctl.c
+@@ -62,7 +62,7 @@ static int proc_local_port_range(ctl_table *table, int write,
+ {
+ int ret;
+ int range[2] = {local_port_range[0], local_port_range[1]};
+- ctl_table tmp = {
++ ctl_table_no_const tmp = {
+ .data = &range,
+ .maxlen = sizeof(range),
+ .mode = table->mode,
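
ctl_table_no_const is the escape hatch for grsecurity's sysctl constification: ctl_table definitions become read-only globals, but proc handlers like the one above still need a short-lived writable table whose .data points at the stack. A sketch of the split, assuming a proc_dointvec-style handler:

    /* Sketch: read-only global table, writable per-call copy. */
    static int my_handler(ctl_table *table, int write,
                          void __user *buf, size_t *lenp, loff_t *ppos)
    {
            int range[2] = { 0, 0 };
            ctl_table_no_const tmp = *table;   /* stack copy is writable */

            tmp.data = &range;                 /* may point at the stack */
            return proc_dointvec(&tmp, write, buf, lenp, ppos);
    }
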
+diff --git a/net/rds/cong.c b/net/rds/cong.c
+index e5b65ac..f3b6fb7 100644
+--- a/net/rds/cong.c
++++ b/net/rds/cong.c
+@@ -78,7 +78,7 @@
+ * finds that the saved generation number is smaller than the global generation
+ * number, it wakes up the process.
+ */
+-static atomic_t rds_cong_generation = ATOMIC_INIT(0);
++static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
+
+ /*
+ * Congestion monitoring
+@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
+ rdsdebug("waking map %p for %pI4\n",
+ map, &map->m_addr);
+ rds_stats_inc(s_cong_update_received);
+- atomic_inc(&rds_cong_generation);
++ atomic_inc_unchecked(&rds_cong_generation);
+ if (waitqueue_active(&map->m_waitq))
+ wake_up(&map->m_waitq);
+ if (waitqueue_active(&rds_poll_waitq))
+@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
+
+ int rds_cong_updated_since(unsigned long *recent)
+ {
+- unsigned long gen = atomic_read(&rds_cong_generation);
++ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
+
+ if (likely(*recent == gen))
+ return 0;
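
atomic_unchecked_t is the other half of PaX's REFCOUNT feature: ordinary atomic_t operations are instrumented to trap on signed overflow, turning refcount-overflow bugs into a clean kill instead of a use-after-free. Counters that are allowed to wrap — generation numbers like rds_cong_generation here, statistics, debug IDs — are converted to atomic_unchecked_t so their increments compile to the plain, untrapped instructions. A sketch of a wrap-tolerant counter:

    /* Sketch: a generation counter that may legitimately wrap. */
    static atomic_unchecked_t gen = ATOMIC_INIT(0);

    static void bump_generation(void)
    {
            atomic_inc_unchecked(&gen);     /* INT_MAX -> INT_MIN is fine */
    }

    static int changed_since(int *snapshot)
    {
            int now = atomic_read_unchecked(&gen);

            if (*snapshot == now)
                    return 0;
            *snapshot = now;
            return 1;
    }

The same reasoning explains every atomic_inc_unchecked()/atomic64_unchecked_t conversion in the rds, rxrpc and sunrpc hunks that follow.
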
+diff --git a/net/rds/ib.h b/net/rds/ib.h
+index edfaaaf..8c89879 100644
+--- a/net/rds/ib.h
++++ b/net/rds/ib.h
+@@ -128,7 +128,7 @@ struct rds_ib_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
+index 51c8689..36c555f 100644
+--- a/net/rds/ib_cm.c
++++ b/net/rds/ib_cm.c
+@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
+index e29e0ca..fa3a6a3 100644
+--- a/net/rds/ib_recv.c
++++ b/net/rds/ib_recv.c
+@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
+ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff --git a/net/rds/iw.h b/net/rds/iw.h
+index 04ce3b1..48119a6 100644
+--- a/net/rds/iw.h
++++ b/net/rds/iw.h
+@@ -134,7 +134,7 @@ struct rds_iw_connection {
+ /* sending acks */
+ unsigned long i_ack_flags;
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_t i_ack_next; /* next ACK to send */
++ atomic64_unchecked_t i_ack_next; /* next ACK to send */
+ #else
+ spinlock_t i_ack_lock; /* protect i_ack_next */
+ u64 i_ack_next; /* next ACK to send */
+diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
+index 9556d28..f046d0e 100644
+--- a/net/rds/iw_cm.c
++++ b/net/rds/iw_cm.c
+@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
+ /* Clear the ACK state */
+ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
+ #ifdef KERNEL_HAS_ATOMIC64
+- atomic64_set(&ic->i_ack_next, 0);
++ atomic64_set_unchecked(&ic->i_ack_next, 0);
+ #else
+ ic->i_ack_next = 0;
+ #endif
+diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
+index 5e57347..3916042 100644
+--- a/net/rds/iw_recv.c
++++ b/net/rds/iw_recv.c
+@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+ int ack_required)
+ {
+- atomic64_set(&ic->i_ack_next, seq);
++ atomic64_set_unchecked(&ic->i_ack_next, seq);
+ if (ack_required) {
+ smp_mb__before_clear_bit();
+ set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+ clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+ smp_mb__after_clear_bit();
+
+- return atomic64_read(&ic->i_ack_next);
++ return atomic64_read_unchecked(&ic->i_ack_next);
+ }
+ #endif
+
+diff --git a/net/rds/rds.h b/net/rds/rds.h
+index 7eaba18..a3c303f 100644
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -449,7 +449,7 @@ struct rds_transport {
+ void (*sync_mr)(void *trans_private, int direction);
+ void (*free_mr)(void *trans_private, int invalidate);
+ void (*flush_mrs)(void);
+-};
++} __do_const;
+
+ struct rds_sock {
+ struct sock rs_sk;
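
__do_const marks struct rds_transport for the PaX constify plugin: a structure consisting of function pointers is forced into read-only memory, so a kernel write primitive can no longer be turned into a control-flow hijack by overwriting an ops slot. A sketch with invented callback names:

    /* Sketch: an ops table the constify plugin pins into .rodata. */
    struct my_ops {
            int  (*open)(void *priv);
            void (*close)(void *priv);
    } __do_const;

    static struct my_ops default_ops = {
            .open  = my_open,   /* hypothetical callbacks */
            .close = my_close,
    };  /* runtime writes to default_ops now fault */
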
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index edac9ef..16bcb98 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
+ int val = 1;
+
+ set_fs(KERNEL_DS);
+- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
++ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+ }
+diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
+index 1b4fd68..2234175 100644
+--- a/net/rds/tcp_send.c
++++ b/net/rds/tcp_send.c
+@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
++ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+ }
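
The rds_tcp changes are annotation-only: under set_fs(KERNEL_DS) the __user-typed setsockopt argument really carries a kernel pointer, and __force_user tells sparse (and grsec's user-copy checks) that the address-space cast is deliberate rather than a bug. The pattern, as in the hunks above:

    /* Sketch: calling a __user-typed API with a kernel buffer. */
    mm_segment_t oldfs = get_fs();
    int val = 1;

    set_fs(KERNEL_DS);      /* user-copy helpers accept kernel addresses */
    sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
                          (char __force_user *)&val, sizeof(val));
    set_fs(oldfs);          /* always restore the previous segment */
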
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 74c064c..fdec26f 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
+ __be32 rxrpc_epoch;
+
+ /* current debugging ID */
+-atomic_t rxrpc_debug_id;
++atomic_unchecked_t rxrpc_debug_id;
+
+ /* count of skbs currently in use */
+ atomic_t rxrpc_n_skbs;
+diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
+index f99cfce..cc529dd 100644
+--- a/net/rxrpc/ar-ack.c
++++ b/net/rxrpc/ar-ack.c
+@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
+
+ _enter("{%d,%d,%d,%d},",
+ call->acks_hard, call->acks_unacked,
+- atomic_read(&call->sequence),
++ atomic_read_unchecked(&call->sequence),
+ CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
+
+ stop = 0;
+@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
+
+ /* each Tx packet has a new serial number */
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&call->conn->serial));
++ htonl(atomic_inc_return_unchecked(&call->conn->serial));
+
+ hdr = (struct rxrpc_header *) txb->head;
+ hdr->serial = sp->hdr.serial;
+@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
+ */
+ static void rxrpc_clear_tx_window(struct rxrpc_call *call)
+ {
+- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
++ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
+ }
+
+ /*
+@@ -629,7 +629,7 @@ process_further:
+
+ latest = ntohl(sp->hdr.serial);
+ hard = ntohl(ack.firstPacket);
+- tx = atomic_read(&call->sequence);
++ tx = atomic_read_unchecked(&call->sequence);
+
+ _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ latest,
+@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
+ goto maybe_reschedule;
+
+ send_ACK_with_skew:
+- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
++ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
+ ntohl(ack.serial));
+ send_ACK:
+ mtu = call->conn->trans->peer->if_mtu;
+@@ -1173,7 +1173,7 @@ send_ACK:
+ ackinfo.rxMTU = htonl(5692);
+ ackinfo.jumbo_max = htonl(4);
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ ntohl(hdr.serial),
+ ntohs(ack.maxSkew),
+@@ -1191,7 +1191,7 @@ send_ACK:
+ send_message:
+ _debug("send message");
+
+- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
+ _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
+ send_message_2:
+
+diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
+index bf656c2..48f9d27 100644
+--- a/net/rxrpc/ar-call.c
++++ b/net/rxrpc/ar-call.c
+@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+ spin_lock_init(&call->lock);
+ rwlock_init(&call->state_lock);
+ atomic_set(&call->usage, 1);
+- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+
+ memset(&call->sock_node, 0xed, sizeof(call->sock_node));
+diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
+index 4106ca9..a338d7a 100644
+--- a/net/rxrpc/ar-connection.c
++++ b/net/rxrpc/ar-connection.c
+@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
+ rwlock_init(&conn->lock);
+ spin_lock_init(&conn->state_lock);
+ atomic_set(&conn->usage, 1);
+- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ conn->avail_calls = RXRPC_MAXCALLS;
+ conn->size_align = 4;
+ conn->header_size = sizeof(struct rxrpc_header);
+diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
+index e7ed43a..6afa140 100644
+--- a/net/rxrpc/ar-connevent.c
++++ b/net/rxrpc/ar-connevent.c
+@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
+index 1a2b0633..e8d1382 100644
+--- a/net/rxrpc/ar-input.c
++++ b/net/rxrpc/ar-input.c
+@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
+ /* track the latest serial number on this connection for ACK packet
+ * information */
+ serial = ntohl(sp->hdr.serial);
+- hi_serial = atomic_read(&call->conn->hi_serial);
++ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
+ while (serial > hi_serial)
+- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
++ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
+ serial);
+
+ /* request ACK generation for any ACK or DATA packet that requests
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 8e22bd3..f66d1c0 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -272,8 +272,8 @@ struct rxrpc_connection {
+ int error; /* error code for local abort */
+ int debug_id; /* debug ID for printks */
+ unsigned call_counter; /* call ID counter */
+- atomic_t serial; /* packet serial number counter */
+- atomic_t hi_serial; /* highest serial number received */
++ atomic_unchecked_t serial; /* packet serial number counter */
++ atomic_unchecked_t hi_serial; /* highest serial number received */
+ u8 avail_calls; /* number of calls available */
+ u8 size_align; /* data size alignment (for security) */
+ u8 header_size; /* rxrpc + security header size */
+@@ -346,7 +346,7 @@ struct rxrpc_call {
+ spinlock_t lock;
+ rwlock_t state_lock; /* lock for state transition */
+ atomic_t usage;
+- atomic_t sequence; /* Tx data packet sequence counter */
++ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
+ u32 abort_code; /* local/remote abort code */
+ enum { /* current state of call */
+ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
+@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
+ */
+ extern atomic_t rxrpc_n_skbs;
+ extern __be32 rxrpc_epoch;
+-extern atomic_t rxrpc_debug_id;
++extern atomic_unchecked_t rxrpc_debug_id;
+ extern struct workqueue_struct *rxrpc_workqueue;
+
+ /*
+diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
+index 43ea7de..eafaa2e 100644
+--- a/net/rxrpc/ar-key.c
++++ b/net/rxrpc/ar-key.c
+@@ -232,7 +232,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ if (toklen <= (n_parts + 1) * 4)
+ return -EINVAL;
+
+- princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL);
++ princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
+ if (!princ->name_parts)
+ return -ENOMEM;
+
+@@ -356,7 +356,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
+
+ _debug("n_elem %d", n_elem);
+
+- td = kcalloc(sizeof(struct krb5_tagged_data), n_elem,
++ td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
+ GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
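
The two rxrpc kcalloc() calls had their first arguments transposed; the signature is kcalloc(n, size, flags). The multiplication is overflow-checked either way, so the swap was not exploitable, but count-first matches the calloc() convention and keeps static checkers quiet:

    /* Sketch: element count first, element size second. */
    struct item *arr = kcalloc(n_items, sizeof(*arr), GFP_KERNEL);
    if (!arr)
            return -ENOMEM;
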
+diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
+index 87f7135..74d3703 100644
+--- a/net/rxrpc/ar-local.c
++++ b/net/rxrpc/ar-local.c
+@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
+ spin_lock_init(&local->lock);
+ rwlock_init(&local->services_lock);
+ atomic_set(&local->usage, 1);
+- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&local->srx, srx, sizeof(*srx));
+ }
+
+diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
+index 338d793..47391d0 100644
+--- a/net/rxrpc/ar-output.c
++++ b/net/rxrpc/ar-output.c
+@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
+ sp->hdr.cid = call->cid;
+ sp->hdr.callNumber = call->call_id;
+ sp->hdr.seq =
+- htonl(atomic_inc_return(&call->sequence));
++ htonl(atomic_inc_return_unchecked(&call->sequence));
+ sp->hdr.serial =
+- htonl(atomic_inc_return(&conn->serial));
++ htonl(atomic_inc_return_unchecked(&conn->serial));
+ sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
+ sp->hdr.userStatus = 0;
+ sp->hdr.securityIndex = conn->security_ix;
+diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
+index 2754f09..b20e38f 100644
+--- a/net/rxrpc/ar-peer.c
++++ b/net/rxrpc/ar-peer.c
+@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
+ INIT_LIST_HEAD(&peer->error_targets);
+ spin_lock_init(&peer->lock);
+ atomic_set(&peer->usage, 1);
+- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+ memcpy(&peer->srx, srx, sizeof(*srx));
+
+ rxrpc_assess_MTU_size(peer);
+diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
+index 38047f7..9f48511 100644
+--- a/net/rxrpc/ar-proc.c
++++ b/net/rxrpc/ar-proc.c
+@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
+ atomic_read(&conn->usage),
+ rxrpc_conn_states[conn->state],
+ key_serial(conn->key),
+- atomic_read(&conn->serial),
+- atomic_read(&conn->hi_serial));
++ atomic_read_unchecked(&conn->serial),
++ atomic_read_unchecked(&conn->hi_serial));
+
+ return 0;
+ }
+diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
+index 92df566..87ec1bf 100644
+--- a/net/rxrpc/ar-transport.c
++++ b/net/rxrpc/ar-transport.c
+@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
+ spin_lock_init(&trans->client_lock);
+ rwlock_init(&trans->conn_lock);
+ atomic_set(&trans->usage, 1);
+- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
++ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
+
+ if (peer->srx.transport.family == AF_INET) {
+ switch (peer->srx.transport_type) {
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 7635107..4670276 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+
+ len = iov[0].iov_len + iov[1].iov_len;
+
+- hdr.serial = htonl(atomic_inc_return(&conn->serial));
++ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
+@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
+
+ len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
+
+- hdr->serial = htonl(atomic_inc_return(&conn->serial));
++ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
+ _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
+
+ ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 0b6a391..febcef2 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -961,7 +961,7 @@ static const struct inet6_protocol sctpv6_protocol = {
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+
+-static struct sctp_af sctp_af_inet6 = {
++static struct sctp_af sctp_af_inet6 __read_only = {
+ .sa_family = AF_INET6,
+ .sctp_xmit = sctp_v6_xmit,
+ .setsockopt = ipv6_setsockopt,
+@@ -993,7 +993,7 @@ static struct sctp_af sctp_af_inet6 = {
+ #endif
+ };
+
+-static struct sctp_pf sctp_pf_inet6 = {
++static struct sctp_pf sctp_pf_inet6 __read_only = {
+ .event_msgname = sctp_inet6_event_msgname,
+ .skb_msgname = sctp_inet6_skb_msgname,
+ .af_supported = sctp_inet6_af_supported,
+@@ -1018,7 +1018,7 @@ void sctp_v6_pf_init(void)
+
+ void sctp_v6_pf_exit(void)
+ {
+- list_del(&sctp_af_inet6.list);
++ pax_list_del(&sctp_af_inet6.list);
+ }
+
+ /* Initialize IPv6 support and register with socket layer. */
+diff --git a/net/sctp/probe.c b/net/sctp/probe.c
+index bc6cd75..749e4eb 100644
+--- a/net/sctp/probe.c
++++ b/net/sctp/probe.c
+@@ -63,7 +63,7 @@ static struct {
+ struct timespec tstart;
+ } sctpw;
+
+-static void printl(const char *fmt, ...)
++static __printf(1, 2) void printl(const char *fmt, ...)
+ {
+ va_list args;
+ int len;
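
__printf(1, 2) declares that printl()'s first parameter is a printf-format string whose arguments start at position 2, so GCC cross-checks every call site at compile time:

    /* Sketch: the attribute catches mismatched format arguments. */
    static __printf(1, 2) void printl(const char *fmt, ...);

    printl("%s took %d ms\n", name, elapsed);   /* OK */
    printl("%s took %d ms\n", elapsed);         /* warning: int passed
                                                   for %s, %d missing */
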
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index 1e2eee8..ce3967e 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+ seq_printf(seq,
+ "%8pK %8pK %-3d %-3d %-2d %-4d "
+ "%4d %8d %8d %7d %5lu %-5d %5d ",
+- assoc, sk, sctp_sk(sk)->type, sk->sk_state,
++ assoc, sk,
++ sctp_sk(sk)->type, sk->sk_state,
+ assoc->state, hash,
+ assoc->assoc_id,
+ assoc->sndbuf_used,
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6f6ad86..d52dc47 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -862,8 +862,10 @@ int sctp_register_af(struct sctp_af *af)
+ return 0;
+ }
+
++ pax_open_kernel();
+ INIT_LIST_HEAD(&af->list);
+- list_add_tail(&af->list, &sctp_address_families);
++ pax_close_kernel();
++ pax_list_add_tail(&af->list, &sctp_address_families);
+ return 1;
+ }
+
+@@ -994,7 +996,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
+
+ static struct sctp_af sctp_af_inet;
+
+-static struct sctp_pf sctp_pf_inet = {
++static struct sctp_pf sctp_pf_inet __read_only = {
+ .event_msgname = sctp_inet_event_msgname,
+ .skb_msgname = sctp_inet_skb_msgname,
+ .af_supported = sctp_inet_af_supported,
+@@ -1064,7 +1066,7 @@ static const struct net_protocol sctp_protocol = {
+ };
+
+ /* IPv4 address related functions. */
+-static struct sctp_af sctp_af_inet = {
++static struct sctp_af sctp_af_inet __read_only = {
+ .sa_family = AF_INET,
+ .sctp_xmit = sctp_v4_xmit,
+ .setsockopt = ip_setsockopt,
+@@ -1149,7 +1151,7 @@ static void sctp_v4_pf_init(void)
+
+ static void sctp_v4_pf_exit(void)
+ {
+- list_del(&sctp_af_inet.list);
++ pax_list_del(&sctp_af_inet.list);
+ }
+
+ static int sctp_v4_protosw_init(void)
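
pax_open_kernel()/pax_close_kernel() bracket the one legitimate write into an object that constification has made read-only (the af->list initialization here, the p_count/pc_count bumps in the sunrpc hunks below), temporarily lifting write protection on the current CPU; pax_list_add_tail()/pax_list_del() do the same internally for list links embedded in such objects. The shape, with a hypothetical field:

    /* Sketch: one sanctioned write into read-only data. */
    pax_open_kernel();      /* drop write protection on this CPU */
    ops->hits++;            /* hypothetical constified counter */
    pax_close_kernel();     /* restore protection immediately */
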
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 76388b0..a967f68 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -441,7 +441,7 @@ static void sctp_generate_sack_event(unsigned long data)
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
+ }
+
+-sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
++sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+ NULL,
+ sctp_generate_t1_cookie_event,
+ sctp_generate_t1_init_event,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c53d01e..9659111 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2160,11 +2160,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
+ {
+ struct sctp_association *asoc;
+ struct sctp_ulpevent *event;
++ struct sctp_event_subscribe subscribe;
+
+ if (optlen > sizeof(struct sctp_event_subscribe))
+ return -EINVAL;
+- if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
++ if (copy_from_user(&subscribe, optval, optlen))
+ return -EFAULT;
++ sctp_sk(sk)->subscribe = subscribe;
+
+ /*
+ * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+@@ -4150,13 +4152,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+ {
++ struct sctp_event_subscribe subscribe;
++
+ if (len <= 0)
+ return -EINVAL;
+ if (len > sizeof(struct sctp_event_subscribe))
+ len = sizeof(struct sctp_event_subscribe);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
++ subscribe = sctp_sk(sk)->subscribe;
++ if (copy_to_user(optval, &subscribe, len))
+ return -EFAULT;
+ return 0;
+ }
+@@ -4174,6 +4179,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ */
+ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
+ {
++ __u32 autoclose;
++
+ /* Applicable to UDP-style socket only */
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+@@ -4182,7 +4189,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
+ len = sizeof(int);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
++ autoclose = sctp_sk(sk)->autoclose;
++ if (copy_to_user(optval, &autoclose, sizeof(int)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -4546,12 +4554,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
+ */
+ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
+ {
++ struct sctp_initmsg initmsg;
++
+ if (len < sizeof(struct sctp_initmsg))
+ return -EINVAL;
+ len = sizeof(struct sctp_initmsg);
+ if (put_user(len, optlen))
+ return -EFAULT;
+- if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
++ initmsg = sctp_sk(sk)->initmsg;
++ if (copy_to_user(optval, &initmsg, len))
+ return -EFAULT;
+ return 0;
+ }
+@@ -4592,6 +4603,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+ addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+ if (space_left < addrlen)
+ return -ENOMEM;
++ if (addrlen > sizeof(temp) || addrlen < 0)
++ return -EFAULT;
+ if (copy_to_user(to, &temp, addrlen))
+ return -EFAULT;
+ to += addrlen;
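
Every sctp socket hunk above applies the same bounce-buffer discipline: copy_from_user() lands in an on-stack struct first, so a faulting copy cannot leave sctp_sk(sk)->subscribe half-written, and copy_to_user() reads from an on-stack snapshot, so an oversized len cannot walk off the end of a field into neighboring socket memory (the peer_addrs hunk adds the matching addrlen sanity check). A sketch of the setsockopt side:

    /* Sketch: bounce a user option through the stack. */
    static int set_events(struct sock *sk, char __user *optval, int optlen)
    {
            struct sctp_event_subscribe subscribe = { };

            if (optlen > sizeof(subscribe))
                    return -EINVAL;
            if (copy_from_user(&subscribe, optval, optlen))
                    return -EFAULT;     /* socket state untouched */
            sctp_sk(sk)->subscribe = subscribe;
            return 0;
    }
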
+diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
+index 442ad4e..825ea94 100644
+--- a/net/sctp/ssnmap.c
++++ b/net/sctp/ssnmap.c
+@@ -41,8 +41,6 @@
+ #include <net/sctp/sctp.h>
+ #include <net/sctp/sm.h>
+
+-#define MAX_KMALLOC_SIZE 131072
+-
+ static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
+ __u16 out);
+
+@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
+ int size;
+
+ size = sctp_ssnmap_size(in, out);
+- if (size <= MAX_KMALLOC_SIZE)
++ if (size <= KMALLOC_MAX_SIZE)
+ retval = kmalloc(size, gfp);
+ else
+ retval = (struct sctp_ssnmap *)
+@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
+ return retval;
+
+ fail_map:
+- if (size <= MAX_KMALLOC_SIZE)
++ if (size <= KMALLOC_MAX_SIZE)
+ kfree(retval);
+ else
+ free_pages((unsigned long)retval, get_order(size));
+@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
+ int size;
+
+ size = sctp_ssnmap_size(map->in.len, map->out.len);
+- if (size <= MAX_KMALLOC_SIZE)
++ if (size <= KMALLOC_MAX_SIZE)
+ kfree(map);
+ else
+ free_pages((unsigned long)map, get_order(size));
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 8da4481..d02565e 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -317,7 +317,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
+ * 1/8, rto_alpha would be expressed as 3.
+ */
+ tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
+- + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
++ + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> sctp_rto_beta);
+ tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
+ + (rtt >> sctp_rto_alpha);
+ } else {
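
tp->srtt and rtt are unsigned 32-bit values, so the old abs(tp->srtt - rtt) operated on an already-wrapped difference: 100 - 300 evaluates to 4294967096, and recovering 200 from that relied on an implicit narrowing to int that stops working once the true gap reaches 2^31. Widening to __s64 before subtracting is correct across the whole u32 range:

    /* Sketch: overflow-safe magnitude of a __u32 difference. */
    static inline __u32 u32_dist(__u32 a, __u32 b)
    {
            return (__u32)abs64((__s64)a - (__s64)b);
    }
    /* u32_dist(100, 300) == 200; u32_dist(0, 0x80000000U) == 0x80000000 */
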
+diff --git a/net/socket.c b/net/socket.c
+index d4faade..2492841 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -88,6 +88,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/magic.h>
+ #include <linux/slab.h>
++#include <linux/in.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -105,6 +106,8 @@
+ #include <linux/sockios.h>
+ #include <linux/atalk.h>
+
++#include <linux/grsock.h>
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+@@ -156,7 +159,7 @@ static const struct file_operations socket_file_ops = {
+ */
+
+ static DEFINE_SPINLOCK(net_family_lock);
+-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
++const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
+
+ /*
+ * Statistics counters of the socket lists
+@@ -322,7 +325,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+ &sockfs_dentry_operations, SOCKFS_MAGIC);
+ }
+
+-static struct vfsmount *sock_mnt __read_mostly;
++struct vfsmount *sock_mnt __read_mostly;
+
+ static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+@@ -1188,6 +1191,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
+ return -EAFNOSUPPORT;
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
++ if (protocol < 0)
++ return -EINVAL;
+
+ /* Compatibility.
+
+@@ -1208,6 +1213,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
+ if (err)
+ return err;
+
++ if(!kern && !gr_search_socket(family, type, protocol)) {
++ if (rcu_access_pointer(net_families[family]) == NULL)
++ return -EAFNOSUPPORT;
++ else
++ return -EACCES;
++ }
++
++ if (!kern && gr_handle_sock_all(family, type, protocol)) {
++ if (rcu_access_pointer(net_families[family]) == NULL)
++ return -EAFNOSUPPORT;
++ else
++ return -EACCES;
++ }
++
+ /*
+ * Allocate the socket and allow the family to set things up. if
+ * the protocol is 0, the family is instructed to select an appropriate
+@@ -1432,6 +1451,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ if (err >= 0) {
++ if (gr_handle_sock_server((struct sockaddr *)&address)) {
++ err = -EACCES;
++ goto error;
++ }
++ err = gr_search_bind(sock, (struct sockaddr_in *)&address);
++ if (err)
++ goto error;
++
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+@@ -1440,6 +1467,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+ (struct sockaddr *)
+ &address, addrlen);
+ }
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1463,10 +1491,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+
++ if (gr_handle_sock_server_other(sock->sk)) {
++ err = -EPERM;
++ goto error;
++ }
++
++ err = gr_search_listen(sock);
++ if (err)
++ goto error;
++
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
++error:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+@@ -1510,6 +1548,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ newsock->type = sock->type;
+ newsock->ops = sock->ops;
+
++ if (gr_handle_sock_server_other(sock->sk)) {
++ err = -EPERM;
++ sock_release(newsock);
++ goto out_put;
++ }
++
++ err = gr_search_accept(sock);
++ if (err) {
++ sock_release(newsock);
++ goto out_put;
++ }
++
+ /*
+ * We don't need try_module_get here, as the listening socket (sock)
+ * has the protocol module (sock->ops->owner) held.
+@@ -1548,6 +1598,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ fd_install(newfd, newfile);
+ err = newfd;
+
++ gr_attach_curr_ip(newsock->sk);
++
+ out_put:
+ fput_light(sock->file, fput_needed);
+ out:
+@@ -1580,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ int, addrlen)
+ {
+ struct socket *sock;
++ struct sockaddr *sck;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+@@ -1590,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ if (err < 0)
+ goto out_put;
+
++ sck = (struct sockaddr *)&address;
++
++ if (gr_handle_sock_client(sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
++ err = gr_search_connect(sock, (struct sockaddr_in *)sck);
++ if (err)
++ goto out_put;
++
+ err =
+ security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+ if (err)
+@@ -1671,6 +1735,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ * the protocol.
+ */
+
++asmlinkage long sys_sendto(int, void *, size_t, unsigned, struct sockaddr *, int);
++
+ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int, addr_len)
+@@ -1737,7 +1803,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ struct socket *sock;
+ struct iovec iov;
+ struct msghdr msg;
+- struct sockaddr_storage address;
++ struct sockaddr_storage address = { };
+ int err, err2;
+ int fput_needed;
+
+@@ -1966,7 +2032,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ * checking falls down on this.
+ */
+ if (copy_from_user(ctl_buf,
+- (void __user __force *)msg_sys->msg_control,
++ (void __force_user *)msg_sys->msg_control,
+ ctl_len))
+ goto out_freectl;
+ msg_sys->msg_control = ctl_buf;
+@@ -2117,7 +2183,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ int err, iov_size, total_len, len;
+
+ /* kernel mode address */
+- struct sockaddr_storage addr;
++ struct sockaddr_storage addr = { };
+
+ /* user mode address pointers */
+ struct sockaddr __user *uaddr;
+@@ -2148,7 +2214,8 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ /* Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+- uaddr = (__force void __user *)msg_sys->msg_name;
++
++ uaddr = (void __force_user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov,
+@@ -2792,7 +2859,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
+ }
+
+ ifr = compat_alloc_user_space(buf_size);
+- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
++ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
+
+ if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+ return -EFAULT;
+@@ -2816,12 +2883,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
+ offsetof(struct ethtool_rxnfc, fs.ring_cookie));
+
+ if (copy_in_user(rxnfc, compat_rxnfc,
+- (void *)(&rxnfc->fs.m_ext + 1) -
+- (void *)rxnfc) ||
++ (void __user *)(&rxnfc->fs.m_ext + 1) -
++ (void __user *)rxnfc) ||
+ copy_in_user(&rxnfc->fs.ring_cookie,
+ &compat_rxnfc->fs.ring_cookie,
+- (void *)(&rxnfc->fs.location + 1) -
+- (void *)&rxnfc->fs.ring_cookie) ||
++ (void __user *)(&rxnfc->fs.location + 1) -
++ (void __user *)&rxnfc->fs.ring_cookie) ||
+ copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
+ return -EFAULT;
+@@ -2833,12 +2900,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
+
+ if (convert_out) {
+ if (copy_in_user(compat_rxnfc, rxnfc,
+- (const void *)(&rxnfc->fs.m_ext + 1) -
+- (const void *)rxnfc) ||
++ (const void __user *)(&rxnfc->fs.m_ext + 1) -
++ (const void __user *)rxnfc) ||
+ copy_in_user(&compat_rxnfc->fs.ring_cookie,
+ &rxnfc->fs.ring_cookie,
+- (const void *)(&rxnfc->fs.location + 1) -
+- (const void *)&rxnfc->fs.ring_cookie) ||
++ (const void __user *)(&rxnfc->fs.location + 1) -
++ (const void __user *)&rxnfc->fs.ring_cookie) ||
+ copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
+ return -EFAULT;
+@@ -2908,7 +2975,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = dev_ioctl(net, cmd,
+- (struct ifreq __user __force *) &kifr);
++ (struct ifreq __force_user *) &kifr);
+ set_fs(old_fs);
+
+ return err;
+@@ -3017,7 +3084,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
++ err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
+ set_fs(old_fs);
+
+ if (cmd == SIOCGIFMAP && !err) {
+@@ -3122,7 +3189,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
+ ret |= __get_user(rtdev, &(ur4->rt_dev));
+ if (rtdev) {
+ ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
+- r4.rt_dev = (char __user __force *)devname;
++ r4.rt_dev = (char __force_user *)devname;
+ devname[15] = 0;
+ } else
+ r4.rt_dev = NULL;
+@@ -3362,8 +3429,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
+ int __user *uoptlen;
+ int err;
+
+- uoptval = (char __user __force *) optval;
+- uoptlen = (int __user __force *) optlen;
++ uoptval = (char __force_user *) optval;
++ uoptlen = (int __force_user *) optlen;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+@@ -3383,7 +3450,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
+ char __user *uoptval;
+ int err;
+
+- uoptval = (char __user __force *) optval;
++ uoptval = (char __force_user *) optval;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
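
The net/socket.c hunks wire grsecurity's socket ACLs into every syscall path: socket creation, bind, listen, accept and connect each gain a gr_* policy check ahead of the LSM hook, denying with -EACCES or -EPERM and jumping to the function's existing unwind label. The hooks themselves live elsewhere in the patch; roughly, they reduce to a group-based policy test (this body and its knob names are invented for illustration):

    /* Sketch only: approximate shape of a gr_* socket hook. */
    int gr_handle_sock_server_other(const struct sock *sk)
    {
            if (!grsec_enable_socket_server)        /* assumed knob */
                    return 0;
            if (in_group_p(grsec_socket_server_gid) &&
                !capable(CAP_NET_ADMIN))
                    return 1;   /* caller maps this to -EPERM */
            return 0;
    }
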
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b2250da..db374b7 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -163,10 +163,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
+ err = rpciod_up();
+ if (err)
+ goto out_no_rpciod;
+- err = -EINVAL;
+- if (!xprt)
+- goto out_no_xprt;
+
++ err = -EINVAL;
+ if (args->version >= program->nrvers)
+ goto out_err;
+ version = program->version[args->version];
+@@ -259,10 +257,9 @@ out_no_stats:
+ kfree(clnt->cl_server);
+ kfree(clnt);
+ out_err:
+- xprt_put(xprt);
+-out_no_xprt:
+ rpciod_down();
+ out_no_rpciod:
++ xprt_put(xprt);
+ return ERR_PTR(err);
+ }
+
+@@ -903,7 +900,9 @@ call_start(struct rpc_task *task)
+ (RPC_IS_ASYNC(task) ? "async" : "sync"));
+
+ /* Increment call count */
+- task->tk_msg.rpc_proc->p_count++;
++ pax_open_kernel();
++ (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
++ pax_close_kernel();
+ clnt->cl_stats->rpccnt++;
+ task->tk_action = call_reserve;
+ }
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index bfddd68..567429b 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1059,6 +1059,8 @@ static struct file_system_type rpc_pipe_fs_type = {
+ .mount = rpc_mount,
+ .kill_sb = kill_litter_super,
+ };
++MODULE_ALIAS_FS("rpc_pipefs");
++MODULE_ALIAS("rpc_pipefs");
+
+ static void
+ init_once(void *foo)
+@@ -1104,6 +1106,3 @@ void unregister_rpc_pipefs(void)
+ kmem_cache_destroy(rpc_inode_cachep);
+ unregister_filesystem(&rpc_pipe_fs_type);
+ }
+-
+-/* Make 'mount -t rpc_pipefs ...' autoload this module. */
+-MODULE_ALIAS("rpc_pipefs");
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 206c61e..e3641fb 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
+ #ifdef RPC_DEBUG
+ static void rpc_task_set_debuginfo(struct rpc_task *task)
+ {
+- static atomic_t rpc_pid;
++ static atomic_unchecked_t rpc_pid;
+
+- task->tk_pid = atomic_inc_return(&rpc_pid);
++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
+ }
+ #else
+ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index c80c162..83a1e28 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -732,7 +732,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+
+ __module_get(serv->sv_module);
+ task = kthread_create_on_node(serv->sv_function, rqstp,
+- node, serv->sv_name);
++ node, "%s", serv->sv_name);
+ if (IS_ERR(task)) {
+ error = PTR_ERR(task);
+ module_put(serv->sv_module);
+@@ -1145,7 +1145,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ svc_putnl(resv, RPC_SUCCESS);
+
+ /* Bump per-procedure stats counter */
+- procp->pc_count++;
++ pax_open_kernel();
++ (*(unsigned int *)&procp->pc_count)++;
++ pax_close_kernel();
+
+ /* Initialize storage for argp and resp */
+ memset(rqstp->rq_argp, 0, procp->pc_argsize);
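
Two separate fixes sit in the svc.c hunks: kthread_create_on_node() takes a printf format as its final arguments, so serv->sv_name is now routed through a literal "%s" rather than being interpreted as a format itself, and the constified pc_count statistic is bumped inside a pax_open_kernel() window as described earlier. The format-string rule in isolation:

    /* Sketch: never pass an externally-derived string as a format. */
    task = kthread_create_on_node(fn, rqstp, node, serv->sv_name);        /* risky */
    task = kthread_create_on_node(fn, rqstp, node, "%s", serv->sv_name);  /* safe  */
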
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index ce13632..144987d 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -602,7 +602,7 @@ struct cache_detail unix_gid_cache = {
+ .alloc = unix_gid_alloc,
+ };
+
+-static struct unix_gid *unix_gid_lookup(uid_t uid)
++static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(uid_t uid)
+ {
+ struct unix_gid ug;
+ struct cache_head *ch;
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 296192c..5a95b93 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
+ int buflen, unsigned int base)
+ {
+ size_t save_iovlen;
+- void __user *save_iovbase;
++ void *save_iovbase;
+ unsigned int i;
+ int ret;
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
+index 09af4fa..677025e 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma.c
++++ b/net/sunrpc/xprtrdma/svc_rdma.c
+@@ -47,6 +47,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/sched.h>
+ #include <linux/sunrpc/svc_rdma.h>
++#include "xprt_rdma.h"
+
+ #define RPCDBG_FACILITY RPCDBG_SVCXPRT
+
+@@ -61,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
+ static unsigned int min_max_inline = 4096;
+ static unsigned int max_max_inline = 65536;
+
+-atomic_t rdma_stat_recv;
+-atomic_t rdma_stat_read;
+-atomic_t rdma_stat_write;
+-atomic_t rdma_stat_sq_starve;
+-atomic_t rdma_stat_rq_starve;
+-atomic_t rdma_stat_rq_poll;
+-atomic_t rdma_stat_rq_prod;
+-atomic_t rdma_stat_sq_poll;
+-atomic_t rdma_stat_sq_prod;
++atomic_unchecked_t rdma_stat_recv;
++atomic_unchecked_t rdma_stat_read;
++atomic_unchecked_t rdma_stat_write;
++atomic_unchecked_t rdma_stat_sq_starve;
++atomic_unchecked_t rdma_stat_rq_starve;
++atomic_unchecked_t rdma_stat_rq_poll;
++atomic_unchecked_t rdma_stat_rq_prod;
++atomic_unchecked_t rdma_stat_sq_poll;
++atomic_unchecked_t rdma_stat_sq_prod;
+
+ /* Temporary NFS request map and context caches */
+ struct kmem_cache *svc_rdma_map_cachep;
+@@ -109,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
+ len -= *ppos;
+ if (len > *lenp)
+ len = *lenp;
+- if (len && copy_to_user(buffer, str_buf, len))
++ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += len;
+@@ -150,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
+ {
+ .procname = "rdma_stat_read",
+ .data = &rdma_stat_read,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_recv",
+ .data = &rdma_stat_recv,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_write",
+ .data = &rdma_stat_write,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_starve",
+ .data = &rdma_stat_sq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_starve",
+ .data = &rdma_stat_rq_starve,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_poll",
+ .data = &rdma_stat_rq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_rq_prod",
+ .data = &rdma_stat_rq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_poll",
+ .data = &rdma_stat_sq_poll,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+ {
+ .procname = "rdma_stat_sq_prod",
+ .data = &rdma_stat_sq_prod,
+- .maxlen = sizeof(atomic_t),
++ .maxlen = sizeof(atomic_unchecked_t),
+ .mode = 0644,
+ .proc_handler = read_reset_stat,
+ },
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+index 9530ef2..65b1462 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
+ struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
+
+ while (ch->rc_discrim != xdr_zero) {
+- u64 ch_offset;
+-
+ if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
+ (unsigned long)vaend) {
+ dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
+ return NULL;
+ }
+-
+- ch->rc_discrim = ntohl(ch->rc_discrim);
+- ch->rc_position = ntohl(ch->rc_position);
+- ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
+- ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
+- va = (u32 *)&ch->rc_target.rs_offset;
+- xdr_decode_hyper(va, &ch_offset);
+- put_unaligned(ch_offset, (u64 *)va);
+ ch++;
+ }
+ return (u32 *)&ch->rc_position;
+@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
+ *byte_count = 0;
+ *ch_count = 0;
+ for (; ch->rc_discrim != 0; ch++) {
+- *byte_count = *byte_count + ch->rc_target.rs_length;
++ *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
+ *ch_count = *ch_count + 1;
+ }
+ }
+@@ -108,7 +98,9 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
+ */
+ static u32 *decode_write_list(u32 *va, u32 *vaend)
+ {
+- int ch_no;
++ unsigned long start, end;
++ int nchunks;
++
+ struct rpcrdma_write_array *ary =
+ (struct rpcrdma_write_array *)va;
+
+@@ -121,37 +113,28 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
+ dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
+ return NULL;
+ }
+- ary->wc_discrim = ntohl(ary->wc_discrim);
+- ary->wc_nchunks = ntohl(ary->wc_nchunks);
+- if (((unsigned long)&ary->wc_array[0] +
+- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+- (unsigned long)vaend) {
++ nchunks = ntohl(ary->wc_nchunks);
++
++ start = (unsigned long)&ary->wc_array[0];
++ end = (unsigned long)vaend;
++ if (nchunks < 0 ||
++ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
++ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
+ dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
+- ary, ary->wc_nchunks, vaend);
++ ary, nchunks, vaend);
+ return NULL;
+ }
+- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
+- u64 ch_offset;
+-
+- ary->wc_array[ch_no].wc_target.rs_handle =
+- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
+- ary->wc_array[ch_no].wc_target.rs_length =
+- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
+- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
+- xdr_decode_hyper(va, &ch_offset);
+- put_unaligned(ch_offset, (u64 *)va);
+- }
+-
+ /*
+ * rs_length is the 2nd 4B field in wc_target and taking its
+ * address skips the list terminator
+ */
+- return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
++ return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
+ }
+
+ static u32 *decode_reply_array(u32 *va, u32 *vaend)
+ {
+- int ch_no;
++ unsigned long start, end;
++ int nchunks;
+ struct rpcrdma_write_array *ary =
+ (struct rpcrdma_write_array *)va;
+
+@@ -164,28 +147,18 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
+ dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
+ return NULL;
+ }
+- ary->wc_discrim = ntohl(ary->wc_discrim);
+- ary->wc_nchunks = ntohl(ary->wc_nchunks);
+- if (((unsigned long)&ary->wc_array[0] +
+- (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+- (unsigned long)vaend) {
++ nchunks = ntohl(ary->wc_nchunks);
++
++ start = (unsigned long)&ary->wc_array[0];
++ end = (unsigned long)vaend;
++ if (nchunks < 0 ||
++ nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
++ (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
+ dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
+- ary, ary->wc_nchunks, vaend);
++ ary, nchunks, vaend);
+ return NULL;
+ }
+- for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
+- u64 ch_offset;
+-
+- ary->wc_array[ch_no].wc_target.rs_handle =
+- ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
+- ary->wc_array[ch_no].wc_target.rs_length =
+- ntohl(ary->wc_array[ch_no].wc_target.rs_length);
+- va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
+- xdr_decode_hyper(va, &ch_offset);
+- put_unaligned(ch_offset, (u64 *)va);
+- }
+-
+- return (u32 *)&ary->wc_array[ch_no];
++ return (u32 *)&ary->wc_array[nchunks];
+ }
+
+ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
+@@ -386,13 +359,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
+
+ void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
+ int chunk_no,
+- u32 rs_handle, u64 rs_offset,
++ __be32 rs_handle,
++ __be64 rs_offset,
+ u32 write_len)
+ {
+ struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
+- seg->rs_handle = htonl(rs_handle);
++ seg->rs_handle = rs_handle;
++ seg->rs_offset = rs_offset;
+ seg->rs_length = htonl(write_len);
+- xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
+ }
+
+ void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
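
The rewritten decode_write_list()/decode_reply_array() stop trusting the wire: wc_nchunks is read once into a local, and the bounds test is phrased so that neither the multiplication nor the pointer addition can wrap before the comparison happens, which the old start + size * nchunks > end check could not guarantee. The hunks also stop byte-swapping the chunk list in place, leaving it big-endian for later readers. The overflow-proof test in isolation:

    /* Sketch: a bounds check that cannot itself overflow. */
    static bool array_fits(unsigned long start, unsigned long end,
                           size_t elem, int n)
    {
            if (n < 0)
                    return false;
            if ((size_t)n > (SIZE_MAX - start) / elem)
                    return false;   /* start + n * elem would wrap */
            return start + n * elem <= end;
    }
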
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index df67211..c4a1489 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
+ page_off = 0;
+ ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+ ch_no = 0;
+- ch_bytes = ch->rc_target.rs_length;
++ ch_bytes = ntohl(ch->rc_target.rs_length);
+ head->arg.head[0] = rqstp->rq_arg.head[0];
+ head->arg.tail[0] = rqstp->rq_arg.tail[0];
+ head->arg.pages = &head->pages[head->count];
+@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
+ ch_no++;
+ ch++;
+ chl_map->ch[ch_no].start = sge_no;
+- ch_bytes = ch->rc_target.rs_length;
++ ch_bytes = ntohl(ch->rc_target.rs_length);
+ /* If bytes remaining account for next chunk */
+ if (byte_count) {
+ head->arg.page_len += ch_bytes;
+@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
+ offset = 0;
+ ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+ for (ch_no = 0; ch_no < ch_count; ch_no++) {
++ int len = ntohl(ch->rc_target.rs_length);
+ rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
+- rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
++ rpl_map->sge[ch_no].iov_len = len;
+ chl_map->ch[ch_no].count = 1;
+ chl_map->ch[ch_no].start = ch_no;
+- offset += ch->rc_target.rs_length;
++ offset += len;
+ ch++;
+ }
+
+@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
+ for (i = 0; i < count; i++) {
+ ctxt->sge[i].length = 0; /* in case map fails */
+ if (!frmr) {
+- BUG_ON(0 == virt_to_page(vec[i].iov_base));
++ BUG_ON(!virt_to_page(vec[i].iov_base));
+ off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
+ ctxt->sge[i].addr =
+ ib_dma_map_page(xprt->sc_cm_id->device,
+@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
+
+ for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+ ch->rc_discrim != 0; ch++, ch_no++) {
++ u64 rs_offset;
+ next_sge:
+ ctxt = svc_rdma_get_context(xprt);
+ ctxt->direction = DMA_FROM_DEVICE;
+@@ -440,10 +442,10 @@ next_sge:
+ read_wr.opcode = IB_WR_RDMA_READ;
+ ctxt->wr_op = read_wr.opcode;
+ read_wr.send_flags = IB_SEND_SIGNALED;
+- read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
+- read_wr.wr.rdma.remote_addr =
+- get_unaligned(&(ch->rc_target.rs_offset)) +
+- sgl_offset;
++ read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
++ xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
++ &rs_offset);
++ read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
+ read_wr.sg_list = ctxt->sge;
+ read_wr.num_sge =
+ rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
+@@ -499,7 +501,7 @@ next_sge:
+ svc_rdma_put_context(ctxt, 0);
+ goto out;
+ }
+- atomic_inc(&rdma_stat_read);
++ atomic_inc_unchecked(&rdma_stat_read);
+
+ if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+ chl_map->ch[ch_no].count -= read_wr.num_sge;
+@@ -609,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ dto_q);
+ list_del_init(&ctxt->dto_q);
+ } else {
+- atomic_inc(&rdma_stat_rq_starve);
++ atomic_inc_unchecked(&rdma_stat_rq_starve);
+ clear_bit(XPT_DATA, &xprt->xpt_flags);
+ ctxt = NULL;
+ }
+@@ -629,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
+ dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
+ ctxt, rdma_xprt, rqstp, ctxt->wc_status);
+ BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
+- atomic_inc(&rdma_stat_recv);
++ atomic_inc_unchecked(&rdma_stat_recv);
+
+ /* Build up the XDR from the receive buffers. */
+ rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 249a835..c887c45 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
+ write_wr.wr.rdma.remote_addr = to;
+
+ /* Post It */
+- atomic_inc(&rdma_stat_write);
++ atomic_inc_unchecked(&rdma_stat_write);
+ if (svc_rdma_send(xprt, &write_wr))
+ goto err;
+ return 0;
+@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
+ u64 rs_offset;
+
+ arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
+- write_len = min(xfer_len, arg_ch->rs_length);
++ write_len = min(xfer_len, ntohl(arg_ch->rs_length));
+
+ /* Prepare the response chunk given the length actually
+ * written */
+- rs_offset = get_unaligned(&(arg_ch->rs_offset));
++ xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
+ svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
+- arg_ch->rs_handle,
+- rs_offset,
+- write_len);
++ arg_ch->rs_handle,
++ arg_ch->rs_offset,
++ write_len);
+ chunk_off = 0;
+ while (write_len) {
+ int this_write;
+ this_write = min(write_len, max_write);
+ ret = send_write(xprt, rqstp,
+- arg_ch->rs_handle,
++ ntohl(arg_ch->rs_handle),
+ rs_offset + chunk_off,
+ xdr_off,
+ this_write,
+@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
+ u32 xdr_off;
+ int chunk_no;
+ int chunk_off;
++ int nchunks;
+ struct rpcrdma_segment *ch;
+ struct rpcrdma_write_array *arg_ary;
+ struct rpcrdma_write_array *res_ary;
+@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
+ max_write = xprt->sc_max_sge * PAGE_SIZE;
+
+ /* xdr offset starts at RPC message */
++ nchunks = ntohl(arg_ary->wc_nchunks);
+ for (xdr_off = 0, chunk_no = 0;
+- xfer_len && chunk_no < arg_ary->wc_nchunks;
++ xfer_len && chunk_no < nchunks;
+ chunk_no++) {
+ u64 rs_offset;
+ ch = &arg_ary->wc_array[chunk_no].wc_target;
+- write_len = min(xfer_len, ch->rs_length);
++ write_len = min(xfer_len, htonl(ch->rs_length));
+
+ /* Prepare the reply chunk given the length actually
+ * written */
+- rs_offset = get_unaligned(&(ch->rs_offset));
++ xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
+ svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
+- ch->rs_handle, rs_offset,
+- write_len);
++ ch->rs_handle, ch->rs_offset,
++ write_len);
+ chunk_off = 0;
+ while (write_len) {
+ int this_write;
+
+ this_write = min(write_len, max_write);
+ ret = send_write(xprt, rqstp,
+- ch->rs_handle,
++ ntohl(ch->rs_handle),
+ rs_offset + chunk_off,
+ xdr_off,
+ this_write,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index ba1296d..515ea15 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -51,6 +51,7 @@
+ #include <rdma/rdma_cm.h>
+ #include <linux/sunrpc/svc_rdma.h>
+ #include <linux/export.h>
++#include "xprt_rdma.h"
+
+ #define RPCDBG_FACILITY RPCDBG_SVCXPRT
+
+@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+ };
+
+-/* WR context cache. Created in svc_rdma.c */
+-extern struct kmem_cache *svc_rdma_ctxt_cachep;
+-
+-/* Workqueue created in svc_rdma.c */
+-extern struct workqueue_struct *svc_rdma_wq;
+-
+ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+ {
+ struct svc_rdma_op_ctxt *ctxt;
+@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
+ atomic_dec(&xprt->sc_ctxt_used);
+ }
+
+-/* Temporary NFS request map cache. Created in svc_rdma.c */
+-extern struct kmem_cache *svc_rdma_map_cachep;
+-
+ /*
+ * Temporary NFS req mappings are shared across all transport
+ * instances. These are short lived and should be bounded by the number
+@@ -300,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
+ return;
+
+ ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_rq_poll);
++ atomic_inc_unchecked(&rdma_stat_rq_poll);
+
+ while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
+ ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
+@@ -322,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_rq_prod);
++ atomic_inc_unchecked(&rdma_stat_rq_prod);
+
+ set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+ /*
+@@ -394,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
+ return;
+
+ ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
+- atomic_inc(&rdma_stat_sq_poll);
++ atomic_inc_unchecked(&rdma_stat_sq_poll);
+ while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
+ if (wc.status != IB_WC_SUCCESS)
+ /* Close the transport */
+@@ -412,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
+ }
+
+ if (ctxt)
+- atomic_inc(&rdma_stat_sq_prod);
++ atomic_inc_unchecked(&rdma_stat_sq_prod);
+ }
+
+ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
+@@ -1274,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+ spin_lock_bh(&xprt->sc_lock);
+ if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
+ spin_unlock_bh(&xprt->sc_lock);
+- atomic_inc(&rdma_stat_sq_starve);
++ atomic_inc_unchecked(&rdma_stat_sq_starve);
+
+ /* See if we can opportunistically reap SQ WR to make room */
+ sq_cq_reap(xprt);
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 08c5d5a..9a66c95 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
+ */
+ int rpcrdma_marshal_req(struct rpc_rqst *);
+
++/* Temporary NFS request map cache. Created in svc_rdma.c */
++extern struct kmem_cache *svc_rdma_map_cachep;
++/* WR context cache. Created in svc_rdma.c */
++extern struct kmem_cache *svc_rdma_ctxt_cachep;
++/* Workqueue created in svc_rdma.c */
++extern struct workqueue_struct *svc_rdma_wq;
++
+ #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
+diff --git a/net/sysctl_net.c b/net/sysctl_net.c
+index e758139..d29ea47 100644
+--- a/net/sysctl_net.c
++++ b/net/sysctl_net.c
+@@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
+ struct ctl_table *table)
+ {
+ /* Allow network administrator to have same access as root. */
+- if (capable(CAP_NET_ADMIN)) {
++ if (capable_nolog(CAP_NET_ADMIN)) {
+ int mode = (table->mode >> 6) & 7;
+ return (mode << 6) | (mode << 3) | mode;
+ }
+diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
+index a224a38..c31d40a 100644
+--- a/net/tipc/eth_media.c
++++ b/net/tipc/eth_media.c
+@@ -58,7 +58,6 @@ struct eth_bearer {
+
+ static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+ static int eth_started;
+-static struct notifier_block notifier;
+
+ /**
+ * send_msg - send a TIPC message out over an Ethernet interface
+@@ -277,6 +276,11 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
+ * with OS for notifications about device state changes.
+ */
+
++static struct notifier_block notifier = {
++ .notifier_call = &recv_notification,
++ .priority = 0,
++};
++
+ int tipc_eth_media_start(void)
+ {
+ struct tipc_media_addr bcast_addr;
+@@ -297,8 +301,6 @@ int tipc_eth_media_start(void)
+ if (res)
+ return res;
+
+- notifier.notifier_call = &recv_notification;
+- notifier.priority = 0;
+ res = register_netdevice_notifier(&notifier);
+ if (!res)
+ eth_started = 1;
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index ae98a72..22f4de0 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
+ struct tipc_msg fragm_hdr;
+ struct sk_buff *buf, *buf_chain, *prev;
+ u32 fragm_crs, fragm_rest, hsz, sect_rest;
+- const unchar *sect_crs;
++ const unchar __user *sect_crs;
+ int curr_sect;
+ u32 fragm_no;
+
+@@ -1247,7 +1247,7 @@ again:
+
+ if (!sect_rest) {
+ sect_rest = msg_sect[++curr_sect].iov_len;
+- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
++ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
+ }
+
+ if (sect_rest < fragm_rest)
+@@ -1266,7 +1266,7 @@ error:
+ }
+ } else
+ skb_copy_to_linear_data_offset(buf, fragm_crs,
+- sect_crs, sz);
++ (const void __force_kernel *)sect_crs, sz);
+ sect_crs += sz;
+ sect_rest -= sz;
+ fragm_crs += sz;
+@@ -2367,8 +2367,11 @@ static int link_recv_changeover_msg(struct link **l_ptr,
+ struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
+ u32 msg_typ = msg_type(tunnel_msg);
+ u32 msg_count = msg_msgcnt(tunnel_msg);
++ u32 bearer_id = msg_bearer_id(tunnel_msg);
+
+- dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
++ if (bearer_id >= MAX_BEARERS)
++ goto exit;
++ dest_link = (*l_ptr)->owner->links[bearer_id];
+ if (!dest_link)
+ goto exit;
+ if (dest_link == *l_ptr) {
+@@ -2601,14 +2604,16 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
+ struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
+ u32 msg_sz = msg_size(imsg);
+ u32 fragm_sz = msg_data_sz(fragm);
+- u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
++ u32 exp_fragm_cnt;
+ u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
++
+ if (msg_type(imsg) == TIPC_MCAST_MSG)
+ max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
+- if (msg_size(imsg) > max) {
++ if (fragm_sz == 0 || msg_size(imsg) > max) {
+ buf_discard(fbuf);
+ return 0;
+ }
++ exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
+ pbuf = tipc_buf_acquire(msg_size(imsg));
+ if (pbuf != NULL) {
+ pbuf->next = *pending;
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 83d5096..dcba497 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
+ msg_sect[cnt].iov_len);
+ else
+ skb_copy_to_linear_data_offset(*buf, pos,
+- msg_sect[cnt].iov_base,
++ (const void __force_kernel *)msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ pos += msg_sect[cnt].iov_len;
+ }
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index 1983717..4d6102c 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
+ {
+ struct iovec msg_sect;
+
+- msg_sect.iov_base = (void *)&sub->evt;
++ msg_sect.iov_base = (void __force_user *)&sub->evt;
+ msg_sect.iov_len = sizeof(struct tipc_event);
+
+ sub->evt.event = htohl(event, sub->swap);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9338ccc..9bc732b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -766,6 +766,12 @@ static struct sock *unix_find_other(struct net *net,
+ err = -ECONNREFUSED;
+ if (!S_ISSOCK(inode->i_mode))
+ goto put_fail;
++
++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
++ err = -EACCES;
++ goto put_fail;
++ }
++
+ u = unix_find_socket_byinode(inode);
+ if (!u)
+ goto put_fail;
+@@ -786,6 +792,13 @@ static struct sock *unix_find_other(struct net *net,
+ if (u) {
+ struct dentry *dentry;
+ dentry = unix_sk(u)->dentry;
++
++ if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
++ err = -EPERM;
++ sock_put(u);
++ goto fail;
++ }
++
+ if (dentry)
+ touch_atime(unix_sk(u)->mnt, dentry);
+ } else
+@@ -868,11 +881,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ err = security_path_mknod(&path, dentry, mode, 0);
+ if (err)
+ goto out_mknod_drop_write;
++ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
++ err = -EACCES;
++ goto out_mknod_drop_write;
++ }
+ err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+ out_mknod_drop_write:
+ mnt_drop_write(path.mnt);
+ if (err)
+ goto out_mknod_dput;
++
++ gr_handle_create(dentry, path.mnt);
++
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ dput(path.dentry);
+ path.dentry = dentry;
+@@ -2265,9 +2285,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+ seq_puts(seq, "Num RefCount Protocol Flags Type St "
+ "Inode Path\n");
+ else {
+- struct sock *s = v;
++ struct sock *s = v, *peer;
+ struct unix_sock *u = unix_sk(s);
+ unix_state_lock(s);
++ peer = unix_peer(s);
++ unix_state_unlock(s);
++
++ unix_state_double_lock(s, peer);
+
+ seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
+ s,
+@@ -2294,8 +2318,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+ }
+ for ( ; i < len; i++)
+ seq_putc(seq, u->addr->name->sun_path[i]);
+- }
+- unix_state_unlock(s);
++ } else if (peer)
++ seq_printf(seq, " P%lu", sock_i_ino(peer));
++
++ unix_state_double_unlock(s, peer);
+ seq_putc(seq, '\n');
+ }
+
+diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
+index 397cffe..405fdb1 100644
+--- a/net/unix/sysctl_net_unix.c
++++ b/net/unix/sysctl_net_unix.c
+@@ -34,7 +34,7 @@ static struct ctl_path unix_path[] = {
+
+ int __net_init unix_sysctl_register(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
+ if (table == NULL)
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index 0af7f54..c916d2f 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+ */
+
+ /* Support for very large requests */
+- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
+- (user_length > descr->max_tokens)) {
++ if (user_length > descr->max_tokens) {
+ /* Allow userspace to GET more than max so
+ * we can support any size GET requests.
+ * There is still a limit : -ENOMEM.
+@@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+ }
+ }
+
+- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+- /*
+- * If this is a GET, but not NOMAX, it means that the extra
+- * data is not bounded by userspace, but by max_tokens. Thus
+- * set the length to max_tokens. This matches the extra data
+- * allocation.
+- * The driver should fill it with the number of tokens it
+- * provided, and it may check iwp->length rather than having
+- * knowledge of max_tokens. If the driver doesn't change the
+- * iwp->length, this ioctl just copies back max_token tokens
+- * filled with zeroes. Hopefully the driver isn't claiming
+- * them to be valid data.
+- */
+- iwp->length = descr->max_tokens;
+- }
+-
+ err = handler(dev, info, (union iwreq_data *) iwp, extra);
+
+ iwp->length += essid_compat;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 113d20e..2bb5a4e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
+ {
+ policy->walk.dead = 1;
+
+- atomic_inc(&policy->genid);
++ atomic_inc_unchecked(&policy->genid);
+
+ if (del_timer(&policy->timer))
+ xfrm_pol_put(policy);
+@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+ hlist_add_head(&policy->bydst, chain);
+ xfrm_pol_hold(policy);
+ net->xfrm.policy_count[dir]++;
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ if (delpol)
+ __xfrm_policy_unlink(delpol, dir);
+ policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
+@@ -1530,7 +1530,7 @@ free_dst:
+ goto out;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+ {
+ if (!*target) {
+@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
+ return 0;
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
+ #endif
+ }
+
+-static int inline
++static inline int
+ xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
+ {
+ #ifdef CONFIG_XFRM_SUB_POLICY
+@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+
+ xdst->num_pols = num_pols;
+ memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
+- xdst->policy_genid = atomic_read(&pols[0]->genid);
++ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
+
+ return xdst;
+ }
+@@ -2297,11 +2297,12 @@ static void xfrm_garbage_collect(struct net *net)
+ __xfrm_garbage_collect(net);
+ }
+
+-static void xfrm_garbage_collect_deferred(struct net *net)
++void xfrm_garbage_collect_deferred(struct net *net)
+ {
+ flow_cache_flush_deferred();
+ __xfrm_garbage_collect(net);
+ }
++EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
+
+ static void xfrm_init_pmtu(struct dst_entry *dst)
+ {
+@@ -2348,7 +2349,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
+ if (xdst->xfrm_genid != dst->xfrm->genid)
+ return 0;
+ if (xdst->num_pols > 0 &&
+- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
++ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
+ return 0;
+
+ mtu = dst_mtu(dst->child);
+@@ -2434,8 +2435,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ dst_ops->link_failure = xfrm_link_failure;
+ if (likely(dst_ops->neigh_lookup == NULL))
+ dst_ops->neigh_lookup = xfrm_neigh_lookup;
+- if (likely(afinfo->garbage_collect == NULL))
+- afinfo->garbage_collect = xfrm_garbage_collect_deferred;
+ xfrm_policy_afinfo[afinfo->family] = afinfo;
+ }
+ write_unlock_bh(&xfrm_policy_afinfo_lock);
+@@ -2482,7 +2481,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
+ dst_ops->check = NULL;
+ dst_ops->negative_advice = NULL;
+ dst_ops->link_failure = NULL;
+- afinfo->garbage_collect = NULL;
+ }
+ }
+ write_unlock_bh(&xfrm_policy_afinfo_lock);
+@@ -2692,7 +2690,7 @@ static void __net_exit xfrm_net_exit(struct net *net)
+ xfrm_statistics_fini(net);
+ }
+
+-static struct pernet_operations __net_initdata xfrm_net_ops = {
++static struct pernet_operations __net_initconst xfrm_net_ops = {
+ .init = xfrm_net_init,
+ .exit = xfrm_net_exit,
+ };
+@@ -2885,7 +2883,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
+ sizeof(pol->xfrm_vec[i].saddr));
+ pol->xfrm_vec[i].encap_family = mp->new_family;
+ /* flush bundles */
+- atomic_inc(&pol->genid);
++ atomic_inc_unchecked(&pol->genid);
+ }
+ }
+
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index 3efb07d..2576ee4 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
+ return 0;
+
+ diff = x->replay.seq - seq;
+- if (diff >= min_t(unsigned int, x->props.replay_window,
+- sizeof(x->replay.bitmap) * 8)) {
++ if (diff >= x->props.replay_window) {
+ x->stats.replay_window++;
+ goto err;
+ }
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 9414b9c..2477932 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -194,11 +194,13 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
+
+ if (unlikely(afinfo == NULL))
+ return -EAFNOSUPPORT;
+- typemap = afinfo->type_map;
++ typemap = (const struct xfrm_type **)afinfo->type_map;
+
+- if (likely(typemap[type->proto] == NULL))
++ if (likely(typemap[type->proto] == NULL)) {
++ pax_open_kernel();
+ typemap[type->proto] = type;
+- else
++ pax_close_kernel();
++ } else
+ err = -EEXIST;
+ xfrm_state_unlock_afinfo(afinfo);
+ return err;
+@@ -213,12 +215,15 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
+
+ if (unlikely(afinfo == NULL))
+ return -EAFNOSUPPORT;
+- typemap = afinfo->type_map;
++ typemap = (const struct xfrm_type **)afinfo->type_map;
+
+ if (unlikely(typemap[type->proto] != type))
+ err = -ENOENT;
+- else
++ else {
++ pax_open_kernel();
+ typemap[type->proto] = NULL;
++ pax_close_kernel();
++ }
+ xfrm_state_unlock_afinfo(afinfo);
+ return err;
+ }
+@@ -227,7 +232,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
+ static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
+ {
+ struct xfrm_state_afinfo *afinfo;
+- const struct xfrm_type **typemap;
+ const struct xfrm_type *type;
+ int modload_attempted = 0;
+
+@@ -235,9 +239,8 @@ retry:
+ afinfo = xfrm_state_get_afinfo(family);
+ if (unlikely(afinfo == NULL))
+ return NULL;
+- typemap = afinfo->type_map;
+
+- type = typemap[proto];
++ type = afinfo->type_map[proto];
+ if (unlikely(type && !try_module_get(type->owner)))
+ type = NULL;
+ if (!type && !modload_attempted) {
+@@ -270,7 +273,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
+ return -EAFNOSUPPORT;
+
+ err = -EEXIST;
+- modemap = afinfo->mode_map;
++ modemap = (struct xfrm_mode **)afinfo->mode_map;
+ if (modemap[mode->encap])
+ goto out;
+
+@@ -278,8 +281,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
+ if (!try_module_get(afinfo->owner))
+ goto out;
+
+- mode->afinfo = afinfo;
++ pax_open_kernel();
++ *(const void **)&mode->afinfo = afinfo;
+ modemap[mode->encap] = mode;
++ pax_close_kernel();
+ err = 0;
+
+ out:
+@@ -302,9 +307,11 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
+ return -EAFNOSUPPORT;
+
+ err = -ENOENT;
+- modemap = afinfo->mode_map;
++ modemap = (struct xfrm_mode **)afinfo->mode_map;
+ if (likely(modemap[mode->encap] == mode)) {
++ pax_open_kernel();
+ modemap[mode->encap] = NULL;
++ pax_close_kernel();
+ module_put(mode->afinfo->owner);
+ err = 0;
+ }
+@@ -1497,10 +1504,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
+ u32 xfrm_get_acqseq(void)
+ {
+ u32 res;
+- static atomic_t acqseq;
++ static atomic_unchecked_t acqseq;
+
+ do {
+- res = atomic_inc_return(&acqseq);
++ res = atomic_inc_return_unchecked(&acqseq);
+ } while (!res);
+
+ return res;
+@@ -1985,8 +1992,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
+ goto error;
+
+ x->outer_mode = xfrm_get_mode(x->props.mode, family);
+- if (x->outer_mode == NULL)
++ if (x->outer_mode == NULL) {
++ err = -EPROTONOSUPPORT;
+ goto error;
++ }
+
+ if (init_replay) {
+ err = xfrm_init_replay(x);
+diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
+index 05640bc..b67eaaa 100644
+--- a/net/xfrm/xfrm_sysctl.c
++++ b/net/xfrm/xfrm_sysctl.c
+@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
+
+ int __net_init xfrm_sysctl_init(struct net *net)
+ {
+- struct ctl_table *table;
++ ctl_table_no_const *table;
+
+ __xfrm_sysctl_init(net);
+
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index ede01a8..756e6bd 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
+ memcpy(&x->sel, &p->sel, sizeof(x->sel));
+ memcpy(&x->lft, &p->lft, sizeof(x->lft));
+ x->props.mode = p->mode;
+- x->props.replay_window = p->replay_window;
++ x->props.replay_window = min_t(unsigned int, p->replay_window,
++ sizeof(x->replay.bitmap) * 8);
+ x->props.reqid = p->reqid;
+ x->props.family = p->family;
+ memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
+@@ -1816,7 +1817,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (x->km.state != XFRM_STATE_VALID)
+ goto out;
+
+- err = xfrm_replay_verify_len(x->replay_esn, rp);
++ err = xfrm_replay_verify_len(x->replay_esn, re);
+ if (err)
+ goto out;
+
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index d2b366c..2d5a6f8 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -109,7 +109,7 @@ endif
+ endif
+
+ # Do not include host rules unless needed
+-ifneq ($(hostprogs-y)$(hostprogs-m),)
++ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
+ include scripts/Makefile.host
+ endif
+
+diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
+index 686cb0d..9d653bf 100644
+--- a/scripts/Makefile.clean
++++ b/scripts/Makefile.clean
+@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
+ __clean-files := $(extra-y) $(always) \
+ $(targets) $(clean-files) \
+ $(host-progs) \
+- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
++ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
++ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
+
+ __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
+
+diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
+index a57f5bd..d3bae5e 100644
+--- a/scripts/Makefile.headersinst
++++ b/scripts/Makefile.headersinst
+@@ -4,12 +4,16 @@
+ # header-y - list files to be installed. They are preprocessed
+ # to remove __KERNEL__ section of the file
+ # objhdr-y - Same as header-y but for generated files
++# genhdr-y - Same as objhdr-y but in a generated/ directory
+ #
+ # ==========================================================================
+
+ # called may set destination dir (when installing to asm/)
+ _dst := $(if $(dst),$(dst),$(obj))
+
++# generated header directory
++gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
++
+ kbuild-file := $(srctree)/$(obj)/Kbuild
+ include $(kbuild-file)
+
+@@ -33,9 +37,10 @@ wrapper-files := $(filter $(header-y), $(generic-y))
+
+ # all headers files for this dir
+ header-y := $(filter-out $(generic-y), $(header-y))
+-all-files := $(header-y) $(objhdr-y) $(wrapper-files)
++all-files := $(header-y) $(objhdr-y) $(genhdr-y) $(wrapper-files)
+ input-files := $(addprefix $(srctree)/$(obj)/,$(header-y)) \
+- $(addprefix $(objtree)/$(obj)/,$(objhdr-y))
++ $(addprefix $(objtree)/$(obj)/,$(objhdr-y)) \
++ $(addprefix $(objtree)/$(gen)/,$(genhdr-y))
+ output-files := $(addprefix $(install)/, $(all-files))
+
+ # Work out what needs to be removed
+@@ -52,6 +57,7 @@ quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
+ cmd_install = \
+ $(PERL) $< $(srctree)/$(obj) $(install) $(SRCARCH) $(header-y); \
+ $(PERL) $< $(objtree)/$(obj) $(install) $(SRCARCH) $(objhdr-y); \
++ $(PERL) $< $(objtree)/$(gen) $(install) $(SRCARCH) $(genhdr-y); \
+ for F in $(wrapper-files); do \
+ echo "\#include <asm-generic/$$F>" > $(install)/$$F; \
+ done; \
+diff --git a/scripts/Makefile.host b/scripts/Makefile.host
+index 1ac414f..38575f7 100644
+--- a/scripts/Makefile.host
++++ b/scripts/Makefile.host
+@@ -31,6 +31,8 @@
+ # Note: Shared libraries consisting of C++ files are not supported
+
+ __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
++__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
++__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
+
+ # C code
+ # Executables compiled from a single .c file
+@@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
+ # Shared libaries (only .c supported)
+ # Shared libraries (.so) - all .so files referenced in "xxx-objs"
+ host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
++host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
++host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
+ # Remove .so files from "xxx-objs"
+ host-cobjs := $(filter-out %.so,$(host-cobjs))
++host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
+
+-#Object (.o) files used by the shared libaries
++# Object (.o) files used by the shared libraries
+ host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
++host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
+
+ # output directory for programs/.o files
+ # hostprogs-y := tools/build may have been specified. Retrieve directory
+@@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
+ host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
+ host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
+ host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
++host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
+ host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
++host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
+ host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
+
+ obj-dirs += $(host-objdirs)
+@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
+ $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,host-cshobjs)
+
++# Compile .c file, create position independent .o file
++# host-cxxshobjs -> .o
++quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
++ cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
++$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
++ $(call if_changed_dep,host-cxxshobjs)
++
+ # Link a shared library, based on position independent .o files
+ # *.o -> .so shared library (host-cshlib)
+ quiet_cmd_host-cshlib = HOSTLLD -shared $@
+@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
+ $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
+ $(call if_changed,host-cshlib)
+
++# Link a shared library, based on position independent .o files
++# *.o -> .so shared library (host-cxxshlib)
++quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
++ cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
++ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
++ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
++$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
++ $(call if_changed,host-cxxshlib)
++
+ targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
+- $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
++ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
+
+diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
+index cb1f50c..cef2a7c 100644
+--- a/scripts/basic/fixdep.c
++++ b/scripts/basic/fixdep.c
+@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
+ /*
+ * Lookup a value in the configuration string.
+ */
+-static int is_defined_config(const char *name, int len, unsigned int hash)
++static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
+ {
+ struct item *aux;
+
+@@ -211,10 +211,10 @@ static void clear_config(void)
+ /*
+ * Record the use of a CONFIG_* word.
+ */
+-static void use_config(const char *m, int slen)
++static void use_config(const char *m, unsigned int slen)
+ {
+ unsigned int hash = strhash(m, slen);
+- int c, i;
++ unsigned int c, i;
+
+ if (is_defined_config(m, slen, hash))
+ return;
+@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
+
+ static void parse_config_file(const char *map, size_t len)
+ {
+- const int *end = (const int *) (map + len);
++ const unsigned int *end = (const unsigned int *) (map + len);
+ /* start at +1, so that p can never be < map */
+- const int *m = (const int *) map + 1;
++ const unsigned int *m = (const unsigned int *) map + 1;
+ const char *p, *q;
+
+ for (; m < end; m++) {
+@@ -406,7 +406,7 @@ static void print_deps(void)
+ static void traps(void)
+ {
+ static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
+- int *p = (int *)test;
++ unsigned int *p = (unsigned int *)test;
+
+ if (*p != INT_CONF) {
+ fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
+diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
+new file mode 100644
+index 0000000..5e0222d
+--- /dev/null
++++ b/scripts/gcc-plugin.sh
+@@ -0,0 +1,17 @@
++#!/bin/bash
++plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
++#include "gcc-plugin.h"
++#include "tree.h"
++#include "tm.h"
++#include "rtl.h"
++#ifdef ENABLE_BUILD_WITH_CXX
++#warning $2
++#else
++#warning $1
++#endif
++EOF`
++if [ $? -eq 0 ]
++then
++ [[ "$plugincc" =~ "$1" ]] && echo "$1"
++ [[ "$plugincc" =~ "$2" ]] && echo "$2"
++fi
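++# Illustrative invocation (a sketch, not taken from this patch): the
++# argument order below mirrors how the hardened Makefiles are expected to
++# call this helper -- two candidate plugin compilers, then the gcc whose
++# plugin headers are probed:
++#
++#     PLUGINCC=$(./scripts/gcc-plugin.sh "$HOSTCC" "$HOSTCXX" "$CC")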
+diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
+index 48462be..3e08f94 100644
+--- a/scripts/headers_install.pl
++++ b/scripts/headers_install.pl
+@@ -33,6 +33,7 @@ foreach my $file (@files) {
+ $line =~ s/([\s(])__user\s/$1/g;
+ $line =~ s/([\s(])__force\s/$1/g;
+ $line =~ s/([\s(])__iomem\s/$1/g;
++ $line =~ s/(\s?)__intentional_overflow\([-\d\s,]*\)\s?/$1/g;
+ $line =~ s/\s__attribute_const__\s/ /g;
+ $line =~ s/\s__attribute_const__$//g;
+ $line =~ s/\b__packed\b/__attribute__((packed))/g;
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index d1d0ae8..6b73b2a 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
+ unsigned long size, unsigned long id_size,
+ void *symval)
+ {
+- int i;
++ unsigned int i;
+
+ if (size % id_size || size < id_size) {
+ if (cross_build != 0)
+@@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
+ /* USB is special because the bcdDevice can be matched against a numeric range */
+ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
+ static void do_usb_entry(struct usb_device_id *id,
+- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
+ unsigned char range_lo, unsigned char range_hi,
+ unsigned char max, struct module *mod)
+ {
+@@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
+ {
+ unsigned int devlo, devhi;
+ unsigned char chi, clo, max;
+- int ndigits;
++ unsigned int ndigits;
+
+ id->match_flags = TO_NATIVE(id->match_flags);
+ id->idVendor = TO_NATIVE(id->idVendor);
+@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
+ for (i = 0; i < count; i++) {
+ const char *id = (char *)devs[i].id;
+ char acpi_id[sizeof(devs[0].id)];
+- int j;
++ unsigned int j;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
+
+ for (j = 0; j < PNP_MAX_DEVICES; j++) {
+ const char *id = (char *)card->devs[j].id;
+- int i2, j2;
++ unsigned int i2, j2;
+ int dup = 0;
+
+ if (!id[0])
+@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
+ /* add an individual alias for every device entry */
+ if (!dup) {
+ char acpi_id[sizeof(card->devs[0].id)];
+- int k;
++ unsigned int k;
+
+ buf_printf(&mod->dev_table_buf,
+ "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+@@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
+ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+ char *alias)
+ {
+- int i, j;
++ unsigned int i, j;
+
+ sprintf(alias, "dmi*");
+
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 619228d..bf61bbb 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -922,6 +922,7 @@ enum mismatch {
+ ANY_INIT_TO_ANY_EXIT,
+ ANY_EXIT_TO_ANY_INIT,
+ EXPORT_TO_INIT_EXIT,
++ DATA_TO_TEXT
+ };
+
+ struct sectioncheck {
+@@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
+ .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
+ .mismatch = EXPORT_TO_INIT_EXIT,
+ .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
++},
++/* Do not reference code from writable data */
++{
++ .fromsec = { DATA_SECTIONS, NULL },
++ .tosec = { TEXT_SECTIONS, NULL },
++ .mismatch = DATA_TO_TEXT
+ }
+ };
+
+@@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
+- if (sym->st_value == addr)
+- return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+ d = sym->st_value - addr;
++ if (d == 0)
++ return sym;
+ if (d < 0)
+ d = addr - sym->st_value;
+ if (d < distance) {
+@@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
+ tosym, prl_to, prl_to, tosym);
+ free(prl_to);
+ break;
++ case DATA_TO_TEXT:
++#if 0
++ fprintf(stderr,
++ "The %s %s:%s references\n"
++ "the %s %s:%s%s\n",
++ from, fromsec, fromsym, to, tosec, tosym, to_p);
++#endif
++ break;
+ }
+ fprintf(stderr, "\n");
+ }
+@@ -1659,7 +1674,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
+ static void check_sec_ref(struct module *mod, const char *modname,
+ struct elf_info *elf)
+ {
+- int i;
++ unsigned int i;
+ Elf_Shdr *sechdrs = elf->sechdrs;
+
+ /* Walk through all sections */
+@@ -1757,7 +1772,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
+ va_end(ap);
+ }
+
+-void buf_write(struct buffer *buf, const char *s, int len)
++void buf_write(struct buffer *buf, const char *s, unsigned int len)
+ {
+ if (buf->size - buf->pos < len) {
+ buf->size += len + SZ;
+@@ -1975,7 +1990,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
+ if (fstat(fileno(file), &st) < 0)
+ goto close_write;
+
+- if (st.st_size != b->pos)
++ if (st.st_size != (off_t)b->pos)
+ goto close_write;
+
+ tmp = NOFAIL(malloc(b->pos));
+diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
+index 51207e4..f7d603d 100644
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
+
+ struct buffer {
+ char *p;
+- int pos;
+- int size;
++ unsigned int pos;
++ unsigned int size;
+ };
+
+ void __attribute__((format(printf, 2, 3)))
+ buf_printf(struct buffer *buf, const char *fmt, ...);
+
+ void
+-buf_write(struct buffer *buf, const char *s, int len);
++buf_write(struct buffer *buf, const char *s, unsigned int len);
+
+ struct module {
+ struct module *next;
+diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
+index 9dfcd6d..099068e 100644
+--- a/scripts/mod/sumversion.c
++++ b/scripts/mod/sumversion.c
+@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
+ goto out;
+ }
+
+- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
+ warn("writing sum in %s failed: %s\n",
+ filename, strerror(errno));
+ goto out;
+diff --git a/scripts/module-common.lds b/scripts/module-common.lds
+index 0865b3e..7235dd4 100644
+--- a/scripts/module-common.lds
++++ b/scripts/module-common.lds
+@@ -6,6 +6,10 @@
+ SECTIONS {
+ /DISCARD/ : { *(.discard) }
+
++ .rodata : {
++ *(.rodata) *(.rodata.*)
++ *(.data..read_only)
++ }
+ __ksymtab : { *(SORT(___ksymtab+*)) }
+ __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
+ __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 3c6c0b1..3e4dbf3 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -241,6 +241,7 @@ fi
+ (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
+ (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
+ (cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
++(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
+ destdir=$kernel_headers_dir/usr/src/linux-headers-$version
+ mkdir -p "$destdir"
+ (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
+index 5c11312..72742b5 100644
+--- a/scripts/pnmtologo.c
++++ b/scripts/pnmtologo.c
+@@ -237,14 +237,14 @@ static void write_header(void)
+ fprintf(out, " * Linux logo %s\n", logoname);
+ fputs(" */\n\n", out);
+ fputs("#include <linux/linux_logo.h>\n\n", out);
+- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_data[] = {\n",
+ logoname);
+ }
+
+ static void write_footer(void)
+ {
+ fputs("\n};\n\n", out);
+- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
++ fprintf(out, "const struct linux_logo %s = {\n", logoname);
+ fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+ fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
+ fputs("\n};\n\n", out);
+
+ /* write logo clut */
+- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
++ fprintf(out, "static unsigned char %s_clut[] = {\n",
+ logoname);
+ write_hex_cnt = 0;
+ for (i = 0; i < logo_clutsize; i++) {
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index 38f6617..e70b72b 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -116,7 +116,7 @@ docscope()
+
+ dogtags()
+ {
+- all_sources | gtags -f -
++ all_sources | gtags -i -f -
+ }
+
+ exuberant()
+diff --git a/security/Kconfig b/security/Kconfig
+index 51bd5a0..ce4aad0 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -4,6 +4,954 @@
+
+ menu "Security options"
+
++menu "Grsecurity"
++
++ config ARCH_TRACK_EXEC_LIMIT
++ bool
++
++ config PAX_KERNEXEC_PLUGIN
++ bool
++
++ config PAX_PER_CPU_PGD
++ bool
++
++ config TASK_SIZE_MAX_SHIFT
++ int
++ depends on X86_64
++ default 47 if !PAX_PER_CPU_PGD
++ default 42 if PAX_PER_CPU_PGD
++
++ config PAX_ENABLE_PAE
++ bool
++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
++
++ config PAX_USERCOPY_SLABS
++ bool
++
++config GRKERNSEC
++ bool "Grsecurity"
++ select CRYPTO
++ select CRYPTO_SHA256
++ select PROC_FS
++ select STOP_MACHINE
++ select DEBUG_LIST
++ help
++ If you say Y here, you will be able to configure many features
++ that will enhance the security of your system. It is highly
++ recommended that you say Y here and read through the help
++ for each option so that you fully understand the features and
++ can evaluate their usefulness for your machine.
++
++choice
++ prompt "Configuration Method"
++ depends on GRKERNSEC
++ default GRKERNSEC_CONFIG_CUSTOM
++ help
++
++config GRKERNSEC_CONFIG_AUTO
++ bool "Automatic"
++ help
++ If you choose this configuration method, you'll be able to answer a small
++ number of simple questions about how you plan to use this kernel.
++ The settings of grsecurity and PaX will be automatically configured for
++ the highest commonly-used settings within the provided constraints.
++
++ If you require additional configuration, custom changes can still be made
++ from the "custom configuration" menu.
++
++config GRKERNSEC_CONFIG_CUSTOM
++ bool "Custom"
++ help
++ If you choose this configuration method, you'll be able to configure all
++ grsecurity and PaX settings manually. Via this method, no options are
++ automatically enabled.
++
++endchoice
++
++choice
++ prompt "Usage Type"
++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++ default GRKERNSEC_CONFIG_SERVER
++ help
++
++config GRKERNSEC_CONFIG_SERVER
++ bool "Server"
++ help
++ Choose this option if you plan to use this kernel on a server.
++
++config GRKERNSEC_CONFIG_DESKTOP
++ bool "Desktop"
++ help
++ Choose this option if you plan to use this kernel on a desktop.
++
++endchoice
++
++choice
++ prompt "Virtualization Type"
++ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
++ default GRKERNSEC_CONFIG_VIRT_NONE
++ help
++
++config GRKERNSEC_CONFIG_VIRT_NONE
++ bool "None"
++ help
++ Choose this option if this kernel will be run on bare metal.
++
++config GRKERNSEC_CONFIG_VIRT_GUEST
++ bool "Guest"
++ help
++ Choose this option if this kernel will be run as a VM guest.
++
++config GRKERNSEC_CONFIG_VIRT_HOST
++ bool "Host"
++ help
++ Choose this option if this kernel will be run as a VM host.
++
++endchoice
++
++choice
++ prompt "Virtualization Hardware"
++ depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
++ help
++
++config GRKERNSEC_CONFIG_VIRT_EPT
++ bool "EPT/RVI Processor Support"
++ depends on X86
++ help
++ Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
++ hardware virtualization. This allows for additional kernel hardening protections
++ to operate without extra performance impact.
++
++ To see if your Intel processor supports EPT, see:
++ http://ark.intel.com/Products/VirtualizationTechnology
++ (Most Core i3/5/7 support EPT)
++
++ To see if your AMD processor supports RVI, see:
++ http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
++
++config GRKERNSEC_CONFIG_VIRT_SOFT
++ bool "First-gen/No Hardware Virtualization"
++ help
++ Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
++ support hardware virtualization or doesn't support the EPT/RVI extensions.
++
++endchoice
++
++choice
++ prompt "Virtualization Software"
++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
++ help
++
++config GRKERNSEC_CONFIG_VIRT_XEN
++ bool "Xen"
++ help
++ Choose this option if this kernel is running as a Xen guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_VMWARE
++ bool "VMWare"
++ help
++ Choose this option if this kernel is running as a VMWare guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_KVM
++ bool "KVM"
++ help
++ Choose this option if this kernel is running as a KVM guest or host.
++
++config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
++ bool "VirtualBox"
++ help
++ Choose this option if this kernel is running as a VirtualBox guest or host.
++
++endchoice
++
++choice
++ prompt "Required Priorities"
++ depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++ default GRKERNSEC_CONFIG_PRIORITY_PERF
++ help
++
++config GRKERNSEC_CONFIG_PRIORITY_PERF
++ bool "Performance"
++ help
++ Choose this option if performance is of highest priority for this deployment
++ of grsecurity. Features like UDEREF on a 64-bit kernel, kernel stack clearing,
++ clearing of structures intended for userland, and freed memory sanitizing will
++ be disabled.
++
++config GRKERNSEC_CONFIG_PRIORITY_SECURITY
++ bool "Security"
++ help
++ Choose this option if security is of highest priority for this deployment of
++ grsecurity. UDEREF, kernel stack clearing, clearing of structures intended
++ for userland, and freed memory sanitizing will be enabled for this kernel.
++ In a worst-case scenario, these features can introduce a 20% performance hit
++ (UDEREF on x64 contributing half of this hit).
++
++endchoice
++
++menu "Default Special Groups"
++depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
++
++config GRKERNSEC_PROC_GID
++ int "GID exempted from /proc restrictions"
++ default 1001
++ help
++ Setting this GID determines which group will be exempted from
++ grsecurity's /proc restrictions, allowing users of the specified
++ group to view network statistics and the existence of other users'
++ processes on the system. This GID may also be chosen at boot time
++ via "grsec_proc_gid=" on the kernel commandline.
++
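++# Illustrative setup (a sketch; "procview" and "alice" are hypothetical
++# names, the GID must match the value configured above):
++#
++#     groupadd -g 1001 procview
++#     usermod -aG procview alice
++#
++# Alternatively, override the GID at boot with grsec_proc_gid=1001 on the
++# kernel command line, as the help text notes.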
++config GRKERNSEC_TPE_UNTRUSTED_GID
++ int "GID for TPE-untrusted users"
++ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines which group untrusted users should
++ be added to. These users will be placed under grsecurity's Trusted Path
++ Execution mechanism, preventing them from executing their own binaries.
++ The users will only be able to execute binaries in directories owned and
++ writable only by the root user. If the sysctl option is enabled, a sysctl
++ option with name "tpe_gid" is created.
++
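++# Illustrative check (a sketch; the full sysctl path is assumed to live
++# under kernel.grsecurity, as with other grsecurity sysctls):
++#
++#     sysctl kernel.grsecurity.tpe_gid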
++config GRKERNSEC_TPE_TRUSTED_GID
++ int "GID for TPE-trusted users"
++ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
++ default 1005
++ help
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
++
++config GRKERNSEC_SYMLINKOWN_GID
++ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
++ depends on GRKERNSEC_CONFIG_SERVER
++ default 1006
++ help
++ Setting this GID determines what group kernel-enforced
++ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
++ is enabled, a sysctl option with name "symlinkown_gid" is created.
++
++endmenu
++
++menu "Customize Configuration"
++depends on GRKERNSEC
++
++menu "PaX"
++
++config PAX
++ bool "Enable various PaX features"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
++ support as they are the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
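++# Illustrative usage (a sketch): soft mode can be requested at boot via
++# the kernel command line, e.g.:
++#
++#     linux /boot/vmlinuz root=/dev/sda1 pax_softmode=1
++#
++# and, assuming a "softmode" entry under the /proc/sys/kernel/pax
++# directory mentioned above, toggled at runtime:
++#
++#     echo 0 > /proc/sys/kernel/pax/softmode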
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft mode, and the toolchain does
++ not know about the non-standard use of the ELF header); it has
++ therefore been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
++ support.
++
++ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
++ support as well, they will override the legacy EI_PAX marks.
++
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
++
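++# Illustrative legacy marking (a sketch; flag letters are assumed from the
++# chpax documentation, with lowercase disabling a feature for the file):
++#
++#     chpax -m /usr/bin/legacy-app   # clear MPROTECT in the EI_PAX bits
++#     chpax -v /usr/bin/legacy-app   # view the current EI_PAX marks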
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefit of supporting soft mode and of being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
++
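++# Illustrative paxctl usage (a sketch; flag letters are assumed from the
++# paxctl documentation, not taken from this patch):
++#
++#     paxctl -c /usr/bin/someapp    # add a PT_PAX_FLAGS program header
++#     paxctl -m /usr/bin/someapp    # disable MPROTECT for this binary
++#     paxctl -v /usr/bin/someapp    # view the current marks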
++config PAX_XATTR_PAX_FLAGS
++ bool 'Use filesystem extended attributes marking'
++ default y if GRKERNSEC_CONFIG_AUTO
++ select CIFS_XATTR if CIFS
++ select EXT2_FS_XATTR if EXT2_FS
++ select EXT3_FS_XATTR if EXT3_FS
++ select EXT4_FS_XATTR if EXT4_FS
++ select JFFS2_FS_XATTR if JFFS2_FS
++ select REISERFS_FS_XATTR if REISERFS_FS
++ select SQUASHFS_XATTR if SQUASHFS
++ select TMPFS_XATTR if TMPFS
++ select UBIFS_FS_XATTR if UBIFS_FS
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'setfattr' utility. The control
++ flags will be read from the user.pax.flags extended attribute of
++ the file. This marking has the benefit of supporting binary-only
++ applications that perform integrity self-checks (e.g., skype) and would
++ not tolerate chpax/paxctl changes. The main drawback is that
++ extended attributes are not supported by some filesystems (e.g.,
++ isofs, udf, vfat) so copying files through such filesystems will
++ lose the extended attributes and these PaX markings.
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
++
++ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
++ must make sure that the marks are the same if a binary has both marks.
++
++ If you enable none of the marking options then all applications
++ will run with PaX enabled on them by default.
++
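++# Illustrative xattr marking (a sketch; the attribute name comes from the
++# help text above, the "m" value is an assumed example mark):
++#
++#     setfattr -n user.pax.flags -v "m" /usr/bin/someapp
++#     getfattr -n user.pax.flags /usr/bin/someapp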
++choice
++ prompt 'MAC system integration'
++ default PAX_HAVE_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_initial_flags() itself,
++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++ config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++ config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++ config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
++ help
++ By design some architectures do not allow for protecting memory
++ pages against execution or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than those of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files to
++ which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that memory dynamically allocated via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
++ select ARCH_TRACK_EXEC_LIMIT if X86_32
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 without hardware non-executable bit support there is a
++ variable but usually low performance impact, however on Intel's
++ P4 core based CPUs it is very high so you should not enable this
++ for kernels meant to be used on such CPUs.
++
++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
++ with hardware non-executable bit support there is no performance
++ impact, on ppc the impact is negligible.
++
++ Note that several architectures require various emulations due to
++ badly designed userland ABIs; these emulations cause a performance
++ impact that will disappear as soon as userland is fixed. For example, ppc
++ userland MUST have been built with secure-plt by a recent toolchain.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_NOEXEC && X86_32
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has a very small performance impact, however applications
++ will be limited to a 1.5 GB address space instead of the normal
++ 3 GB.
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
++ default y if PARISC
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
++ your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
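++# Illustrative per-file exemption (a sketch): a binary built from GCC
++# nested functions (which place a small trampoline on the stack) can be
++# allowed to run without disabling NOEXEC globally; flag letters are
++# assumed from the paxctl documentation:
++#
++#     paxctl -c ./uses-trampolines   # add the PT_PAX_FLAGS header
++#     paxctl -E ./uses-trampolines   # enable trampoline emulation only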
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && PARISC
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc you MUST enable this option, otherwise your system will
++ not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory,
++ - making read-only-after-relocations (RELRO) data pages writable again.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
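++# Illustrative per-file override (a sketch; java stands in for any JIT
++# that needs writable and executable mappings, flag letters assumed from
++# the paxctl documentation):
++#
++#     paxctl -cm /usr/bin/java      # header added, MPROTECT disabled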
++config PAX_MPROTECT_COMPAT
++ bool "Use legacy/compat protection demoting (read help)"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
++ depends on PAX_MPROTECT
++ help
++ The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
++ by sending the proper error code to the application. For some broken
++ userland, this can cause problems with Python or other applications. The
++ current implementation, however, allows applications like clamav to
++ detect whether JIT compilation/execution is allowed and to fall back
++ gracefully to an interpreter-based mode if it is not. While we encourage everyone
++ to use the current implementation as-is and push upstream to fix broken
++ userland (note that the RWX logging option can assist with this), in some
++ environments this may not be possible. Having to disable MPROTECT
++ completely on certain binaries reduces the security benefit of PaX,
++ so this option is provided for those environments to revert to the old
++ behavior.
++
++config PAX_ELFRELOCS
++ bool "Allow ELF text relocations (read help)"
++ depends on PAX_MPROTECT
++ default n
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two avenues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ as is the case with all modern Linux distributions, then leave
++ this option disabled. You should say 'n' here.
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ select PAX_ELFRELOCS
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable, leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool 'Emulate old glibc resolver stub'
++ depends on PAX_EMUPLT && SPARC
++ default n
++ help
++ This option is needed if userland has an old glibc (before 2.4)
++ that puts a 'save' instruction into the runtime generated resolver
++ stub that needs special emulation.
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
++ depends on X86 && !XEN && (!X86_32 || X86_WP_WORKS_OK)
++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
++ select PAX_KERNEXEC_PLUGIN if X86_64
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++choice
++ prompt "Return Address Instrumentation Method"
++ default PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ depends on PAX_KERNEXEC_PLUGIN
++ help
++ Select the method used to instrument function pointer dereferences.
++ Note that binary modules cannot be instrumented by this approach.
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
++ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ bool "bts"
++ help
++ This method is compatible with binary only modules but has
++ a higher runtime overhead.
++
++ config PAX_KERNEXEC_PLUGIN_METHOD_OR
++ bool "or"
++ depends on !PARAVIRT
++ help
++ This method is incompatible with binary only modules but has
++ a lower runtime overhead.
++endchoice
++
++config PAX_KERNEXEC_PLUGIN_METHOD
++ string
++ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
++ default ""
++
++config PAX_KERNEXEC_MODULE_TEXT
++ int "Minimum amount of memory reserved for module code"
++ default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
++ default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
++ depends on PAX_KERNEXEC && X86_32
++ help
++ Due to implementation details the kernel must reserve a fixed
++ amount of memory for runtime allocated code (such as modules)
++ at compile time that cannot be changed at runtime. Here you
++ can specify the minimum amount in MB that will be reserved.
++ Due to the same implementation details this size will always
++ be rounded up to the next 2/4 MB boundary (depends on PAE) so
++	  the memory actually available for runtime allocated code will
++ usually be more than this minimum.
++
++ The default 4 MB should be enough for most users but if you have
++ an excessive number of modules (e.g., most distribution configs
++ compile many drivers as modules) or use huge modules such as
++ nvidia's kernel driver, you will need to adjust this amount.
++ A good rule of thumb is to look at your currently loaded kernel
++ modules and add up their sizes.
++
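++	  The rule of thumb above can be scripted; a rough userland helper,
++	  assuming the usual "name size ..." layout of /proc/modules, that
++	  prints the rounded-up total in MB:
++
++	    #include <stdio.h>
++
++	    int main(void)
++	    {
++	        char name[64];
++	        unsigned long size, total = 0;
++	        FILE *f = fopen("/proc/modules", "r");
++	        if (!f)
++	            return 1;
++	        while (fscanf(f, "%63s %lu %*[^\n]", name, &size) == 2)
++	            total += size;
++	        fclose(f);
++	        printf("%lu MB\n", (total + (1UL << 20) - 1) >> 20);
++	        return 0;
++	    }
++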
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++	  the attacked program, which allows the kernel to detect such attempts
++	  and react to them. PaX itself provides no reaction mechanisms;
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
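++	  A quick way to observe the randomized areas listed above is to run
++	  a toy program several times and compare the printed addresses (a
++	  sketch; the large allocation is served by mmap() on common libcs):
++
++	    #include <stdio.h>
++	    #include <stdlib.h>
++
++	    int main(void)
++	    {
++	        int stack_var;
++	        void *dyn = malloc(1 << 20);
++	        printf("stack=%p mmap=%p main=%p\n",
++	               (void *)&stack_var, dyn, (void *)main);
++	        free(dyn);
++	        return 0;
++	    }
++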
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
++ depends on X86_TSC && X86
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++	  to guess it but also prevent him from making use of possibly
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++	  one may apply a large shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++	  becomes a guessing game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++endmenu
++
++menu "Miscellaneous hardening features"
++
++config PAX_MEMORY_SANITIZE
++ bool "Sanitize all freed memory"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++ help
++ By saying Y here the kernel will erase memory pages and slab objects
++ as soon as they are freed. This in turn reduces the lifetime of data
++ stored in them, making it less likely that sensitive information such
++ as passwords, cryptographic secrets, etc stay in memory for too long.
++
++	  This is especially useful for programs whose runtime is short;
++	  long-lived processes and the kernel itself benefit from this as
++	  long as they ensure timely freeing of memory that may hold
++	  sensitive information.
++
++ A nice side effect of the sanitization of slab objects is the
++ reduction of possible info leaks caused by padding bytes within the
++ leaky structures. Use-after-free bugs for structures containing
++ pointers can also be detected as dereferencing the sanitized pointer
++ will generate an access violation.
++
++	  The tradeoff is performance impact: on a single CPU system kernel
++	  compilation sees a 3% slowdown; other systems and workloads may
++	  vary, and you are advised to test this feature on your expected
++	  workload before deploying it.
++
++ To reduce the performance penalty by sanitizing pages only, albeit
++ limiting the effectiveness of this feature at the same time, slab
++ sanitization can be disabled with the kernel commandline parameter
++ "pax_sanitize_slab=0".
++
++ Note that this feature does not protect data stored in live pages,
++ e.g., process memory swapped to disk may stay there for a long time.
++
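++	  A kernel-style sketch of the use-after-free effect mentioned above
++	  (names hypothetical): once the freed object is wiped, the stale
++	  pointer inside it reads back as NULL and the access faults instead
++	  of following attacker-influenced data:
++
++	    struct node {
++	        struct node *next;
++	        int value;
++	    };
++
++	    void buggy(struct node *n)
++	    {
++	        kfree(n);
++	        /* without sanitization n->next may still look valid; with
++	         * it, the wiped pointer triggers a clean NULL dereference */
++	        pr_info("%d\n", n->next->value);
++	    }
++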
++config PAX_MEMORY_STACKLEAK
++ bool "Sanitize kernel stack"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++ depends on X86
++ help
++ By saying Y here the kernel will erase the kernel stack before it
++ returns from a system call. This in turn reduces the information
++ that a kernel stack leak bug can reveal.
++
++ Note that such a bug can still leak information that was put on
++ the stack by the current system call (the one eventually triggering
++ the bug) but traces of earlier system calls on the kernel stack
++ cannot leak anymore.
++
++ The tradeoff is performance impact: on a single CPU system kernel
++	  compilation sees a 1% slowdown; other systems and workloads may vary,
++	  and you are advised to test this feature on your expected workload
++ before deploying it.
++
++ Note that the full feature requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package. Using
++ older gcc versions means that functions with large enough stack
++ frames may leave uninitialized memory behind that may be exposed
++ to a later syscall leaking the stack.
++
++config PAX_MEMORY_STRUCTLEAK
++ bool "Forcibly initialize local variables copied to userland"
++ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
++ help
++ By saying Y here the kernel will zero initialize some local
++ variables that are going to be copied to userland. This in
++ turn prevents unintended information leakage from the kernel
++ stack should later code forget to explicitly set all parts of
++ the copied variable.
++
++ The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
++ at a much smaller coverage.
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
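++	  A sketch of the padding leak this closes (hypothetical handler; on
++	  64-bit, four padding bytes sit between the two members):
++
++	    struct info {
++	        u32 id;        /* 4 padding bytes follow on 64-bit */
++	        u64 value;
++	    };
++
++	    long get_info(void __user *ubuf)
++	    {
++	        struct info i; /* the plugin zero-initializes this */
++	        i.id = 1;
++	        i.value = 2;   /* member stores leave the padding untouched */
++	        /* without the plugin, 4 bytes of old stack data leak here */
++	        return copy_to_user(ubuf, &i, sizeof(i)) ? -EFAULT : 0;
++	    }
++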
++config PAX_MEMORY_UDEREF
++ bool "Prevent invalid userland pointer dereference"
++ default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
++ depends on X86 && !UML_X86 && !XEN
++ select PAX_PER_CPU_PGD if X86_64
++ help
++ By saying Y here the kernel will be prevented from dereferencing
++ userland pointers in contexts where the kernel expects only kernel
++ pointers. This is both a useful runtime debugging feature and a
++ security measure that prevents exploiting a class of kernel bugs.
++
++ The tradeoff is that some virtualization solutions may experience
++ a huge slowdown and therefore you should not enable this feature
++ for kernels meant to run in such environments. Whether a given VM
++ solution is affected or not is best determined by simply trying it
++ out, the performance impact will be obvious right on boot as this
++ mechanism engages from very early on. A good rule of thumb is that
++ VMs running on CPUs without hardware virtualization support (i.e.,
++ the majority of IA-32 CPUs) will likely experience the slowdown.
++
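++	  The bug class in question, sketched with hypothetical names:
++	  dereferencing a raw userland pointer instead of going through the
++	  accessor functions. With UDEREF the first variant faults rather
++	  than silently reading user-controlled memory:
++
++	    long bad_handler(unsigned long arg)
++	    {
++	        struct req *r = (struct req *)arg;  /* userland pointer! */
++	        return r->cmd;        /* UDEREF turns this into a fault */
++	    }
++
++	    long good_handler(unsigned long arg)
++	    {
++	        struct req r;
++	        if (copy_from_user(&r, (void __user *)arg, sizeof(r)))
++	            return -EFAULT;
++	        return r.cmd;
++	    }
++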
++config PAX_REFCOUNT
++ bool "Prevent various kernel object reference counter overflows"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
++ help
++ By saying Y here the kernel will detect and prevent overflowing
++ various (but not all) kinds of object reference counters. Such
++ overflows can normally occur due to bugs only and are often, if
++ not always, exploitable.
++
++ The tradeoff is that data structures protected by an overflowed
++ refcount will never be freed and therefore will leak memory. Note
++ that this leak also happens even without this protection but in
++ that case the overflow can eventually trigger the freeing of the
++ data structure while it is still being used elsewhere, resulting
++ in the exploitable situation that this feature prevents.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
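++	  The overflow scenario described above, sketched: if a bug lets an
++	  attacker take about 2^32 references, the counter wraps and the next
++	  release frees the object while it is still in use elsewhere:
++
++	    static atomic_t refs = ATOMIC_INIT(1);
++
++	    void obj_get(void)
++	    {
++	        atomic_inc(&refs);    /* wraps to 0 after 2^32 increments */
++	    }
++
++	    void obj_put(struct obj *o)
++	    {
++	        if (atomic_dec_and_test(&refs))
++	            kfree(o);         /* premature free after a wrap */
++	    }
++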
++config PAX_USERCOPY
++ bool "Harden heap object copies between kernel and userland"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on ARM || IA64 || PPC || SPARC || X86
++ depends on GRKERNSEC && (SLAB || SLUB || SLOB)
++ select PAX_USERCOPY_SLABS
++ help
++ By saying Y here the kernel will enforce the size of heap objects
++ when they are copied in either direction between the kernel and
++ userland, even if only a part of the heap object is copied.
++
++ Specifically, this checking prevents information leaking from the
++ kernel heap during kernel to userland copies (if the kernel heap
++ object is otherwise fully initialized) and prevents kernel heap
++ overflows during userland to kernel copies.
++
++ Note that the current implementation provides the strictest bounds
++ checks for the SLUB allocator.
++
++ Enabling this option also enables per-slab cache protection against
++ data in a given cache being copied into/out of via userland
++ accessors. Though the whitelist of regions will be reduced over
++ time, it notably protects important data structures like task structs.
++
++ If frame pointers are enabled on x86, this option will also restrict
++ copies into and out of the kernel stack to local variables within a
++ single frame.
++
++ Since this has a negligible performance impact, you should enable
++ this feature.
++
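++	  A sketch of the heap overflow this refuses (ulen is attacker
++	  controlled, names hypothetical): the copy length is checked against
++	  the size of the slab object the destination points into:
++
++	    long store_blob(const void __user *ubuf, size_t ulen)
++	    {
++	        char *buf = kmalloc(64, GFP_KERNEL);
++	        if (!buf)
++	            return -ENOMEM;
++	        /* if ulen > 64 this would overflow the object; PAX_USERCOPY
++	         * rejects the copy because ulen exceeds the object's size */
++	        if (copy_from_user(buf, ubuf, ulen)) {
++	            kfree(buf);
++	            return -EFAULT;
++	        }
++	        kfree(buf);
++	        return 0;
++	    }
++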
++config PAX_USERCOPY_DEBUG
++ bool
++ depends on X86 && PAX_USERCOPY
++ default n
++
++config PAX_CONSTIFY_PLUGIN
++ bool "Automatically constify eligible structures"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on !UML && PAX_KERNEXEC
++ help
++ By saying Y here the compiler will automatically constify a class
++ of types that contain only function pointers. This reduces the
++ kernel's attack surface and also produces a better memory layout.
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++	  headers explicitly in addition to the normal gcc package.
++
++ Note that if some code really has to modify constified variables
++ then the source code will have to be patched to allow it. Examples
++ can be found in PaX itself (the no_const attribute) and for some
++ out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
++
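++	  A sketch of what the plugin does: a structure holding only function
++	  pointers is treated as if it were declared const, and code that must
++	  keep an instance writable opts out with the no_const attribute:
++
++	    struct example_ops {
++	        int  (*open)(void);
++	        void (*close)(void);
++	    };  /* instances effectively become const under the plugin */
++
++	    /* opting one variable out so it can still be patched at runtime */
++	    static struct example_ops patchable_ops __no_const;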
++config PAX_SIZE_OVERFLOW
++ bool "Prevent various integer overflows in function size parameters"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on X86
++ help
++ By saying Y here the kernel recomputes expressions of function
++ arguments marked by a size_overflow attribute with double integer
++ precision (DImode/TImode for 32/64 bit integer types).
++
++ The recomputed argument is checked against TYPE_MAX and an event
++ is logged on overflow and the triggering process is killed.
++
++ Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
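++	  The pattern targeted, sketched with a hypothetical wrapper whose
++	  size parameter carries the size_overflow attribute: on 32-bit,
++	  count can be chosen so the multiplication wraps and far less memory
++	  is allocated than later code writes:
++
++	    void *alloc_array(size_t count, size_t size)
++	    {
++	        /* 0x40000000 * 4 wraps to 0 in a 32-bit size_t; the plugin
++	         * redoes the multiply in double precision, detects the wrap,
++	         * logs it and kills the triggering process */
++	        return kmalloc(count * size, GFP_KERNEL);
++	    }
++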
++config PAX_LATENT_ENTROPY
++ bool "Generate some entropy during boot and runtime"
++ default y if GRKERNSEC_CONFIG_AUTO
++ help
++ By saying Y here the kernel will instrument some kernel code to
++ extract some entropy from both original and artificially created
++ program state. This will help especially embedded systems where
++ there is little 'natural' source of entropy normally. The cost
++ is some slowdown of the boot process and fork and irq processing.
++
++ When pax_extra_latent_entropy is passed on the kernel command line,
++ entropy will be extracted from up to the first 4GB of RAM while the
++ runtime memory allocator is being initialized. This costs even more
++ slowdown of the boot process.
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
++ Note that entropy extracted this way is not cryptographically
++ secure!
++
++endmenu
++
++endmenu
++
++source grsecurity/Kconfig
++
++endmenu
++
++endmenu
++
+ config KEYS
+ bool "Enable access key retention support"
+ help
+@@ -169,7 +1117,7 @@ config INTEL_TXT
+ config LSM_MMAP_MIN_ADDR
+ int "Low address space for LSM to protect from user allocation"
+ depends on SECURITY && SECURITY_SELINUX
+- default 32768 if ARM
++ default 32768 if ALPHA || ARM || PARISC || SPARC32
+ default 65536
+ help
+ This is the portion of low virtual memory which should be protected
+diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
+index 9b9013b..51ebf96 100644
+--- a/security/apparmor/Kconfig
++++ b/security/apparmor/Kconfig
+@@ -29,3 +29,12 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
+ boot.
+
+ If you are unsure how to answer this question, answer 1.
++
++config SECURITY_APPARMOR_COMPAT_24
++ bool "Enable AppArmor 2.4 compatability"
++ depends on SECURITY_APPARMOR
++ default y
++ help
++ This option enables compatibility with AppArmor 2.4. It is
++ recommended if compatibility with older versions of AppArmor
++ is desired.
+diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
+index 2dafe50..0bb604b 100644
+--- a/security/apparmor/Makefile
++++ b/security/apparmor/Makefile
+@@ -4,9 +4,10 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
+
+ apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
+ path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
+- resource.o sid.o file.o
++ resource.o sid.o file.o net.o
++apparmor-$(CONFIG_SECURITY_APPARMOR_COMPAT_24) += apparmorfs-24.o
+
+-clean-files := capability_names.h rlim_names.h
++clean-files := capability_names.h rlim_names.h af_names.h
+
+
+ # Build a lower case string table of capability names
+@@ -44,9 +45,24 @@ cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ;\
+ sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
+ echo "};" >> $@
+
++# Build a lower case string table of address family names.
++# Transform lines from
++# #define AF_INET 2 /* Internet IP Protocol */
++# to
++# [2] = "inet",
++quiet_cmd_make-af = GEN $@
++cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
++ sed $< >> $@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e \
++ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+).*/[\2] = "\L\1",/p';\
++ echo "};" >> $@
++
++
+ $(obj)/capability.o : $(obj)/capability_names.h
+ $(obj)/resource.o : $(obj)/rlim_names.h
++$(obj)/net.o : $(obj)/af_names.h
+ $(obj)/capability_names.h : $(srctree)/include/linux/capability.h
+ $(call cmd,make-caps)
+ $(obj)/rlim_names.h : $(srctree)/include/asm-generic/resource.h
+ $(call cmd,make-rlim)
++$(obj)/af_names.h : $(srctree)/include/linux/socket.h
++ $(call cmd,make-af)
+\ No newline at end of file
+diff --git a/security/apparmor/apparmorfs-24.c b/security/apparmor/apparmorfs-24.c
+new file mode 100644
+index 0000000..dc8c744
+--- /dev/null
++++ b/security/apparmor/apparmorfs-24.c
+@@ -0,0 +1,287 @@
++/*
++ * AppArmor security module
++ *
++ * This file contains AppArmor /sys/kernel/security/apparmor interface functions
++ *
++ * Copyright (C) 1998-2008 Novell/SUSE
++ * Copyright 2009-2010 Canonical Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ *
++ * This file contains functions providing an interface for <= AppArmor 2.4
++ * compatibility. It is dependent on CONFIG_SECURITY_APPARMOR_COMPAT_24
++ * being set (see Makefile).
++ */
++
++#include <linux/security.h>
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/namei.h>
++
++#include "include/apparmor.h"
++#include "include/audit.h"
++#include "include/context.h"
++#include "include/policy.h"
++
++
++/* apparmor/matching */
++static ssize_t aa_matching_read(struct file *file, char __user *buf,
++ size_t size, loff_t *ppos)
++{
++ const char matching[] = "pattern=aadfa audit perms=crwxamlk/ "
++ "user::other";
++
++ return simple_read_from_buffer(buf, size, ppos, matching,
++ sizeof(matching) - 1);
++}
++
++const struct file_operations aa_fs_matching_fops = {
++ .read = aa_matching_read,
++};
++
++/* apparmor/features */
++static ssize_t aa_features_read(struct file *file, char __user *buf,
++ size_t size, loff_t *ppos)
++{
++ const char features[] = "file=3.1 capability=2.0 network=1.0 "
++ "change_hat=1.5 change_profile=1.1 " "aanamespaces=1.1 rlimit=1.1";
++
++ return simple_read_from_buffer(buf, size, ppos, features,
++ sizeof(features) - 1);
++}
++
++const struct file_operations aa_fs_features_fops = {
++ .read = aa_features_read,
++};
++
++/**
++ * __next_namespace - find the next namespace to list
++ * @root: root namespace to stop search at (NOT NULL)
++ * @ns: current ns position (NOT NULL)
++ *
++ * Find the next namespace from @ns under @root and handle all locking needed
++ * while switching current namespace.
++ *
++ * Returns: next namespace or NULL if at last namespace under @root
++ * NOTE: will not unlock root->lock
++ */
++static struct aa_namespace *__next_namespace(struct aa_namespace *root,
++ struct aa_namespace *ns)
++{
++ struct aa_namespace *parent;
++
++ /* is next namespace a child */
++ if (!list_empty(&ns->sub_ns)) {
++ struct aa_namespace *next;
++ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
++ read_lock(&next->lock);
++ return next;
++ }
++
++ /* check if the next ns is a sibling, parent, gp, .. */
++ parent = ns->parent;
++ while (parent) {
++ read_unlock(&ns->lock);
++ list_for_each_entry_continue(ns, &parent->sub_ns, base.list) {
++ read_lock(&ns->lock);
++ return ns;
++ }
++ if (parent == root)
++ return NULL;
++ ns = parent;
++ parent = parent->parent;
++ }
++
++ return NULL;
++}
++
++/**
++ * __first_profile - find the first profile in a namespace
++ * @root: namespace that is root of profiles being displayed (NOT NULL)
++ * @ns: namespace to start in (NOT NULL)
++ *
++ * Returns: unrefcounted profile or NULL if no profile
++ */
++static struct aa_profile *__first_profile(struct aa_namespace *root,
++ struct aa_namespace *ns)
++{
++ for ( ; ns; ns = __next_namespace(root, ns)) {
++ if (!list_empty(&ns->base.profiles))
++ return list_first_entry(&ns->base.profiles,
++ struct aa_profile, base.list);
++ }
++ return NULL;
++}
++
++/**
++ * __next_profile - step to the next profile in a profile tree
++ * @profile: current profile in tree (NOT NULL)
++ *
++ * Perform a depth first traversal on the profile tree in a namespace
++ *
++ * Returns: next profile or NULL if done
++ * Requires: profile->ns.lock to be held
++ */
++static struct aa_profile *__next_profile(struct aa_profile *p)
++{
++ struct aa_profile *parent;
++ struct aa_namespace *ns = p->ns;
++
++ /* is next profile a child */
++ if (!list_empty(&p->base.profiles))
++ return list_first_entry(&p->base.profiles, typeof(*p),
++ base.list);
++
++ /* is next profile a sibling, parent sibling, gp sibling, .. */
++ parent = p->parent;
++ while (parent) {
++ list_for_each_entry_continue(p, &parent->base.profiles,
++ base.list)
++ return p;
++ p = parent;
++ parent = parent->parent;
++ }
++
++ /* is next another profile in the namespace */
++ list_for_each_entry_continue(p, &ns->base.profiles, base.list)
++ return p;
++
++ return NULL;
++}
++
++/**
++ * next_profile - step to the next profile wherever it may be
++ * @root: root namespace (NOT NULL)
++ * @profile: current profile (NOT NULL)
++ *
++ * Returns: next profile or NULL if there isn't one
++ */
++static struct aa_profile *next_profile(struct aa_namespace *root,
++ struct aa_profile *profile)
++{
++ struct aa_profile *next = __next_profile(profile);
++ if (next)
++ return next;
++
++ /* finished all profiles in namespace move to next namespace */
++ return __first_profile(root, __next_namespace(root, profile->ns));
++}
++
++/**
++ * p_start - start a depth first traversal of profile tree
++ * @f: seq_file to fill
++ * @pos: current position
++ *
++ * Returns: first profile under current namespace or NULL if none found
++ *
++ * acquires first ns->lock
++ */
++static void *p_start(struct seq_file *f, loff_t *pos)
++ __acquires(root->lock)
++{
++ struct aa_profile *profile = NULL;
++ struct aa_namespace *root = aa_current_profile()->ns;
++ loff_t l = *pos;
++ f->private = aa_get_namespace(root);
++
++
++ /* find the first profile */
++ read_lock(&root->lock);
++ profile = __first_profile(root, root);
++
++ /* skip to position */
++ for (; profile && l > 0; l--)
++ profile = next_profile(root, profile);
++
++ return profile;
++}
++
++/**
++ * p_next - read the next profile entry
++ * @f: seq_file to fill
++ * @p: profile previously returned
++ * @pos: current position
++ *
++ * Returns: next profile after @p or NULL if none
++ *
++ * may acquire/release locks in namespace tree as necessary
++ */
++static void *p_next(struct seq_file *f, void *p, loff_t *pos)
++{
++ struct aa_profile *profile = p;
++ struct aa_namespace *root = f->private;
++ (*pos)++;
++
++ return next_profile(root, profile);
++}
++
++/**
++ * p_stop - stop depth first traversal
++ * @f: seq_file we are filling
++ * @p: the last profile written
++ *
++ * Release all locking done by p_start/p_next on namespace tree
++ */
++static void p_stop(struct seq_file *f, void *p)
++ __releases(root->lock)
++{
++ struct aa_profile *profile = p;
++ struct aa_namespace *root = f->private, *ns;
++
++ if (profile) {
++ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
++ read_unlock(&ns->lock);
++ }
++ read_unlock(&root->lock);
++ aa_put_namespace(root);
++}
++
++/**
++ * seq_show_profile - show a profile entry
++ * @f: seq_file to fill
++ * @p: current position (profile) (NOT NULL)
++ *
++ * Returns: error on failure
++ */
++static int seq_show_profile(struct seq_file *f, void *p)
++{
++ struct aa_profile *profile = (struct aa_profile *)p;
++ struct aa_namespace *root = f->private;
++
++ if (profile->ns != root)
++ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
++ seq_printf(f, "%s (%s)\n", profile->base.hname,
++ COMPLAIN_MODE(profile) ? "complain" : "enforce");
++
++ return 0;
++}
++
++static const struct seq_operations aa_fs_profiles_op = {
++ .start = p_start,
++ .next = p_next,
++ .stop = p_stop,
++ .show = seq_show_profile,
++};
++
++static int profiles_open(struct inode *inode, struct file *file)
++{
++ return seq_open(file, &aa_fs_profiles_op);
++}
++
++static int profiles_release(struct inode *inode, struct file *file)
++{
++ return seq_release(inode, file);
++}
++
++const struct file_operations aa_fs_profiles_fops = {
++ .open = profiles_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = profiles_release,
++};
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 69ddb47..be0f0f9 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -187,7 +187,11 @@ void __init aa_destroy_aafs(void)
+ aafs_remove(".remove");
+ aafs_remove(".replace");
+ aafs_remove(".load");
+-
++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
++ aafs_remove("profiles");
++ aafs_remove("matching");
++ aafs_remove("features");
++#endif
+ securityfs_remove(aa_fs_dentry);
+ aa_fs_dentry = NULL;
+ }
+@@ -218,7 +222,17 @@ static int __init aa_create_aafs(void)
+ aa_fs_dentry = NULL;
+ goto error;
+ }
+-
++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
++ error = aafs_create("matching", 0444, &aa_fs_matching_fops);
++ if (error)
++ goto error;
++ error = aafs_create("features", 0444, &aa_fs_features_fops);
++ if (error)
++ goto error;
++ error = aafs_create("profiles", 0440, &aa_fs_profiles_fops);
++ if (error)
++ goto error;
++#endif
+ error = aafs_create(".load", 0640, &aa_fs_profile_load);
+ if (error)
+ goto error;
+diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
+index c1e18ba..7316d77 100644
+--- a/security/apparmor/domain.c
++++ b/security/apparmor/domain.c
+@@ -395,6 +395,11 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ new_profile = find_attach(ns, &ns->base.profiles, name);
+ if (!new_profile)
+ goto cleanup;
++ /*
++ * NOTE: Domain transitions from unconfined are allowed
++ * even when no_new_privs is set because this aways results
++ * in a further reduction of permissions.
++ */
+ goto apply;
+ }
+
+@@ -455,6 +460,16 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+ /* fail exec */
+ error = -EACCES;
+
++ /*
++ * Policy has specified a domain transition, if no_new_privs then
++ * fail the exec.
++ */
++ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
++ aa_put_profile(new_profile);
++ error = -EPERM;
++ goto cleanup;
++ }
++
+ if (!new_profile)
+ goto audit;
+
+@@ -609,6 +624,14 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+ const char *target = NULL, *info = NULL;
+ int error = 0;
+
++ /*
++ * Fail explicitly requested domain transitions if no_new_privs.
++ * There is no exception for unconfined as change_hat is not
++ * available.
++ */
++ if (current->no_new_privs)
++ return -EPERM;
++
+ /* released below */
+ cred = get_current_cred();
+ cxt = cred->security;
+@@ -750,6 +773,18 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
+ cxt = cred->security;
+ profile = aa_cred_profile(cred);
+
++ /*
++ * Fail explicitly requested domain transitions if no_new_privs
++ * and not unconfined.
++ * Domain transitions from unconfined are allowed even when
++ * no_new_privs is set because this always results in a reduction
++ * of permissions.
++ */
++ if (current->no_new_privs && !unconfined(profile)) {
++ put_cred(cred);
++ return -EPERM;
++ }
++
+ if (ns_name) {
+ /* released below */
+ ns = aa_find_namespace(profile->ns, ns_name);
+diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
+index cb1e93a..14f955c 100644
+--- a/security/apparmor/include/apparmorfs.h
++++ b/security/apparmor/include/apparmorfs.h
+@@ -17,4 +17,10 @@
+
+ extern void __init aa_destroy_aafs(void);
+
++#ifdef CONFIG_SECURITY_APPARMOR_COMPAT_24
++extern const struct file_operations aa_fs_matching_fops;
++extern const struct file_operations aa_fs_features_fops;
++extern const struct file_operations aa_fs_profiles_fops;
++#endif
++
+ #endif /* __AA_APPARMORFS_H */
+diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
+new file mode 100644
+index 0000000..3c7d599
+--- /dev/null
++++ b/security/apparmor/include/net.h
+@@ -0,0 +1,40 @@
++/*
++ * AppArmor security module
++ *
++ * This file contains AppArmor network mediation definitions.
++ *
++ * Copyright (C) 1998-2008 Novell/SUSE
++ * Copyright 2009-2010 Canonical Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ */
++
++#ifndef __AA_NET_H
++#define __AA_NET_H
++
++#include <net/sock.h>
++
++/* struct aa_net - network confinement data
++ * @allow: basic network family permissions
++ * @audit: which network permissions to force audit
++ * @quiet: which network permissions to quiet rejects
++ */
++struct aa_net {
++ u16 allow[AF_MAX];
++ u16 audit[AF_MAX];
++ u16 quiet[AF_MAX];
++};
++
++extern int aa_net_perm(int op, struct aa_profile *profile, u16 family,
++ int type, int protocol, struct sock *sk);
++extern int aa_revalidate_sk(int op, struct sock *sk);
++
++static inline void aa_free_net_rules(struct aa_net *new)
++{
++ /* NOP */
++}
++
++#endif /* __AA_NET_H */
+diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
+index aeda5cf..6776929 100644
+--- a/security/apparmor/include/policy.h
++++ b/security/apparmor/include/policy.h
+@@ -27,6 +27,7 @@
+ #include "capability.h"
+ #include "domain.h"
+ #include "file.h"
++#include "net.h"
+ #include "resource.h"
+
+ extern const char *profile_mode_names[];
+@@ -145,6 +146,7 @@ struct aa_namespace {
+ * @size: the memory consumed by this profiles rules
+ * @file: The set of rules governing basic file access and domain transitions
+ * @caps: capabilities for the profile
++ * @net: network controls for the profile
+ * @rlimits: rlimits for the profile
+ *
+ * The AppArmor profile contains the basic confinement data. Each profile
+@@ -181,6 +183,7 @@ struct aa_profile {
+
+ struct aa_file_rules file;
+ struct aa_caps caps;
++ struct aa_net net;
+ struct aa_rlimit rlimits;
+ };
+
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 3783202..d70ae70 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -32,6 +32,7 @@
+ #include "include/context.h"
+ #include "include/file.h"
+ #include "include/ipc.h"
++#include "include/net.h"
+ #include "include/path.h"
+ #include "include/policy.h"
+ #include "include/procattr.h"
+@@ -621,7 +622,105 @@ static int apparmor_task_setrlimit(struct task_struct *task,
+ return error;
+ }
+
+-static struct security_operations apparmor_ops = {
++static int apparmor_socket_create(int family, int type, int protocol, int kern)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ if (kern)
++ return 0;
++
++ profile = __aa_current_profile();
++ if (!unconfined(profile))
++ error = aa_net_perm(OP_CREATE, profile, family, type, protocol,
++ NULL);
++ return error;
++}
++
++static int apparmor_socket_bind(struct socket *sock,
++ struct sockaddr *address, int addrlen)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_BIND, sk);
++}
++
++static int apparmor_socket_connect(struct socket *sock,
++ struct sockaddr *address, int addrlen)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_CONNECT, sk);
++}
++
++static int apparmor_socket_listen(struct socket *sock, int backlog)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_LISTEN, sk);
++}
++
++static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_ACCEPT, sk);
++}
++
++static int apparmor_socket_sendmsg(struct socket *sock,
++ struct msghdr *msg, int size)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_SENDMSG, sk);
++}
++
++static int apparmor_socket_recvmsg(struct socket *sock,
++ struct msghdr *msg, int size, int flags)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_RECVMSG, sk);
++}
++
++static int apparmor_socket_getsockname(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_GETSOCKNAME, sk);
++}
++
++static int apparmor_socket_getpeername(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_GETPEERNAME, sk);
++}
++
++static int apparmor_socket_getsockopt(struct socket *sock, int level,
++ int optname)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_GETSOCKOPT, sk);
++}
++
++static int apparmor_socket_setsockopt(struct socket *sock, int level,
++ int optname)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_SETSOCKOPT, sk);
++}
++
++static int apparmor_socket_shutdown(struct socket *sock, int how)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(OP_SOCK_SHUTDOWN, sk);
++}
++
++static struct security_operations apparmor_ops __read_only = {
+ .name = "apparmor",
+
+ .ptrace_access_check = apparmor_ptrace_access_check,
+@@ -652,6 +751,19 @@ static struct security_operations apparmor_ops = {
+ .getprocattr = apparmor_getprocattr,
+ .setprocattr = apparmor_setprocattr,
+
++ .socket_create = apparmor_socket_create,
++ .socket_bind = apparmor_socket_bind,
++ .socket_connect = apparmor_socket_connect,
++ .socket_listen = apparmor_socket_listen,
++ .socket_accept = apparmor_socket_accept,
++ .socket_sendmsg = apparmor_socket_sendmsg,
++ .socket_recvmsg = apparmor_socket_recvmsg,
++ .socket_getsockname = apparmor_socket_getsockname,
++ .socket_getpeername = apparmor_socket_getpeername,
++ .socket_getsockopt = apparmor_socket_getsockopt,
++ .socket_setsockopt = apparmor_socket_setsockopt,
++ .socket_shutdown = apparmor_socket_shutdown,
++
+ .cred_alloc_blank = apparmor_cred_alloc_blank,
+ .cred_free = apparmor_cred_free,
+ .cred_prepare = apparmor_cred_prepare,
+diff --git a/security/apparmor/match.c b/security/apparmor/match.c
+index 94de6b4..081491e 100644
+--- a/security/apparmor/match.c
++++ b/security/apparmor/match.c
+@@ -57,8 +57,17 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
+ if (bsize < tsize)
+ goto out;
+
++ /* Pad table allocation for next/check by 256 entries to remain
++ * backwards compatible with old (buggy) tools and remain safe without
++ * run time checks
++ */
++ if (th.td_id == YYTD_ID_NXT || th.td_id == YYTD_ID_CHK)
++ tsize += 256 * th.td_flags;
++
+ table = kvmalloc(tsize);
+ if (table) {
++ /* ensure the pad is clear, else there will be errors */
++ memset(table, 0, tsize);
+ *table = th;
+ if (th.td_flags == YYTD_DATA8)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+@@ -134,11 +143,19 @@ static int verify_dfa(struct aa_dfa *dfa, int flags)
+ goto out;
+
+ if (flags & DFA_FLAG_VERIFY_STATES) {
++ int warning = 0;
+ for (i = 0; i < state_count; i++) {
+ if (DEFAULT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ /* TODO: do check that DEF state recursion terminates */
+ if (BASE_TABLE(dfa)[i] + 255 >= trans_count) {
++ if (warning)
++ continue;
++ printk(KERN_WARNING "AppArmor DFA next/check "
++ "upper bounds error fixed, upgrade "
++ "user space tools \n");
++ warning = 1;
++ } else if (BASE_TABLE(dfa)[i] >= trans_count) {
+ printk(KERN_ERR "AppArmor DFA next/check upper "
+ "bounds error\n");
+ goto out;
+diff --git a/security/apparmor/net.c b/security/apparmor/net.c
+new file mode 100644
+index 0000000..1765901
+--- /dev/null
++++ b/security/apparmor/net.c
+@@ -0,0 +1,170 @@
++/*
++ * AppArmor security module
++ *
++ * This file contains AppArmor network mediation
++ *
++ * Copyright (C) 1998-2008 Novell/SUSE
++ * Copyright 2009-2010 Canonical Ltd.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ */
++
++#include "include/apparmor.h"
++#include "include/audit.h"
++#include "include/context.h"
++#include "include/net.h"
++#include "include/policy.h"
++
++#include "af_names.h"
++
++static const char *sock_type_names[] = {
++ "unknown(0)",
++ "stream",
++ "dgram",
++ "raw",
++ "rdm",
++ "seqpacket",
++ "dccp",
++ "unknown(7)",
++ "unknown(8)",
++ "unknown(9)",
++ "packet",
++};
++
++/* audit callback for net specific fields */
++static void audit_cb(struct audit_buffer *ab, void *va)
++{
++ struct common_audit_data *sa = va;
++
++ audit_log_format(ab, " family=");
++ if (address_family_names[sa->u.net.family]) {
++ audit_log_string(ab, address_family_names[sa->u.net.family]);
++ } else {
++ audit_log_format(ab, " \"unknown(%d)\"", sa->u.net.family);
++ }
++
++ audit_log_format(ab, " sock_type=");
++ if (sock_type_names[sa->aad.net.type]) {
++ audit_log_string(ab, sock_type_names[sa->aad.net.type]);
++ } else {
++ audit_log_format(ab, "\"unknown(%d)\"", sa->aad.net.type);
++ }
++
++ audit_log_format(ab, " protocol=%d", sa->aad.net.protocol);
++}
++
++/**
++ * audit_net - audit network access
++ * @profile: profile being enforced (NOT NULL)
++ * @op: operation being checked
++ * @family: network family
++ * @type: network type
++ * @protocol: network protocol
++ * @sk: socket auditing is being applied to
++ * @error: error code for failure else 0
++ *
++ * Returns: %0 or sa->error else other errorcode on failure
++ */
++static int audit_net(struct aa_profile *profile, int op, u16 family, int type,
++ int protocol, struct sock *sk, int error)
++{
++ int audit_type = AUDIT_APPARMOR_AUTO;
++ struct common_audit_data sa;
++ if (sk) {
++ COMMON_AUDIT_DATA_INIT(&sa, NET);
++ } else {
++ COMMON_AUDIT_DATA_INIT(&sa, NONE);
++ }
++ /* todo fill in socket addr info */
++
++ sa.aad.op = op,
++ sa.u.net.family = family;
++ sa.u.net.sk = sk;
++ sa.aad.net.type = type;
++ sa.aad.net.protocol = protocol;
++ sa.aad.error = error;
++
++ if (likely(!sa.aad.error)) {
++ u16 audit_mask = profile->net.audit[sa.u.net.family];
++ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
++ !(1 << sa.aad.net.type & audit_mask)))
++ return 0;
++ audit_type = AUDIT_APPARMOR_AUDIT;
++ } else {
++ u16 quiet_mask = profile->net.quiet[sa.u.net.family];
++ u16 kill_mask = 0;
++ u16 denied = (1 << sa.aad.net.type) & ~quiet_mask;
++
++ if (denied & kill_mask)
++ audit_type = AUDIT_APPARMOR_KILL;
++
++ if ((denied & quiet_mask) &&
++ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
++ AUDIT_MODE(profile) != AUDIT_ALL)
++ return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
++ }
++
++ return aa_audit(audit_type, profile, GFP_KERNEL, &sa, audit_cb);
++}
++
++/**
++ * aa_net_perm - very coarse network access check
++ * @op: operation being checked
++ * @profile: profile being enforced (NOT NULL)
++ * @family: network family
++ * @type: network type
++ * @protocol: network protocol
++ *
++ * Returns: %0 else error if permission denied
++ */
++int aa_net_perm(int op, struct aa_profile *profile, u16 family, int type,
++ int protocol, struct sock *sk)
++{
++ u16 family_mask;
++ int error;
++
++ if ((family < 0) || (family >= AF_MAX))
++ return -EINVAL;
++
++ if ((type < 0) || (type >= SOCK_MAX))
++ return -EINVAL;
++
++ /* unix domain and netlink sockets are handled by ipc */
++ if (family == AF_UNIX || family == AF_NETLINK)
++ return 0;
++
++ family_mask = profile->net.allow[family];
++
++ error = (family_mask & (1 << type)) ? 0 : -EACCES;
++
++ return audit_net(profile, op, family, type, protocol, sk, error);
++}
++
++/**
++ * aa_revalidate_sk - Revalidate access to a sock
++ * @op: operation being checked
++ * @sk: sock being revalidated (NOT NULL)
++ *
++ * Returns: %0 else error if permission denied
++ */
++int aa_revalidate_sk(int op, struct sock *sk)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ /* aa_revalidate_sk should not be called from interrupt context;
++ * don't mediate these calls as they are not task related
++ */
++ if (in_interrupt())
++ return 0;
++
++ profile = __aa_current_profile();
++ if (!unconfined(profile))
++ error = aa_net_perm(op, profile, sk->sk_family, sk->sk_type,
++ sk->sk_protocol, sk);
++
++ return error;
++}
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 4f0eade..4d5ce13 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -745,6 +745,7 @@ static void free_profile(struct aa_profile *profile)
+
+ aa_free_file_rules(&profile->file);
+ aa_free_cap_rules(&profile->caps);
++ aa_free_net_rules(&profile->net);
+ aa_free_rlimit_rules(&profile->rlimits);
+
+ aa_free_sid(profile->sid);
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 741dd13..ee8043e 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -190,6 +190,19 @@ fail:
+ return 0;
+ }
+
++static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
++{
++ if (unpack_nameX(e, AA_U16, name)) {
++ if (!inbounds(e, sizeof(u16)))
++ return 0;
++ if (data)
++ *data = le16_to_cpu(get_unaligned((u16 *) e->pos));
++ e->pos += sizeof(u16);
++ return 1;
++ }
++ return 0;
++}
++
+ static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+ {
+ if (unpack_nameX(e, AA_U32, name)) {
+@@ -468,7 +481,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
+ {
+ struct aa_profile *profile = NULL;
+ const char *name = NULL;
+- int error = -EPROTO;
++ size_t size = 0;
++ int i, error = -EPROTO;
+ kernel_cap_t tmpcap;
+ u32 tmp;
+
+@@ -559,6 +573,38 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
+ if (!unpack_rlimits(e, profile))
+ goto fail;
+
++ size = unpack_array(e, "net_allowed_af");
++ if (size) {
++
++ for (i = 0; i < size; i++) {
++ /* discard extraneous rules that this kernel will
++ * never request
++ */
++ if (i >= AF_MAX) {
++ u16 tmp;
++ if (!unpack_u16(e, &tmp, NULL) ||
++ !unpack_u16(e, &tmp, NULL) ||
++ !unpack_u16(e, &tmp, NULL))
++ goto fail;
++ continue;
++ }
++ if (!unpack_u16(e, &profile->net.allow[i], NULL))
++ goto fail;
++ if (!unpack_u16(e, &profile->net.audit[i], NULL))
++ goto fail;
++ if (!unpack_u16(e, &profile->net.quiet[i], NULL))
++ goto fail;
++ }
++ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
++ goto fail;
++ /*
++ * allow unix domain and netlink sockets they are handled
++ * by IPC
++ */
++ }
++ profile->net.allow[AF_UNIX] = 0xffff;
++ profile->net.allow[AF_NETLINK] = 0xffff;
++
+ /* get file rules */
+ profile->file.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->file.dfa)) {
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 12440ee..2ec6d88 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -29,6 +29,7 @@
+ #include <linux/securebits.h>
+ #include <linux/user_namespace.h>
+ #include <linux/personality.h>
++#include <net/sock.h>
+
+ /*
+ * If a non-root user executes a setuid-root binary in
+@@ -59,7 +60,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
+
+ int cap_netlink_recv(struct sk_buff *skb, int cap)
+ {
+- if (!cap_raised(current_cap(), cap))
++ if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
+ return -EPERM;
+ return 0;
+ }
+@@ -424,6 +425,45 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
+ return 0;
+ }
+
++/* returns:
++ 1 for suid privilege
++ 2 for sgid privilege
++ 3 for fscap privilege
++*/
++int is_privileged_binary(const struct dentry *dentry)
++{
++ struct cpu_vfs_cap_data capdata;
++ struct inode *inode = dentry->d_inode;
++
++ if (!inode || S_ISDIR(inode->i_mode))
++ return 0;
++
++ if (inode->i_mode & S_ISUID)
++ return 1;
++ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
++ return 2;
++
++ if (!get_vfs_caps_from_disk(dentry, &capdata)) {
++ if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
++ return 3;
++ }
++
++ return 0;
++}
++
++/* returns 1 for suid root privilege
++ returns 3 for fscap privilege
++*/
++int is_root_privileged_binary(const struct dentry *dentry)
++{
++ int ret = is_privileged_binary(dentry);
++ if (ret == 3)
++ return ret;
++ if (ret == 1 && dentry->d_inode->i_uid == 0)
++ return ret;
++ return 0;
++}
++
+ /*
+ * Attempt to get the on-exec apply capability sets for an executable file from
+ * its xattrs and, if present, apply them to the proposed credentials being
+@@ -521,14 +561,17 @@ skip:
+
+
+ /* Don't let someone trace a set[ug]id/setpcap binary with the revised
+- * credentials unless they have the appropriate permit
++ * credentials unless they have the appropriate permit.
++ *
++ * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
+ */
+ if ((new->euid != old->uid ||
+ new->egid != old->gid ||
+ !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
+ bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
+ /* downgrade; they get no more than they had, and maybe less */
+- if (!capable(CAP_SETUID)) {
++ if (!capable(CAP_SETUID) ||
++ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
+ new->euid = new->uid;
+ new->egid = new->gid;
+ }
+@@ -585,6 +628,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
+ {
+ const struct cred *cred = current_cred();
+
++ if (gr_acl_enable_at_secure())
++ return 1;
++
+ if (cred->uid != 0) {
+ if (bprm->cap_effective)
+ return 1;
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 3ccf7ac..d73ad64 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ extern spinlock_t ima_queue_lock;
+
+ struct ima_h_table {
+- atomic_long_t len; /* number of stored measurements in the list */
+- atomic_long_t violations;
++ atomic_long_unchecked_t len; /* number of stored measurements in the list */
++ atomic_long_unchecked_t violations;
+ struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+ };
+ extern struct ima_h_table ima_htable;
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 88a2788..581ab92 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ int result;
+
+ /* can overflow, only indicator */
+- atomic_long_inc(&ima_htable.violations);
++ atomic_long_inc_unchecked(&ima_htable.violations);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
+index c5c5a72..2ad942f 100644
+--- a/security/integrity/ima/ima_audit.c
++++ b/security/integrity/ima/ima_audit.c
+@@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, fname);
+ }
+- if (inode)
+- audit_log_format(ab, " dev=%s ino=%lu",
+- inode->i_sb->s_id, inode->i_ino);
++ if (inode) {
++ audit_log_format(ab, " dev=");
++ audit_log_untrustedstring(ab, inode->i_sb->s_id);
++ audit_log_format(ab, " ino=%lu", inode->i_ino);
++ }
+ audit_log_format(ab, " res=%d", !result ? 0 : 1);
+ audit_log_end(ab);
+ }
+diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
+index e1aa2b4..52027bf 100644
+--- a/security/integrity/ima/ima_fs.c
++++ b/security/integrity/ima/ima_fs.c
+@@ -28,12 +28,12 @@
+ static int valid_policy = 1;
+ #define TMPBUFLEN 12
+ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+- loff_t *ppos, atomic_long_t *val)
++ loff_t *ppos, atomic_long_unchecked_t *val)
+ {
+ char tmpbuf[TMPBUFLEN];
+ ssize_t len;
+
+- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+ }
+
+diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
+index 55a6271..ad829c3 100644
+--- a/security/integrity/ima/ima_queue.c
++++ b/security/integrity/ima/ima_queue.c
+@@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
+ INIT_LIST_HEAD(&qe->later);
+ list_add_tail_rcu(&qe->later, &ima_measurements);
+
+- atomic_long_inc(&ima_htable.len);
++ atomic_long_inc_unchecked(&ima_htable.len);
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ return 0;
+diff --git a/security/keys/compat.c b/security/keys/compat.c
+index 1b0b7bf..9476b92 100644
+--- a/security/keys/compat.c
++++ b/security/keys/compat.c
+@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
+ if (ret == 0)
+ goto no_payload_free;
+
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+ err:
+ if (iov != iovstack)
+ kfree(iov);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 4414abd..bb89c73 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -956,7 +956,7 @@ int register_key_type(struct key_type *ktype)
+ }
+
+ /* store the type */
+- list_add(&ktype->link, &key_types_list);
++ pax_list_add((struct list_head *)&ktype->link, &key_types_list);
+ ret = 0;
+
+ out:
+@@ -976,7 +976,7 @@ EXPORT_SYMBOL(register_key_type);
+ void unregister_key_type(struct key_type *ktype)
+ {
+ down_write(&key_types_sem);
+- list_del_init(&ktype->link);
++ pax_list_del_init((struct list_head *)&ktype->link);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ up_read(&key_types_sem);
+@@ -993,9 +993,9 @@ void __init key_init(void)
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+
+ /* add the special key types */
+- list_add_tail(&key_type_keyring.link, &key_types_list);
+- list_add_tail(&key_type_dead.link, &key_types_list);
+- list_add_tail(&key_type_user.link, &key_types_list);
++ pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
++ pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
++ pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
+
+ /* record the root user tracking */
+ rb_link_node(&root_key_user.node,
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index b70eaa2..35b5b71 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
+ /*
+ * Copy the iovec data from userspace
+ */
+-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
++static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
+ unsigned ioc)
+ {
+ for (; ioc > 0; ioc--) {
+@@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
+ * If successful, 0 will be returned.
+ */
+ long keyctl_instantiate_key_common(key_serial_t id,
+- const struct iovec *payload_iov,
++ const struct iovec __user *payload_iov,
+ unsigned ioc,
+ size_t plen,
+ key_serial_t ringid)
+@@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
+ [0].iov_len = plen
+ };
+
+- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
++ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
+ }
+
+ return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+@@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
+ if (ret == 0)
+ goto no_payload_free;
+
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
+ err:
+ if (iov != iovstack)
+ kfree(iov);
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 37a7f3b..86dc19f 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
+ ret = -EFAULT;
+
+ for (loop = 0; loop < klist->nkeys; loop++) {
++ key_serial_t serial;
+ key = klist->keys[loop];
++ serial = key->serial;
+
+ tmp = sizeof(key_serial_t);
+ if (tmp > buflen)
+ tmp = buflen;
+
+- if (copy_to_user(buffer,
+- &key->serial,
+- tmp) != 0)
++ if (copy_to_user(buffer, &serial, tmp))
+ goto error;
+
+ buflen -= tmp;
+diff --git a/security/lsm_audit.c b/security/lsm_audit.c
+index 893af8a..ba9237c 100644
+--- a/security/lsm_audit.c
++++ b/security/lsm_audit.c
+@@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
+ audit_log_d_path(ab, "path=", &a->u.path);
+
+ inode = a->u.path.dentry->d_inode;
+- if (inode)
+- audit_log_format(ab, " dev=%s ino=%lu",
+- inode->i_sb->s_id,
+- inode->i_ino);
++ if (inode) {
++ audit_log_format(ab, " dev=");
++ audit_log_untrustedstring(ab, inode->i_sb->s_id);
++ audit_log_format(ab, " ino=%lu", inode->i_ino);
++ }
+ break;
+ }
+ case LSM_AUDIT_DATA_DENTRY: {
+@@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
+ audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+
+ inode = a->u.dentry->d_inode;
+- if (inode)
+- audit_log_format(ab, " dev=%s ino=%lu",
+- inode->i_sb->s_id,
+- inode->i_ino);
++ if (inode) {
++ audit_log_format(ab, " dev=");
++ audit_log_untrustedstring(ab, inode->i_sb->s_id);
++ audit_log_format(ab, " ino=%lu", inode->i_ino);
++ }
+ break;
+ }
+ case LSM_AUDIT_DATA_INODE: {
+@@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
+ dentry->d_name.name);
+ dput(dentry);
+ }
+- audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
+- inode->i_ino);
++ audit_log_format(ab, " dev=");
++ audit_log_untrustedstring(ab, inode->i_sb->s_id);
++ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ break;
+ }
+ case LSM_AUDIT_DATA_TASK:
+diff --git a/security/min_addr.c b/security/min_addr.c
+index f728728..6457a0c 100644
+--- a/security/min_addr.c
++++ b/security/min_addr.c
+@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
+ */
+ static void update_mmap_min_addr(void)
+ {
++#ifndef SPARC
+ #ifdef CONFIG_LSM_MMAP_MIN_ADDR
+ if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
+ mmap_min_addr = dac_mmap_min_addr;
+@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
+ #else
+ mmap_min_addr = dac_mmap_min_addr;
+ #endif
++#endif
+ }
+
+ /*
+diff --git a/security/security.c b/security/security.c
+index e2f684a..1649b69 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -26,8 +26,8 @@
+ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
+ CONFIG_DEFAULT_SECURITY;
+
+-static struct security_operations *security_ops;
+-static struct security_operations default_security_ops = {
++struct security_operations *security_ops __read_only;
++struct security_operations default_security_ops __read_only = {
+ .name = "default",
+ };
+
+@@ -66,11 +66,6 @@ int __init security_init(void)
+ return 0;
+ }
+
+-void reset_security_ops(void)
+-{
+- security_ops = &default_security_ops;
+-}
+-
+ /* Save user chosen LSM */
+ static int __init choose_lsm(char *str)
+ {
+@@ -162,6 +157,13 @@ int security_capable(struct user_namespace *ns, const struct cred *cred,
+ SECURITY_CAP_AUDIT);
+ }
+
++int security_capable_noaudit(struct user_namespace *ns, const struct cred *cred,
++ int cap)
++{
++ return security_ops->capable(current, cred, ns, cap,
++ SECURITY_CAP_NOAUDIT);
++}
++
+ int security_real_capable(struct task_struct *tsk, struct user_namespace *ns,
+ int cap)
+ {
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index dca1c22..4fa4591 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -59,7 +59,7 @@ struct avc_node {
+ struct avc_cache {
+ struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
+ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
+- atomic_t lru_hint; /* LRU hint for reclaim scan */
++ atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */
+ atomic_t active_nodes;
+ u32 latest_notif; /* latest revocation notification */
+ };
+@@ -173,7 +173,7 @@ void __init avc_init(void)
+ spin_lock_init(&avc_cache.slots_lock[i]);
+ }
+ atomic_set(&avc_cache.active_nodes, 0);
+- atomic_set(&avc_cache.lru_hint, 0);
++ atomic_set_unchecked(&avc_cache.lru_hint, 0);
+
+ avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
+ 0, SLAB_PANIC, NULL);
+@@ -251,7 +251,7 @@ static inline int avc_reclaim_node(void)
+ spinlock_t *lock;
+
+ for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
+- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
++ hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+ head = &avc_cache.slots[hvalue];
+ lock = &avc_cache.slots_lock[hvalue];
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 5898f34..f44199b 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -95,8 +95,6 @@
+
+ #define NUM_SEL_MNT_OPTS 5
+
+-extern struct security_operations *security_ops;
+-
+ /* SECMARK reference count */
+ static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+
+@@ -2001,6 +1999,13 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+ new_tsec->sid = old_tsec->exec_sid;
+ /* Reset exec SID on execve. */
+ new_tsec->exec_sid = 0;
++
++ /*
++ * Minimize confusion: if no_new_privs and a transition is
++ * explicitly requested, then fail the exec.
++ */
++ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
++ return -EPERM;
+ } else {
+ /* Check for a default transition on this program. */
+ rc = security_transition_sid(old_tsec->sid, isec->sid,
+@@ -2013,7 +2018,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+ COMMON_AUDIT_DATA_INIT(&ad, PATH);
+ ad.u.path = bprm->file->f_path;
+
+- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++ if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
++ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
+ new_tsec->sid = old_tsec->sid;
+
+ if (new_tsec->sid == old_tsec->sid) {
+@@ -4181,8 +4187,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+ PEER__RECV, &ad);
+- if (err)
++ if (err) {
+ selinux_netlbl_err(skb, err, 0);
++ return err;
++ }
+ }
+
+ if (secmark_active) {
+@@ -5372,11 +5380,11 @@ static int selinux_setprocattr(struct task_struct *p,
+ /* Check for ptracing, and update the task SID if ok.
+ Otherwise, leave SID unchanged and fail. */
+ ptsid = 0;
+- task_lock(p);
++ rcu_read_lock();
+ tracer = ptrace_parent(p);
+ if (tracer)
+ ptsid = task_sid(tracer);
+- task_unlock(p);
++ rcu_read_unlock();
+
+ if (tracer) {
+ error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+@@ -5508,7 +5516,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
+
+ #endif
+
+-static struct security_operations selinux_ops = {
++static struct security_operations selinux_ops __read_only = {
+ .name = "selinux",
+
+ .ptrace_access_check = selinux_ptrace_access_check,
+@@ -5854,6 +5862,9 @@ static void selinux_nf_ip_exit(void)
+ #ifdef CONFIG_SECURITY_SELINUX_DISABLE
+ static int selinux_disabled;
+
++extern struct security_operations *security_ops;
++extern struct security_operations default_security_ops;
++
+ int selinux_disable(void)
+ {
+ if (ss_initialized) {
+@@ -5871,7 +5882,9 @@ int selinux_disable(void)
+ selinux_disabled = 1;
+ selinux_enabled = 0;
+
+- reset_security_ops();
++ pax_open_kernel();
++ security_ops = &default_security_ops;
++ pax_close_kernel();
+
+ /* Try to destroy the avc node cache */
+ avc_disable();
+diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
+index b43813c..74be837 100644
+--- a/security/selinux/include/xfrm.h
++++ b/security/selinux/include/xfrm.h
+@@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+
+ static inline void selinux_xfrm_notify_policyload(void)
+ {
+- atomic_inc(&flow_cache_genid);
++ atomic_inc_unchecked(&flow_cache_genid);
+ }
+ #else
+ static inline int selinux_xfrm_enabled(void)
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 7db62b4..ee4d949 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+ return 0;
+ }
+
+-struct security_operations smack_ops = {
++struct security_operations smack_ops __read_only = {
+ .name = "smack",
+
+ .ptrace_access_check = smack_ptrace_access_check,
+diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
+index 4b327b6..646c57a2b 100644
+--- a/security/tomoyo/tomoyo.c
++++ b/security/tomoyo/tomoyo.c
+@@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ * tomoyo_security_ops is a "struct security_operations" which is used for
+ * registering TOMOYO.
+ */
+-static struct security_operations tomoyo_security_ops = {
++static struct security_operations tomoyo_security_ops __read_only = {
+ .name = "tomoyo",
+ .cred_alloc_blank = tomoyo_cred_alloc_blank,
+ .cred_prepare = tomoyo_cred_prepare,
+diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
+index 762af68..7103453 100644
+--- a/sound/aoa/codecs/onyx.c
++++ b/sound/aoa/codecs/onyx.c
+@@ -54,7 +54,7 @@ struct onyx {
+ spdif_locked:1,
+ analog_locked:1,
+ original_mute:2;
+- int open_count;
++ local_t open_count;
+ struct codec_info *codec_info;
+
+ /* mutex serializes concurrent access to the device
+@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count++;
++ local_inc(&onyx->open_count);
+ mutex_unlock(&onyx->mutex);
+
+ return 0;
+@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
+ struct onyx *onyx = cii->codec_data;
+
+ mutex_lock(&onyx->mutex);
+- onyx->open_count--;
+- if (!onyx->open_count)
++ if (local_dec_and_test(&onyx->open_count))
+ onyx->spdif_locked = onyx->analog_locked = 0;
+ mutex_unlock(&onyx->mutex);
+
+diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
+index ffd2025..df062c9 100644
+--- a/sound/aoa/codecs/onyx.h
++++ b/sound/aoa/codecs/onyx.h
+@@ -11,6 +11,7 @@
+ #include <linux/i2c.h>
+ #include <asm/pmac_low_i2c.h>
+ #include <asm/prom.h>
++#include <asm/local.h>
+
+ /* PCM3052 register definitions */
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 542f69e..fe6e8c3 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
+ if (in_kernel) {
+ mm_segment_t fs;
+ fs = snd_enter_user();
+- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+ snd_leave_user(fs);
+ } else {
+- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
+ }
+ if (ret != -EPIPE && ret != -ESTRPIPE)
+ break;
+@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
+ if (in_kernel) {
+ mm_segment_t fs;
+ fs = snd_enter_user();
+- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+ snd_leave_user(fs);
+ } else {
+- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
++ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
+ }
+ if (ret == -EPIPE) {
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
+ struct snd_pcm_plugin_channel *channels;
+ size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
+ if (!in_kernel) {
+- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
++ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
+ return -EFAULT;
+ buf = runtime->oss.buffer;
+ }
+@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ }
+ } else {
+ tmp = snd_pcm_oss_write2(substream,
+- (const char __force *)buf,
++ (const char __force_kernel *)buf,
+ runtime->oss.period_bytes, 0);
+ if (tmp <= 0)
+ goto err;
+@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_sframes_t frames, frames1;
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+- char __user *final_dst = (char __force __user *)buf;
++ char __user *final_dst = (char __force_user *)buf;
+ if (runtime->oss.plugin_first) {
+ struct snd_pcm_plugin_channel *channels;
+ size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
+@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ xfer += tmp;
+ runtime->oss.buffer_used -= tmp;
+ } else {
+- tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
++ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
+ runtime->oss.period_bytes, 0);
+ if (tmp <= 0)
+ goto err;
+@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ size1);
+ size1 /= runtime->channels; /* frames */
+ fs = snd_enter_user();
+- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
++ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
+ snd_leave_user(fs);
+ }
+ } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 91cdf943..4085161 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
+ int err;
+
+ fs = snd_enter_user();
+- err = snd_pcm_delay(substream, &delay);
++ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
+ snd_leave_user(fs);
+ if (err < 0)
+ return err;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 638600b..2e6b1fd 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2788,11 +2788,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
+- (void __user *)arg);
++ (void __force_user *)arg);
+ break;
+ case SNDRV_PCM_STREAM_CAPTURE:
+ result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
+- (void __user *)arg);
++ (void __force_user *)arg);
+ break;
+ default:
+ result = -EINVAL;
+diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
+index 5cf8d65..912a79c 100644
+--- a/sound/core/seq/seq_device.c
++++ b/sound/core/seq/seq_device.c
+@@ -64,7 +64,7 @@ struct ops_list {
+ int argsize; /* argument size */
+
+ /* operators */
+- struct snd_seq_dev_ops ops;
++ struct snd_seq_dev_ops *ops;
+
+ /* registered devices */
+ struct list_head dev_list; /* list of devices */
+@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
+
+ mutex_lock(&ops->reg_mutex);
+ /* copy driver operators */
+- ops->ops = *entry;
++ ops->ops = entry;
+ ops->driver |= DRIVER_LOADED;
+ ops->argsize = argsize;
+
+@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
+ dev->name, ops->id, ops->argsize, dev->argsize);
+ return -EINVAL;
+ }
+- if (ops->ops.init_device(dev) >= 0) {
++ if (ops->ops->init_device(dev) >= 0) {
+ dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
+ ops->num_init_devices++;
+ } else {
+@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
+ dev->name, ops->id, ops->argsize, dev->argsize);
+ return -EINVAL;
+ }
+- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
++ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
+ dev->status = SNDRV_SEQ_DEVICE_FREE;
+ dev->driver_data = NULL;
+ ops->num_init_devices--;
+diff --git a/sound/core/sound.c b/sound/core/sound.c
+index 8e17b4d..6819e80 100644
+--- a/sound/core/sound.c
++++ b/sound/core/sound.c
+@@ -87,7 +87,7 @@ static void snd_request_other(int minor)
+ case SNDRV_MINOR_TIMER: str = "snd-timer"; break;
+ default: return;
+ }
+- request_module(str);
++ request_module("%s", str);
+ }
+
+ #endif /* modular kernel */
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
+index f24bf9a..1f7b67c 100644
+--- a/sound/drivers/mts64.c
++++ b/sound/drivers/mts64.c
+@@ -29,6 +29,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Miditerminal 4140"
+ #define DRIVER_NAME "MTS64"
+@@ -67,7 +68,7 @@ struct mts64 {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int current_midi_output_port;
+ int current_midi_input_port;
+ u8 mode[MTS64_NUM_INPUT_PORTS];
+@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
+ {
+ struct mts64 *mts = substream->rmidi->private_data;
+
+- if (mts->open_count == 0) {
++ if (local_read(&mts->open_count) == 0) {
+ /* We don't need a spinlock here, because this is just called
+ if the device has not been opened before.
+ So there aren't any IRQs from the device */
+@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
+
+ msleep(50);
+ }
+- ++(mts->open_count);
++ local_inc(&mts->open_count);
+
+ return 0;
+ }
+@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
+ struct mts64 *mts = substream->rmidi->private_data;
+ unsigned long flags;
+
+- --(mts->open_count);
+- if (mts->open_count == 0) {
++ if (local_dec_return(&mts->open_count) == 0) {
+ /* We need the spinlock_irqsave here because we can still
+ have IRQs at this point */
+ spin_lock_irqsave(&mts->lock, flags);
+@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
+
+ msleep(500);
+
+- } else if (mts->open_count < 0)
+- mts->open_count = 0;
++ } else if (local_read(&mts->open_count) < 0)
++ local_set(&mts->open_count, 0);
+
+ return 0;
+ }
+diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
+index b953fb4..1999c01 100644
+--- a/sound/drivers/opl4/opl4_lib.c
++++ b/sound/drivers/opl4/opl4_lib.c
+@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+ MODULE_DESCRIPTION("OPL4 driver");
+ MODULE_LICENSE("GPL");
+
+-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
++static inline void snd_opl4_wait(struct snd_opl4 *opl4)
+ {
+ int timeout = 10;
+ while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
+diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
+index f664823..590c745 100644
+--- a/sound/drivers/portman2x4.c
++++ b/sound/drivers/portman2x4.c
+@@ -48,6 +48,7 @@
+ #include <sound/initval.h>
+ #include <sound/rawmidi.h>
+ #include <sound/control.h>
++#include <asm/local.h>
+
+ #define CARD_NAME "Portman 2x4"
+ #define DRIVER_NAME "portman"
+@@ -85,7 +86,7 @@ struct portman {
+ struct pardevice *pardev;
+ int pardev_claimed;
+
+- int open_count;
++ local_t open_count;
+ int mode[PORTMAN_NUM_INPUT_PORTS];
+ struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
+ };
+diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
+index 87657dd..a8268d4 100644
+--- a/sound/firewire/amdtp.c
++++ b/sound/firewire/amdtp.c
+@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
+ ptr = s->pcm_buffer_pointer + data_blocks;
+ if (ptr >= pcm->runtime->buffer_size)
+ ptr -= pcm->runtime->buffer_size;
+- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
++ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
+
+ s->pcm_period_pointer += data_blocks;
+ if (s->pcm_period_pointer >= pcm->runtime->period_size) {
+@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
+ */
+ void amdtp_out_stream_update(struct amdtp_out_stream *s)
+ {
+- ACCESS_ONCE(s->source_node_id_field) =
++ ACCESS_ONCE_RW(s->source_node_id_field) =
+ (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
+ }
+ EXPORT_SYMBOL(amdtp_out_stream_update);
+diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
+index 537a9cb..8e8c8e9 100644
+--- a/sound/firewire/amdtp.h
++++ b/sound/firewire/amdtp.h
+@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
+ static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
+ struct snd_pcm_substream *pcm)
+ {
+- ACCESS_ONCE(s->pcm) = pcm;
++ ACCESS_ONCE_RW(s->pcm) = pcm;
+ }
+
+ /**
+diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
+index cd094ec..eca1277 100644
+--- a/sound/firewire/isight.c
++++ b/sound/firewire/isight.c
+@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
+ ptr += count;
+ if (ptr >= runtime->buffer_size)
+ ptr -= runtime->buffer_size;
+- ACCESS_ONCE(isight->buffer_pointer) = ptr;
++ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
+
+ isight->period_counter += count;
+ if (isight->period_counter >= runtime->period_size) {
+@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
+ if (err < 0)
+ return err;
+
+- ACCESS_ONCE(isight->pcm_active) = true;
++ ACCESS_ONCE_RW(isight->pcm_active) = true;
+
+ return 0;
+ }
+@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
+ {
+ struct isight *isight = substream->private_data;
+
+- ACCESS_ONCE(isight->pcm_active) = false;
++ ACCESS_ONCE_RW(isight->pcm_active) = false;
+
+ mutex_lock(&isight->mutex);
+ isight_stop_streaming(isight);
+@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+- ACCESS_ONCE(isight->pcm_running) = true;
++ ACCESS_ONCE_RW(isight->pcm_running) = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+- ACCESS_ONCE(isight->pcm_running) = false;
++ ACCESS_ONCE_RW(isight->pcm_running) = false;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
+index 733b014..56ce96f 100644
+--- a/sound/oss/sb_audio.c
++++ b/sound/oss/sb_audio.c
+@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
+ buf16 = (signed short *)(localbuf + localoffs);
+ while (c)
+ {
+- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+ if (copy_from_user(lbuf8,
+ userbuf+useroffs + p,
+ locallen))
+diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
+index 09d4648..cf234c7 100644
+--- a/sound/oss/swarm_cs4297a.c
++++ b/sound/oss/swarm_cs4297a.c
+@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
+ {
+ struct cs4297a_state *s;
+ u32 pwr, id;
+- mm_segment_t fs;
+ int rval;
+ #ifndef CONFIG_BCM_CS4297A_CSWARM
+ u64 cfg;
+@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
+ if (!rval) {
+ char *sb1250_duart_present;
+
++#if 0
++ mm_segment_t fs;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+-#if 0
+ val = SOUND_MASK_LINE;
+ mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
+ for (i = 0; i < ARRAY_SIZE(initvol); i++) {
+ val = initvol[i].vol;
+ mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
+ }
++ set_fs(fs);
+ // cs4297a_write_ac97(s, 0x18, 0x0808);
+ #else
+ // cs4297a_write_ac97(s, 0x5e, 0x180);
+ cs4297a_write_ac97(s, 0x02, 0x0808);
+ cs4297a_write_ac97(s, 0x18, 0x0808);
+ #endif
+- set_fs(fs);
+
+ list_add(&s->list, &cs4297a_devs);
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index faabaa5..9888f8b 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -850,14 +850,10 @@ find_codec_preset(struct hda_codec *codec)
+ mutex_unlock(&preset_mutex);
+
+ if (mod_requested < HDA_MODREQ_MAX_COUNT) {
+- char name[32];
+ if (!mod_requested)
+- snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
+- codec->vendor_id);
++ request_module("snd-hda-codec-id:%08x", codec->vendor_id);
+ else
+- snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
+- (codec->vendor_id >> 16) & 0xffff);
+- request_module(name);
++ request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
+ mod_requested++;
+ goto again;
+ }
+diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
+index 03ee4e3..be86b46 100644
+--- a/sound/pci/ymfpci/ymfpci_main.c
++++ b/sound/pci/ymfpci/ymfpci_main.c
+@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
+ if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
+ break;
+ }
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ __end:
+@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
+ continue;
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&chip->interrupt_sleep, &wait);
+- atomic_inc(&chip->interrupt_sleep_count);
++ atomic_inc_unchecked(&chip->interrupt_sleep_count);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(50));
+ remove_wait_queue(&chip->interrupt_sleep, &wait);
+ }
+@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
+ snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
+ spin_unlock(&chip->reg_lock);
+
+- if (atomic_read(&chip->interrupt_sleep_count)) {
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ wake_up(&chip->interrupt_sleep);
+ }
+ }
+@@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
+ spin_lock_init(&chip->reg_lock);
+ spin_lock_init(&chip->voice_lock);
+ init_waitqueue_head(&chip->interrupt_sleep);
+- atomic_set(&chip->interrupt_sleep_count, 0);
++ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 83c4bd5..f75658c 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -608,7 +608,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
+ {
+ struct fsl_ssi_private *ssi_private;
+ int ret = 0;
+- struct device_attribute *dev_attr = NULL;
++ device_attribute_no_const *dev_attr = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ const char *p, *sprop;
+ const uint32_t *iprop;
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index ee15337..ab0ec34 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -627,13 +627,15 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ rtd->pcm = pcm;
+ pcm->private_data = rtd;
+ if (platform->driver->ops) {
+- soc_pcm_ops.mmap = platform->driver->ops->mmap;
+- soc_pcm_ops.pointer = platform->driver->ops->pointer;
+- soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
+- soc_pcm_ops.copy = platform->driver->ops->copy;
+- soc_pcm_ops.silence = platform->driver->ops->silence;
+- soc_pcm_ops.ack = platform->driver->ops->ack;
+- soc_pcm_ops.page = platform->driver->ops->page;
++ pax_open_kernel();
++ *(void **)&soc_pcm_ops.mmap = platform->driver->ops->mmap;
++ *(void **)&soc_pcm_ops.pointer = platform->driver->ops->pointer;
++ *(void **)&soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
++ *(void **)&soc_pcm_ops.copy = platform->driver->ops->copy;
++ *(void **)&soc_pcm_ops.silence = platform->driver->ops->silence;
++ *(void **)&soc_pcm_ops.ack = platform->driver->ops->ack;
++ *(void **)&soc_pcm_ops.page = platform->driver->ops->page;
++ pax_close_kernel();
+ }
+
+ if (playback)
+diff --git a/sound/sound_core.c b/sound/sound_core.c
+index 6ce2778..f25c378 100644
+--- a/sound/sound_core.c
++++ b/sound/sound_core.c
+@@ -293,7 +293,7 @@ retry:
+ }
+
+ device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
+- NULL, s->name+6);
++ NULL, "%s", s->name+6);
+ return s->unit_minor;
+
+ fail:
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 0a7ca6c..f4b948c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -45,6 +45,7 @@ struct snd_urb_ops {
+ int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
+ };
++typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
+
+ struct snd_usb_substream {
+ struct snd_usb_stream *stream;
+@@ -96,7 +97,7 @@ struct snd_usb_substream {
+ struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
+ spinlock_t lock;
+
+- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
++ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
+ int last_frame_number; /* stored frame number */
+ int last_delay; /* stored delay */
+ };
+diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
+new file mode 100644
+index 0000000..50f2f2f
+--- /dev/null
++++ b/tools/gcc/.gitignore
+@@ -0,0 +1 @@
++size_overflow_hash.h
+diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
+new file mode 100644
+index 0000000..144dbee
+--- /dev/null
++++ b/tools/gcc/Makefile
+@@ -0,0 +1,45 @@
++#CC := gcc
++#PLUGIN_SOURCE_FILES := pax_plugin.c
++#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
++GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
++#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
++
++ifeq ($(PLUGINCC),$(HOSTCC))
++HOSTLIBS := hostlibs
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
++else
++HOSTLIBS := hostcxxlibs
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -ggdb -Wno-unused-parameter
++endif
++
++$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
++$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
++$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
++$(HOSTLIBS)-y += colorize_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
++$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
++
++always := $($(HOSTLIBS)-y)
++
++constify_plugin-objs := constify_plugin.o
++stackleak_plugin-objs := stackleak_plugin.o
++kallocstat_plugin-objs := kallocstat_plugin.o
++kernexec_plugin-objs := kernexec_plugin.o
++checker_plugin-objs := checker_plugin.o
++colorize_plugin-objs := colorize_plugin.o
++size_overflow_plugin-objs := size_overflow_plugin.o
++latent_entropy_plugin-objs := latent_entropy_plugin.o
++structleak_plugin-objs := structleak_plugin.o
++
++$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
++
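++# regenerate size_overflow_hash.h from the .data file whenever the data changes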
++quiet_cmd_build_size_overflow_hash = GENHASH $@
++ cmd_build_size_overflow_hash = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
++ $(call if_changed,build_size_overflow_hash)
++
++targets += size_overflow_hash.h
+diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
+new file mode 100644
+index 0000000..22f03c0
+--- /dev/null
++++ b/tools/gcc/checker_plugin.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to implement various sparse (source code checker) features
++ *
++ * TODO:
++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "target.h"
++
++extern void c_register_addr_space (const char *str, addr_space_t as);
++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
++extern enum machine_mode default_addr_space_address_mode (addr_space_t);
++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info checker_plugin_info = {
++ .version = "201111150100",
++ .help = NULL,
++};
++
++#define ADDR_SPACE_KERNEL 0
++#define ADDR_SPACE_FORCE_KERNEL 1
++#define ADDR_SPACE_USER 2
++#define ADDR_SPACE_FORCE_USER 3
++#define ADDR_SPACE_IOMEM 0
++#define ADDR_SPACE_FORCE_IOMEM 0
++#define ADDR_SPACE_PERCPU 0
++#define ADDR_SPACE_FORCE_PERCPU 0
++#define ADDR_SPACE_RCU 0
++#define ADDR_SPACE_FORCE_RCU 0
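++// __iomem, __percpu and __rcu are not separated yet: they alias address space 0 (see the TODO above)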
++
++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
++{
++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
++}
++
++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
++{
++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
++}
++
++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
++{
++ return default_addr_space_valid_pointer_mode(mode, as);
++}
++
++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
++{
++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
++}
++
++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
++{
++ return default_addr_space_legitimize_address(x, oldx, mode, as);
++}
++
++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
++{
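++ // besides identity, conversions are only allowed through the matching __force_* address space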
++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
++ return true;
++
++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
++ return true;
++
++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
++ return true;
++
++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
++ return true;
++
++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
++ return true;
++
++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
++ return true;
++
++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
++ return true;
++
++ return subset == superset;
++}
++
++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
++{
++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
++
++ return op;
++}
++
++static void register_checker_address_spaces(void *event_data, void *data)
++{
++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
++ c_register_addr_space("__user", ADDR_SPACE_USER);
++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
++// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
++
++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
++ targetm.addr_space.address_mode = checker_addr_space_address_mode;
++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
++ targetm.addr_space.subset_p = checker_addr_space_subset_p;
++ targetm.addr_space.convert = checker_addr_space_convert;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
++
++ for (i = 0; i < argc; ++i)
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++
++ if (TARGET_64BIT == 0)
++ return 0;
++
++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
++
++ return 0;
++}
+diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
+new file mode 100644
+index 0000000..414fe5e
+--- /dev/null
++++ b/tools/gcc/colorize_plugin.c
+@@ -0,0 +1,151 @@
++/*
++ * Copyright 2012-2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to colorize diagnostic output
++ *
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info colorize_plugin_info = {
++ .version = "201302112000",
++ .help = NULL,
++};
++
++#define GREEN "\033[32m\033[2m"
++#define LIGHTGREEN "\033[32m\033[1m"
++#define YELLOW "\033[33m\033[2m"
++#define LIGHTYELLOW "\033[33m\033[1m"
++#define RED "\033[31m\033[2m"
++#define LIGHTRED "\033[31m\033[1m"
++#define BLUE "\033[34m\033[2m"
++#define LIGHTBLUE "\033[34m\033[1m"
++#define BRIGHT "\033[m\033[1m"
++#define NORMAL "\033[m"
++
++static diagnostic_starter_fn old_starter;
++static diagnostic_finalizer_fn old_finalizer;
++
++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ const char *color;
++ char *newprefix;
++
++ switch (diagnostic->kind) {
++ case DK_NOTE:
++ color = LIGHTBLUE;
++ break;
++
++ case DK_PEDWARN:
++ case DK_WARNING:
++ color = LIGHTYELLOW;
++ break;
++
++ case DK_ERROR:
++ case DK_FATAL:
++ case DK_ICE:
++ case DK_PERMERROR:
++ case DK_SORRY:
++ color = LIGHTRED;
++ break;
++
++ default:
++ color = NORMAL;
++ }
++
++ old_starter(context, diagnostic);
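++ // wrap the prefix built by the original starter in the chosen color, resetting to NORMAL afterwards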
++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
++ return;
++ pp_destroy_prefix(context->printer);
++ pp_set_prefix(context->printer, newprefix);
++}
++
++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
++{
++ old_finalizer(context, diagnostic);
++}
++
++static void colorize_arm(void)
++{
++ old_starter = diagnostic_starter(global_dc);
++ old_finalizer = diagnostic_finalizer(global_dc);
++
++ diagnostic_starter(global_dc) = start_colorize;
++ diagnostic_finalizer(global_dc) = finalize_colorize;
++}
++
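++// pass body: re-install the colorizing diagnostic hooks if they are no longer in place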
++static unsigned int execute_colorize_rearm(void)
++{
++ if (diagnostic_starter(global_dc) == start_colorize)
++ return 0;
++
++ colorize_arm();
++ return 0;
++}
++
++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
++ .pass = {
++ .type = SIMPLE_IPA_PASS,
++ .name = "colorize_rearm",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = execute_colorize_rearm,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static void colorize_start_unit(void *gcc_data, void *user_data)
++{
++ colorize_arm();
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info colorize_rearm_pass_info = {
++ .pass = &pass_ipa_colorize_rearm.pass,
++ .reference_pass_name = "*free_lang_data",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
++ return 0;
++}
+diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
+new file mode 100644
+index 0000000..c17312d
+--- /dev/null
++++ b/tools/gcc/constify_plugin.c
+@@ -0,0 +1,560 @@
++/*
++ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/const_plugin/
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
++ * $ gcc -fplugin=constify_plugin.so test.c -O2
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "target.h"
++#include "langhooks.h"
++
++// should come from c-tree.h if only it were installed for gcc 4.5...
++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
++
++// unused type flag in all versions 4.5-4.8
++#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info const_plugin_info = {
++ .version = "201305231310",
++ .help = "no-constify\tturn off constification\n",
++};
++
++typedef struct {
++ bool has_fptr_field;
++ bool has_writable_field;
++ bool has_do_const_field;
++ bool has_no_const_field;
++} constify_info;
++
++static const_tree get_field_type(const_tree field)
++{
++ return strip_array_types(TREE_TYPE(field));
++}
++
++static bool is_fptr(const_tree field)
++{
++ const_tree ptr = get_field_type(field);
++
++ if (TREE_CODE(ptr) != POINTER_TYPE)
++ return false;
++
++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
++}
++
++/*
++ * determine whether the given structure type meets the requirements for automatic constification,
++ * including the constification attributes on nested structure types
++ */
++static void constifiable(const_tree node, constify_info *cinfo)
++{
++ const_tree field;
++
++ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
++
++ // e.g., pointer to structure fields while still constructing the structure type
++ if (TYPE_FIELDS(node) == NULL_TREE)
++ return;
++
++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
++ const_tree type = get_field_type(field);
++ enum tree_code code = TREE_CODE(type);
++
++ if (node == type)
++ continue;
++
++ if (is_fptr(field))
++ cinfo->has_fptr_field = true;
++ else if (!TREE_READONLY(field))
++ cinfo->has_writable_field = true;
++
++ if (code == RECORD_TYPE || code == UNION_TYPE) {
++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++ cinfo->has_do_const_field = true;
++ else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
++ cinfo->has_no_const_field = true;
++ else
++ constifiable(type, cinfo);
++ }
++ }
++}
++
++static bool constified(const_tree node)
++{
++ constify_info cinfo = {
++ .has_fptr_field = false,
++ .has_writable_field = false,
++ .has_do_const_field = false,
++ .has_no_const_field = false
++ };
++
++ gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
++
++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
++ gcc_assert(!TYPE_READONLY(node));
++ return false;
++ }
++
++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
++ gcc_assert(TYPE_READONLY(node));
++ return true;
++ }
++
++ constifiable(node, &cinfo);
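++ // absent an explicit do_const member, only types consisting purely of function pointers qualify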
++ if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
++ return false;
++
++ return TYPE_READONLY(node);
++}
++
++static void deconstify_tree(tree node);
++
++static void deconstify_type(tree type)
++{
++ tree field;
++
++ gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
++
++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
++ const_tree fieldtype = get_field_type(field);
++
++ // special case handling of simple ptr-to-same-array-type members
++ if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
++ const_tree ptrtype = TREE_TYPE(TREE_TYPE(field));
++
++ if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
++ continue;
++ if (TREE_TYPE(TREE_TYPE(field)) == type)
++ continue;
++ if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
++ TREE_TYPE(field) = copy_node(TREE_TYPE(field));
++ TREE_TYPE(TREE_TYPE(field)) = type;
++ }
++ continue;
++ }
++ if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
++ continue;
++ if (!constified(fieldtype))
++ continue;
++
++ deconstify_tree(field);
++ TREE_READONLY(field) = 0;
++ }
++ TYPE_READONLY(type) = 0;
++ C_TYPE_FIELDS_READONLY(type) = 0;
++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++ TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
++}
++
++static void deconstify_tree(tree node)
++{
++ tree old_type, new_type, field;
++
++ old_type = TREE_TYPE(node);
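++ // peel off (copying) any array layers to get at the underlying record/union type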
++ while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
++ node = TREE_TYPE(node) = copy_node(old_type);
++ old_type = TREE_TYPE(old_type);
++ }
++
++ gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
++ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
++
++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
++ DECL_FIELD_CONTEXT(field) = new_type;
++
++ deconstify_type(new_type);
++
++ TREE_TYPE(node) = new_type;
++}
++
++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ tree type;
++ constify_info cinfo = {
++ .has_fptr_field = false,
++ .has_writable_field = false,
++ .has_do_const_field = false,
++ .has_no_const_field = false
++ };
++
++ *no_add_attrs = true;
++ if (TREE_CODE(*node) == FUNCTION_DECL) {
++ error("%qE attribute does not apply to functions", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) == PARM_DECL) {
++ error("%qE attribute does not apply to function parameters", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) == VAR_DECL) {
++ error("%qE attribute does not apply to variables", name);
++ return NULL_TREE;
++ }
++
++ if (TYPE_P(*node)) {
++ *no_add_attrs = false;
++ type = *node;
++ } else {
++ gcc_assert(TREE_CODE(*node) == TYPE_DECL);
++ type = TREE_TYPE(*node);
++ }
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
++ error("%qE attribute applies to struct and union types only", name);
++ return NULL_TREE;
++ }
++
++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
++ error("%qE attribute is already applied to the type", name);
++ return NULL_TREE;
++ }
++
++ if (TYPE_P(*node)) {
++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++ error("%qE attribute is incompatible with 'do_const'", name);
++ return NULL_TREE;
++ }
++
++ constifiable(type, &cinfo);
++ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
++ deconstify_tree(*node);
++ TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
++ return NULL_TREE;
++ }
++
++ error("%qE attribute used on type that is not constified", name);
++ return NULL_TREE;
++}
++
++static void constify_type(tree type)
++{
++ TYPE_READONLY(type) = 1;
++ C_TYPE_FIELDS_READONLY(type) = 1;
++ TYPE_CONSTIFY_VISITED(type) = 1;
++// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
++}
++
++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ *no_add_attrs = true;
++ if (!TYPE_P(*node)) {
++ error("%qE attribute applies to types only", name);
++ return NULL_TREE;
++ }
++
++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
++ error("%qE attribute applies to struct and union types only", name);
++ return NULL_TREE;
++ }
++
++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
++ error("%qE attribute is already applied to the type", name);
++ return NULL_TREE;
++ }
++
++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
++ error("%qE attribute is incompatible with 'no_const'", name);
++ return NULL_TREE;
++ }
++
++ *no_add_attrs = false;
++ return NULL_TREE;
++}
++
++static struct attribute_spec no_const_attr = {
++ .name = "no_const",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = false,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_no_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = true
++#endif
++};
++
++static struct attribute_spec do_const_attr = {
++ .name = "do_const",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = false,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_do_const_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = true
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&no_const_attr);
++ register_attribute(&do_const_attr);
++}
++
++static void finish_type(void *event_data, void *data)
++{
++ tree type = (tree)event_data;
++ constify_info cinfo = {
++ .has_fptr_field = false,
++ .has_writable_field = false,
++ .has_do_const_field = false,
++ .has_no_const_field = false
++ };
++
++ if (type == NULL_TREE || type == error_mark_node)
++ return;
++
++ if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
++ return;
++
++ constifiable(type, &cinfo);
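++ // decide between constifying, deconstifying, or diagnosing, based on the attributes and field kinds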
++
++ if (TYPE_READONLY(type) && C_TYPE_FIELDS_READONLY(type)) {
++ if (!lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
++ return;
++ if (cinfo.has_writable_field)
++ return;
++ error("'do_const' attribute used on type that is%sconstified", cinfo.has_fptr_field ? " " : " not ");
++ return;
++ }
++
++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
++ if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
++ deconstify_type(type);
++ TYPE_CONSTIFY_VISITED(type) = 1;
++ } else
++ error("'no_const' attribute used on type that is not constified");
++ return;
++ }
++
++ if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
++ constify_type(type);
++ return;
++ }
++
++ if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
++ constify_type(type);
++ return;
++ }
++
++ deconstify_type(type);
++ TYPE_CONSTIFY_VISITED(type) = 1;
++}
++
++static void check_global_variables(void)
++{
++ struct varpool_node *node;
++
++#if BUILDING_GCC_VERSION <= 4007
++ for (node = varpool_nodes; node; node = node->next) {
++ tree var = node->decl;
++#else
++ FOR_EACH_VARIABLE(node) {
++ tree var = node->symbol.decl;
++#endif
++ tree type = TREE_TYPE(var);
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++ continue;
++
++ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
++ continue;
++
++ if (!TYPE_CONSTIFY_VISITED(type))
++ continue;
++
++ if (DECL_EXTERNAL(var))
++ continue;
++
++ if (DECL_INITIAL(var))
++ continue;
++
++ // this works around a gcc bug/feature where uninitialized globals
++ // are moved into the .bss section regardless of any constification
++ DECL_INITIAL(var) = build_constructor(type, NULL);
++// inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
++ }
++}
++
++static unsigned int check_local_variables(void)
++{
++ unsigned int ret = 0;
++ tree var;
++
++#if BUILDING_GCC_VERSION == 4005
++ tree vars;
++#else
++ unsigned int i;
++#endif
++
++#if BUILDING_GCC_VERSION == 4005
++ for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
++ var = TREE_VALUE(vars);
++#else
++ FOR_EACH_LOCAL_DECL(cfun, i, var) {
++#endif
++ tree type = TREE_TYPE(var);
++
++ gcc_assert(DECL_P(var));
++ if (is_global_var(var))
++ continue;
++
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++ continue;
++
++ if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
++ continue;
++
++ if (!TYPE_CONSTIFY_VISITED(type))
++ continue;
++
++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
++ ret = 1;
++ }
++ return ret;
++}
++
++static unsigned int check_variables(void)
++{
++ check_global_variables();
++ return check_local_variables();
++}
++
++static struct gimple_opt_pass pass_local_variable = {
++ {
++ .type = GIMPLE_PASS,
++ .name = "check_variables",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = check_variables,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static struct {
++ const char *name;
++ const char *asm_op;
++} sections[] = {
++ {".init.rodata", "\t.section\t.init.rodata,\"a\""},
++ {".ref.rodata", "\t.section\t.ref.rodata,\"a\""},
++ {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""},
++ {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""},
++ {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""},
++ {".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""},
++ {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""},
++ {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""},
++ {".data..read_only", "\t.section\t.data..read_only,\"a\""},
++};
++
++static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
++
++static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
++{
++ size_t i;
++
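++ // the rodata-style sections above get no section flags at all, in particular not SECTION_WRITE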
++ for (i = 0; i < ARRAY_SIZE(sections); i++)
++ if (!strcmp(sections[i].name, name))
++ return 0;
++ return old_section_type_flags(decl, name, reloc);
++}
++
++static void constify_start_unit(void *gcc_data, void *user_data)
++{
++// size_t i;
++
++// for (i = 0; i < ARRAY_SIZE(sections); i++)
++// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
++// sections[i].section = get_section(sections[i].name, 0, NULL);
++
++ old_section_type_flags = targetm.section_type_flags;
++ targetm.section_type_flags = constify_section_type_flags;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ bool constify = true;
++
++ struct register_pass_info local_variable_pass_info = {
++ .pass = &pass_local_variable.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!(strcmp(argv[i].key, "no-constify"))) {
++ constify = false;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ if (strcmp(lang_hooks.name, "GNU C")) {
++ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
++ constify = false;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
++ if (constify) {
++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
++ register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
+diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
+new file mode 100644
+index 0000000..e518932
+--- /dev/null
++++ b/tools/gcc/generate_size_overflow_hash.sh
+@@ -0,0 +1,94 @@
++#!/bin/bash
++
++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
++
++header1="size_overflow_hash.h"
++database="size_overflow_hash.data"
++n=65536
++
++usage() {
++cat <<EOF
++usage: $0 options
++OPTIONS:
++ -h|--help help
++ -o header file
++ -d database file
++ -n hash array size
++EOF
++ return 0
++}
++
++while true
++do
++ case "$1" in
++ -h|--help) usage && exit 0;;
++ -n) n=$2; shift 2;;
++ -o) header1="$2"; shift 2;;
++ -d) database="$2"; shift 2;;
++ --) shift 1; break ;;
++ *) break ;;
++ esac
++done
++
++create_defines() {
++ for i in `seq 0 31`
++ do
++ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
++ done
++ echo >> "$header1"
++}
++
++create_structs() {
++ rm -f "$header1"
++
++ create_defines
++
++ cat "$database" | while read data
++ do
++ data_array=($data)
++ struct_hash_name="${data_array[0]}"
++ funcn="${data_array[1]}"
++ params="${data_array[2]}"
++ next="${data_array[4]}"
++
++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
++
++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
++ echo -en "\t.param\t= " >> "$header1"
++ line=
++ for param_num in ${params//-/ };
++ do
++ line="${line}PARAM"$param_num"|"
++ done
++
++ echo -e "${line%?},\n};\n" >> "$header1"
++ done
++}
++
++create_headers() {
++ echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
++}
++
++create_array_elements() {
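++ # pad skipped slots with NULL so each array index equals the entry's hash value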
++ index=0
++ grep -v "nohasharray" $database | sort -n -k 4 | while read data
++ do
++ data_array=($data)
++ i="${data_array[3]}"
++ hash="${data_array[0]}"
++ while [[ $index -lt $i ]]
++ do
++ echo -e "\t["$index"]\t= NULL," >> "$header1"
++ index=$(($index + 1))
++ done
++ index=$(($index + 1))
++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
++ done
++ echo '};' >> $header1
++}
++
++create_structs
++create_headers
++create_array_elements
++
++exit 0
+diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
+new file mode 100644
+index 0000000..568b360
+--- /dev/null
++++ b/tools/gcc/kallocstat_plugin.c
+@@ -0,0 +1,170 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to find the distribution of k*alloc sizes
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static const char * const kalloc_functions[] = {
++ "__kmalloc",
++ "kmalloc",
++ "kmalloc_large",
++ "kmalloc_node",
++ "kmalloc_order",
++ "kmalloc_order_trace",
++ "kmalloc_slab",
++ "kzalloc",
++ "kzalloc_node",
++};
++
++static struct plugin_info kallocstat_plugin_info = {
++ .version = "201302112000",
++};
++
++static unsigned int execute_kallocstat(void);
++
++static struct gimple_opt_pass kallocstat_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kallocstat",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = execute_kallocstat,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0
++ }
++};
++
++static bool is_kalloc(const char *fnname)
++{
++ size_t i;
++
++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
++ if (!strcmp(fnname, kalloc_functions[i]))
++ return true;
++ return false;
++}
++
++static unsigned int execute_kallocstat(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match:
++ tree fndecl, size;
++ gimple call_stmt;
++ const char *fnname;
++
++ // is it a call
++ call_stmt = gsi_stmt(gsi);
++ if (!is_gimple_call(call_stmt))
++ continue;
++ fndecl = gimple_call_fndecl(call_stmt);
++ if (fndecl == NULL_TREE)
++ continue;
++ if (TREE_CODE(fndecl) != FUNCTION_DECL)
++ continue;
++
++ // is it a call to k*alloc
++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
++ if (!is_kalloc(fnname))
++ continue;
++
++ // is the size arg the result of a simple const assignment
++ size = gimple_call_arg(call_stmt, 0);
++ while (true) {
++ gimple def_stmt;
++ expanded_location xloc;
++ size_t size_val;
++
++ if (TREE_CODE(size) != SSA_NAME)
++ break;
++ def_stmt = SSA_NAME_DEF_STMT(size);
++ if (!def_stmt || !is_gimple_assign(def_stmt))
++ break;
++ if (gimple_num_ops(def_stmt) != 2)
++ break;
++ size = gimple_assign_rhs1(def_stmt);
++ if (!TREE_CONSTANT(size))
++ continue;
++ xloc = expand_location(gimple_location(def_stmt));
++ if (!xloc.file)
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ size_val = TREE_INT_CST_LOW(size);
++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
++ break;
++ }
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++//debug_tree(gimple_call_fn(call_stmt));
++//print_node(stderr, "pax", fndecl, 4);
++ }
++ }
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info kallocstat_pass_info = {
++ .pass = &kallocstat_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
++
++ return 0;
++}
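
Outside of GIMPLE, the interesting part of this pass is the backward walk from the size argument through single-operand assignments until a constant is reached; a toy standalone model of that walk follows (struct def and its fields are hypothetical stand-ins for SSA names and their defining statements):

/*
 * toy standalone model of the def-chain walk in execute_kallocstat():
 * follow single-operand assignments backwards until the size operand
 * is a constant; struct def and its fields are hypothetical stand-ins
 * for SSA names and their defining statements
 */
#include <stdio.h>

struct def {
	const struct def *rhs;	/* defining assignment, NULL for a constant */
	unsigned long cst;	/* the constant value when rhs == NULL */
};

static void report_kalloc_size(const struct def *size, const char *fnname)
{
	while (size->rhs)	/* size = gimple_assign_rhs1(def_stmt); */
		size = size->rhs;
	printf("kallocsize: %8lu %8lx %s\n", size->cst, size->cst, fnname);
}

int main(void)
{
	struct def n = { NULL, 192 };	/* n = 192; */
	struct def m = { &n, 0 };	/* m = n; */

	report_kalloc_size(&m, "kmalloc");	/* kmalloc(m, ...) */
	return 0;
}

In the real pass the constant is reported together with the call site, as in the fprintf() above.
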
+diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
+new file mode 100644
+index 0000000..a25306b
+--- /dev/null
++++ b/tools/gcc/kernexec_plugin.c
+@@ -0,0 +1,474 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
++ *
++ * TODO:
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++extern rtx emit_move_insn(rtx x, rtx y);
++
++#if BUILDING_GCC_VERSION <= 4006
++#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
++#endif
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++int plugin_is_GPL_compatible;
++
++static struct plugin_info kernexec_plugin_info = {
++ .version = "201308230150",
++ .help = "method=[bts|or]\tinstrumentation method\n"
++};
++
++static unsigned int execute_kernexec_reload(void);
++static unsigned int execute_kernexec_fptr(void);
++static unsigned int execute_kernexec_retaddr(void);
++static bool kernexec_cmodel_check(void);
++
++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
++static void (*kernexec_instrument_retaddr)(rtx);
++
++static struct gimple_opt_pass kernexec_reload_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kernexec_reload",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_reload,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++ }
++};
++
++static struct gimple_opt_pass kernexec_fptr_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "kernexec_fptr",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_fptr,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
++ }
++};
++
++static struct rtl_opt_pass kernexec_retaddr_pass = {
++ .pass = {
++ .type = RTL_PASS,
++ .name = "kernexec_retaddr",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = kernexec_cmodel_check,
++ .execute = execute_kernexec_retaddr,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
++ }
++};
++
++static bool kernexec_cmodel_check(void)
++{
++ tree section;
++
++ if (ix86_cmodel != CM_KERNEL)
++ return false;
++
++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
++ if (!section || !TREE_VALUE(section))
++ return true;
++
++ section = TREE_VALUE(TREE_VALUE(section));
++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
++ return true;
++
++ return false;
++}
++
++/*
++ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
++ */
++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
++{
++ gimple asm_movabs_stmt;
++
++ // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
++ gimple_asm_set_volatile(asm_movabs_stmt, true);
++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(asm_movabs_stmt);
++}
++
++/*
++ * find all asm() stmts that clobber r12 and add a reload of r12
++ */
++static unsigned int execute_kernexec_reload(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match: __asm__ ("" : : : "r12");
++ gimple asm_stmt;
++ size_t nclobbers;
++
++ // is it an asm ...
++ asm_stmt = gsi_stmt(gsi);
++ if (gimple_code(asm_stmt) != GIMPLE_ASM)
++ continue;
++
++ // ... clobbering r12
++ nclobbers = gimple_asm_nclobbers(asm_stmt);
++ while (nclobbers--) {
++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
++ continue;
++ kernexec_reload_fptr_mask(&gsi);
++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
++ break;
++ }
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
++ */
++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
++{
++ gimple assign_intptr, assign_new_fptr, call_stmt;
++ tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
++
++ call_stmt = gsi_stmt(*gsi);
++ old_fptr = gimple_call_fn(call_stmt);
++
++ // create temporary unsigned long variable used for bitops and cast fptr to it
++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(intptr);
++#endif
++ intptr = make_ssa_name(intptr, NULL);
++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
++ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++ update_stmt(assign_intptr);
++
++ // apply logical or to temporary unsigned long and bitmask
++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
++ orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
++ intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
++ assign_intptr = gimple_build_assign(intptr, orptr);
++ SSA_NAME_DEF_STMT(intptr) = assign_intptr;
++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
++ update_stmt(assign_intptr);
++
++ // cast temporary unsigned long back to a temporary fptr variable
++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(new_fptr);
++#endif
++ new_fptr = make_ssa_name(new_fptr, NULL);
++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
++ SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
++ update_stmt(assign_new_fptr);
++
++ // replace call stmt fn with the new fptr
++ gimple_call_set_fn(call_stmt, new_fptr);
++ update_stmt(call_stmt);
++}
++
++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
++{
++ gimple asm_or_stmt, call_stmt;
++ tree old_fptr, new_fptr, input, output;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *inputs = NULL;
++ VEC(tree, gc) *outputs = NULL;
++#else
++ vec<tree, va_gc> *inputs = NULL;
++ vec<tree, va_gc> *outputs = NULL;
++#endif
++
++ call_stmt = gsi_stmt(*gsi);
++ old_fptr = gimple_call_fn(call_stmt);
++
++ // create temporary fptr variable
++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(new_fptr);
++#endif
++ new_fptr = make_ssa_name(new_fptr, NULL);
++
++ // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++ input = build_tree_list(NULL_TREE, build_string(1, "0"));
++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
++ output = build_tree_list(NULL_TREE, build_string(2, "=r"));
++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, gc, inputs, input);
++ VEC_safe_push(tree, gc, outputs, output);
++#else
++ vec_safe_push(inputs, input);
++ vec_safe_push(outputs, output);
++#endif
++ asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
++ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
++ gimple_asm_set_volatile(asm_or_stmt, true);
++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
++ update_stmt(asm_or_stmt);
++
++ // replace call stmt fn with the new fptr
++ gimple_call_set_fn(call_stmt, new_fptr);
++ update_stmt(call_stmt);
++}
++
++/*
++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
++ */
++static unsigned int execute_kernexec_fptr(void)
++{
++ basic_block bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
++ tree fn;
++ gimple call_stmt;
++
++ // is it a call ...
++ call_stmt = gsi_stmt(gsi);
++ if (!is_gimple_call(call_stmt))
++ continue;
++ fn = gimple_call_fn(call_stmt);
++ if (TREE_CODE(fn) == ADDR_EXPR)
++ continue;
++ if (TREE_CODE(fn) != SSA_NAME)
++ gcc_unreachable();
++
++ // ... through a function pointer
++ if (SSA_NAME_VAR(fn) != NULL_TREE) {
++ fn = SSA_NAME_VAR(fn);
++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
++ debug_tree(fn);
++ gcc_unreachable();
++ }
++ }
++ fn = TREE_TYPE(fn);
++ if (TREE_CODE(fn) != POINTER_TYPE)
++ continue;
++ fn = TREE_TYPE(fn);
++ if (TREE_CODE(fn) != FUNCTION_TYPE)
++ continue;
++
++ kernexec_instrument_fptr(&gsi);
++
++//debug_tree(gimple_call_fn(call_stmt));
++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
++ }
++ }
++
++ return 0;
++}
++
++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
++static void kernexec_instrument_retaddr_bts(rtx insn)
++{
++ rtx btsq;
++ rtvec argvec, constraintvec, labelvec;
++ int line;
++
++ // create asm volatile("btsq $63,(%%rsp)":::)
++ argvec = rtvec_alloc(0);
++ constraintvec = rtvec_alloc(0);
++ labelvec = rtvec_alloc(0);
++ line = expand_location(RTL_LOCATION(insn)).line;
++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ MEM_VOLATILE_P(btsq) = 1;
++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
++ emit_insn_before(btsq, insn);
++}
++
++// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
++static void kernexec_instrument_retaddr_or(rtx insn)
++{
++ rtx orq;
++ rtvec argvec, constraintvec, labelvec;
++ int line;
++
++ // create asm volatile("orq %%r12,(%%rsp)":::)
++ argvec = rtvec_alloc(0);
++ constraintvec = rtvec_alloc(0);
++ labelvec = rtvec_alloc(0);
++ line = expand_location(RTL_LOCATION(insn)).line;
++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ MEM_VOLATILE_P(orq) = 1;
++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
++ emit_insn_before(orq, insn);
++}
++
++/*
++ * find all asm level function returns and forcibly set the highest bit of the return address
++ */
++static unsigned int execute_kernexec_retaddr(void)
++{
++ rtx insn;
++
++// if (stack_realign_drap)
++// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
++ // 1. find function returns
++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
++ // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
++ rtx body;
++
++ // is it a retn
++ if (!JUMP_P(insn))
++ continue;
++ body = PATTERN(insn);
++ if (GET_CODE(body) == PARALLEL)
++ body = XVECEXP(body, 0, 0);
++ if (!ANY_RETURN_P(body))
++ continue;
++ kernexec_instrument_retaddr(insn);
++ }
++
++// print_simple_rtl(stderr, get_insns());
++// print_rtl(stderr, get_insns());
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ struct register_pass_info kernexec_reload_pass_info = {
++ .pass = &kernexec_reload_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++ struct register_pass_info kernexec_fptr_pass_info = {
++ .pass = &kernexec_fptr_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++ struct register_pass_info kernexec_retaddr_pass_info = {
++ .pass = &kernexec_retaddr_pass.pass,
++ .reference_pass_name = "pro_and_epilogue",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
++
++ if (TARGET_64BIT == 0)
++ return 0;
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "method")) {
++ if (!argv[i].value) {
++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ continue;
++ }
++ if (!strcmp(argv[i].value, "bts")) {
++ kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
++ } else if (!strcmp(argv[i].value, "or")) {
++ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
++ fix_register("r12", 1, 1);
++ } else
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++ return 0;
++}
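
Both instrumentation methods come down to the same arithmetic: setting bit 63 of an address, either with btsq or by OR-ing in %r12, which the "or" method pins to 0x8000000000000000. A standalone sketch of the masking on plain integers (the example addresses are made up, and nothing is dereferenced):

/*
 * the bit-63 masking performed by both instrumentation methods above,
 * modelled on plain integers; the example addresses are made up and
 * nothing is dereferenced
 */
#include <stdio.h>
#include <stdint.h>

#define KERNEXEC_MASK 0x8000000000000000ULL	/* kept in %r12 by the "or" method */

static uint64_t mask_fptr(uint64_t fptr)
{
	return fptr | KERNEXEC_MASK;	/* btsq $63 / orq %r12 */
}

int main(void)
{
	uint64_t kernel_fptr = 0xffffffff81000000ULL;	/* bit 63 already set */
	uint64_t user_fptr   = 0x0000000000400000ULL;	/* forged userland pointer */

	printf("kernel: %016llx -> %016llx\n",
	       (unsigned long long)kernel_fptr,
	       (unsigned long long)mask_fptr(kernel_fptr));
	printf("user:   %016llx -> %016llx (non-canonical)\n",
	       (unsigned long long)user_fptr,
	       (unsigned long long)mask_fptr(user_fptr));
	return 0;
}

A kernel-range pointer already has bit 63 set and passes through unchanged; a forged userland pointer becomes non-canonical and faults on the first dereference, which is exactly what the plugin's comments rely on.
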
+diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
+new file mode 100644
+index 0000000..679b9ef
+--- /dev/null
++++ b/tools/gcc/latent_entropy_plugin.c
+@@ -0,0 +1,335 @@
++/*
++ * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help generate a little bit of entropy from program state,
++ * used during boot in the kernel
++ *
++ * TODO:
++ * - add ipa pass to identify not explicitly marked candidate functions
++ * - mix in more program state (function arguments/return values, loop variables, etc)
++ * - more instrumentation control via attribute parameters
++ *
++ * BUGS:
++ * - LTO needs -flto-partition=none for now
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++#include "tree-flow.h"
++#include "langhooks.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++int plugin_is_GPL_compatible;
++
++static tree latent_entropy_decl;
++
++static struct plugin_info latent_entropy_plugin_info = {
++ .version = "201308230230",
++ .help = NULL
++};
++
++static unsigned int execute_latent_entropy(void);
++static bool gate_latent_entropy(void);
++
++static struct gimple_opt_pass latent_entropy_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "latent_entropy",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = gate_latent_entropy,
++ .execute = execute_latent_entropy,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++ }
++};
++
++static unsigned HOST_WIDE_INT seed;
++static unsigned HOST_WIDE_INT get_random_const(void)
++{
++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
++ return seed;
++}
++
++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ switch (TREE_CODE(*node)) {
++ default:
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions and variables", name);
++ break;
++
++ case VAR_DECL:
++ if (DECL_INITIAL(*node)) {
++ *no_add_attrs = true;
++ error("variable %qD with %qE attribute must not be initialized", *node, name);
++ break;
++ }
++ DECL_INITIAL(*node) = build_int_cstu(long_long_unsigned_type_node, get_random_const());
++ break;
++
++ case FUNCTION_DECL:
++ break;
++ }
++
++ return NULL_TREE;
++}
++
++static struct attribute_spec latent_entropy_attr = {
++ .name = "latent_entropy",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_latent_entropy_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&latent_entropy_attr);
++}
++
++static bool gate_latent_entropy(void)
++{
++	tree attr;	/* avoid shadowing the attribute_spec of the same name */
++
++	attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
++	return attr != NULL_TREE;
++}
++
++static enum tree_code get_op(tree *rhs)
++{
++ static enum tree_code op;
++ unsigned HOST_WIDE_INT random_const;
++
++ random_const = get_random_const();
++
++ switch (op) {
++ case BIT_XOR_EXPR:
++ op = PLUS_EXPR;
++ break;
++
++ case PLUS_EXPR:
++ if (rhs) {
++ op = LROTATE_EXPR;
++ random_const &= HOST_BITS_PER_WIDE_INT - 1;
++ break;
++ }
++		/* fall through when no rhs is requested */
++ case LROTATE_EXPR:
++ default:
++ op = BIT_XOR_EXPR;
++ break;
++ }
++ if (rhs)
++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
++ return op;
++}
++
++static void perturb_local_entropy(basic_block bb, tree local_entropy)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, rhs;
++ enum tree_code op;
++
++ op = get_op(&rhs);
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
++ assign = gimple_build_assign(local_entropy, addxorrol);
++ gsi = gsi_after_labels(bb);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++//debug_bb(bb);
++}
++
++static void perturb_latent_entropy(basic_block bb, tree rhs)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++ tree addxorrol, temp;
++
++ // 1. create temporary copy of latent_entropy
++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(temp);
++#endif
++
++ // 2. read...
++ temp = make_ssa_name(temp, NULL);
++ assign = gimple_build_assign(temp, latent_entropy_decl);
++ SSA_NAME_DEF_STMT(temp) = assign;
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(latent_entropy_decl);
++#endif
++ gsi = gsi_after_labels(bb);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 3. ...modify...
++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
++ temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
++ assign = gimple_build_assign(temp, addxorrol);
++ SSA_NAME_DEF_STMT(temp) = assign;
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++
++ // 4. ...write latent_entropy
++ assign = gimple_build_assign(latent_entropy_decl, temp);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++}
++
++static unsigned int execute_latent_entropy(void)
++{
++ basic_block bb;
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ tree local_entropy;
++
++ if (!latent_entropy_decl) {
++ struct varpool_node *node;
++
++#if BUILDING_GCC_VERSION <= 4007
++ for (node = varpool_nodes; node; node = node->next) {
++ tree var = node->decl;
++#else
++ FOR_EACH_VARIABLE(node) {
++ tree var = node->symbol.decl;
++#endif
++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
++ continue;
++ latent_entropy_decl = var;
++// debug_tree(var);
++ break;
++ }
++ if (!latent_entropy_decl) {
++// debug_tree(current_function_decl);
++ return 0;
++ }
++ }
++
++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
++ // 1. create local entropy variable
++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(local_entropy);
++ mark_sym_for_renaming(local_entropy);
++#endif
++
++ // 2. initialize local entropy variable
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++ gsi = gsi_start_bb(bb);
++
++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
++// gimple_set_location(assign, loc);
++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++//debug_bb(bb);
++ bb = bb->next_bb;
++
++ // 3. instrument each BB with an operation on the local entropy variable
++ while (bb != EXIT_BLOCK_PTR) {
++ perturb_local_entropy(bb, local_entropy);
++//debug_bb(bb);
++ bb = bb->next_bb;
++	}
++
++ // 4. mix local entropy into the global entropy variable
++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
++//debug_bb(EXIT_BLOCK_PTR->prev_bb);
++ return 0;
++}
++
++static void start_unit_callback(void *gcc_data, void *user_data)
++{
++ tree latent_entropy_type;
++
++#if BUILDING_GCC_VERSION >= 4007
++ seed = get_random_seed(false);
++#else
++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
++ seed *= seed;
++#endif
++
++ if (in_lto_p)
++ return;
++
++ // extern volatile u64 latent_entropy
++ gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
++ latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
++
++ TREE_STATIC(latent_entropy_decl) = 1;
++ TREE_PUBLIC(latent_entropy_decl) = 1;
++ TREE_USED(latent_entropy_decl) = 1;
++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
++ DECL_EXTERNAL(latent_entropy_decl) = 1;
++ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
++ lang_hooks.decls.pushdecl(latent_entropy_decl);
++// DECL_ASSEMBLER_NAME(latent_entropy_decl);
++// varpool_finalize_decl(latent_entropy_decl);
++// varpool_mark_needed_node(latent_entropy_decl);
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ struct register_pass_info latent_entropy_pass_info = {
++ .pass = &latent_entropy_pass.pass,
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
++	register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
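
get_random_const() above is a 64-bit Galois LFSR stepped once per request, seeded from gcc's random seed in start_unit_callback(); a standalone sketch of the same recurrence (the initial seed value here is arbitrary):

/*
 * the Galois-LFSR step used by get_random_const() above, reproduced
 * standalone; the initial seed value here is arbitrary, the plugin
 * derives it from gcc's random seed in start_unit_callback()
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t seed = 0x123456789abcdef1ULL;

static uint64_t get_random_const(void)
{
	seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
	return seed;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("%016llx\n", (unsigned long long)get_random_const());
	return 0;
}

get_op() then cycles the mixing operation applied per basic block through XOR, addition and rotation, pairing each with a fresh constant from the same generator.
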
+diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
+new file mode 100644
+index 0000000..2d131cc
+--- /dev/null
++++ b/tools/gcc/size_overflow_hash.data
+@@ -0,0 +1,5998 @@
++intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
++ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
++storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
++compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
++carl9170_alloc_27 carl9170_alloc 1 27 NULL
++sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
++padzero_55 padzero 1 55 &sel_read_policyvers_55
++cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
++vis_data_count_prim_sec_64 vis_data_count_prim_sec 0 64 NULL
++__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
++crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 NULL
++snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
++load_msg_95 load_msg 2 95 NULL
++device_flush_iotlb_115 device_flush_iotlb 2-3 115 NULL
++ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL
++init_q_132 init_q 4 132 NULL
++ocfs2_local_alloc_slide_window_134 ocfs2_local_alloc_slide_window 0 134 NULL
++memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
++ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray
++tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153
++pci_request_selected_regions_169 pci_request_selected_regions 0 169 NULL
++iscsi_session_setup_196 iscsi_session_setup 4-5-3 196 NULL
++device_add_bin_attributes_205 device_add_bin_attributes 0 205 NULL
++store_cpufv_215 store_cpufv 4 215 NULL
++tcp_skb_seglen_221 tcp_skb_seglen 0 221 NULL
++proc_scsi_write_proc_267 proc_scsi_write_proc 3 267 NULL
++generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
++read_file_war_stats_292 read_file_war_stats 3 292 NULL
++platform_device_add_data_310 platform_device_add_data 3 310 NULL
++iwl_dbgfs_tx_statistics_read_314 iwl_dbgfs_tx_statistics_read 3 314 NULL nohasharray
++dn_setsockopt_314 dn_setsockopt 5 314 &iwl_dbgfs_tx_statistics_read_314
++next_node_allowed_318 next_node_allowed 1-0 318 NULL
++ath9k_wmi_cmd_327 ath9k_wmi_cmd 4 327 NULL
++map_urb_for_dma_332 map_urb_for_dma 0 332 NULL
++cmtp_send_interopmsg_376 cmtp_send_interopmsg 7 376 NULL
++sysfs_create_dir_398 sysfs_create_dir 0 398 NULL
++btmrvl_txdnldready_read_413 btmrvl_txdnldready_read 3 413 NULL
++lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
++insert_vm_struct_428 insert_vm_struct 0 428 NULL
++snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
++_alloc_get_attr_desc_470 _alloc_get_attr_desc 2 470 NULL
++pidlist_resize_496 pidlist_resize 2 496 NULL
++iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL
++smp_send_cmd_512 smp_send_cmd 3 512 NULL
++_snd_pcm_hw_param_first_516 _snd_pcm_hw_param_first 0 516 NULL
++ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
++ipv6_skip_exthdr_536 ipv6_skip_exthdr 0-2 536 NULL
++iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
++dle_count_543 dle_count 0 543 NULL
++devres_alloc_551 devres_alloc 2 551 NULL
++lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL
++snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
++dev_hard_header_565 dev_hard_header 0 565 NULL nohasharray
++start_isoc_chain_565 start_isoc_chain 2 565 &dev_hard_header_565
++compat_sys_preadv_583 compat_sys_preadv 3 583 NULL
++ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL
++ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
++sysfs_acpi_set_625 sysfs_acpi_set 3 625 NULL
++viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL
++ocfs2_num_free_extents_632 ocfs2_num_free_extents 0 632 NULL
++drbd_bm_find_next_643 drbd_bm_find_next 2 643 NULL
++unlink_queued_645 unlink_queued 4-3 645 NULL
++iwl_legacy_dbgfs_force_reset_read_649 iwl_legacy_dbgfs_force_reset_read 3 649 NULL
++dtim_interval_read_654 dtim_interval_read 3 654 NULL
++ceph_copy_user_to_page_vector_656 ceph_copy_user_to_page_vector 3-4 656 NULL
++div_u64_rem_672 div_u64_rem 0 672 NULL
++xfrm_aevent_msgsize_674 xfrm_aevent_msgsize 0 674 NULL
++rtl8169_try_rx_copy_705 rtl8169_try_rx_copy 3 705 NULL
++sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
++ddp_set_map_751 ddp_set_map 4 751 NULL
++dvb_video_write_754 dvb_video_write 3 754 NULL
++iwl_read_targ_mem_772 iwl_read_targ_mem 0 772 NULL
++jbd2_journal_dirty_metadata_784 jbd2_journal_dirty_metadata 0 784 NULL
++snd_pcm_drain_811 snd_pcm_drain 0 811 NULL
++if_writecmd_815 if_writecmd 2 815 NULL
++aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
++read_fifo_826 read_fifo 3 826 NULL
++o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
++iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
++intel_alloc_iova_883 intel_alloc_iova 3 883 NULL
++snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
++btmrvl_hsstate_read_920 btmrvl_hsstate_read 3 920 NULL
++v4l2_ctrl_handler_init_928 v4l2_ctrl_handler_init 2 928 NULL
++readw_931 readw 0 931 NULL
++carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
++__nodes_weight_956 __nodes_weight 2-0 956 NULL
++sys_msgrcv_959 sys_msgrcv 3 959 NULL
++memcmp_990 memcmp 0 990 NULL
++hdlcdev_rx_997 hdlcdev_rx 3 997 NULL
++free_ind_block_999 free_ind_block 0 999 NULL
++readreg_1017 readreg 0-1 1017 NULL
++pohmelfs_name_alloc_1036 pohmelfs_name_alloc 1 1036 NULL
++gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
++Read_hfc16_1070 Read_hfc16 0 1070 NULL
++mce_request_packet_1073 mce_request_packet 3 1073 NULL nohasharray
++mlx4_create_eq_1073 mlx4_create_eq 2 1073 &mce_request_packet_1073
++agp_create_memory_1075 agp_create_memory 1 1075 NULL
++_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
++inode_ref_info_1094 inode_ref_info 0 1094 NULL nohasharray
++llc_mac_hdr_init_1094 llc_mac_hdr_init 0 1094 &inode_ref_info_1094
++__arch_hweight8_1105 __arch_hweight8 0 1105 NULL
++__btrfs_cow_block_1125 __btrfs_cow_block 0-7 1125 NULL nohasharray
++__ext4_journal_stop_1125 __ext4_journal_stop 0 1125 &__btrfs_cow_block_1125
++i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
++pfkey_xfrm_policy2msg_size_1176 pfkey_xfrm_policy2msg_size 0 1176 NULL
++ipc_alloc_1192 ipc_alloc 1 1192 NULL
++ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
++i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
++i2cdev_read_1206 i2cdev_read 3 1206 NULL
++generic_file_splice_read_1220 generic_file_splice_read 4 1220 NULL
++ipw_packet_received_skb_1230 ipw_packet_received_skb 2 1230 NULL
++acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
++ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
++ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
++ocfs2_append_rec_to_path_1321 ocfs2_append_rec_to_path 0 1321 NULL
++ffs_1322 ffs 0 1322 NULL
++push_node_left_1327 push_node_left 0 1327 NULL
++carl9170_rx_stream_1334 carl9170_rx_stream 3 1334 NULL
++btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
++snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
++ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
++ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
++sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
++do_msgsnd_1387 do_msgsnd 4 1387 NULL
++zone_page_state_1393 zone_page_state 0 1393 NULL
++file_read_actor_1401 file_read_actor 4 1401 NULL
++hci_si_event_1404 hci_si_event 3 1404 NULL
++init_rs_internal_1436 init_rs_internal 1 1436 NULL
++stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
++tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
++xprt_alloc_1475 xprt_alloc 2 1475 NULL
++sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
++posix_acl_permission_1495 posix_acl_permission 0 1495 NULL
++fpregs_set_1497 fpregs_set 4 1497 NULL
++ocfs2_alloc_dinode_update_counts_1507 ocfs2_alloc_dinode_update_counts 0 1507 NULL
++tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
++vp_request_intx_1578 vp_request_intx 0 1578 NULL
++ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
++ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 NULL
++fc_frame_alloc_1596 fc_frame_alloc 2 1596 NULL
++packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
++i915_gem_execbuffer_wait_for_flips_1612 i915_gem_execbuffer_wait_for_flips 0 1612 NULL
++btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
++v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
++bluetooth_proc_write_1630 bluetooth_proc_write 3 1630 NULL
++btmrvl_hsmode_read_1647 btmrvl_hsmode_read 3 1647 NULL
++ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
++configfs_read_file_1683 configfs_read_file 3 1683 NULL
++pdu_write_u_1710 pdu_write_u 3 1710 NULL
++coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
++wl1271_rx_handle_data_1714 wl1271_rx_handle_data 3 1714 NULL nohasharray
++btrfs_dir_data_len_1714 btrfs_dir_data_len 0 1714 &wl1271_rx_handle_data_1714
++dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
++internal_create_group_1733 internal_create_group 0 1733 NULL
++ieee80211_new_mesh_header_1761 ieee80211_new_mesh_header 0 1761 NULL
++cosa_write_1774 cosa_write 3 1774 NULL
++__nodelist_scnprintf_1815 __nodelist_scnprintf 2-4-0 1815 NULL
++hidp_queue_report_1881 hidp_queue_report 3 1881 NULL
++sb_issue_zeroout_1884 sb_issue_zeroout 0 1884 NULL
++iwl_legacy_dbgfs_rxon_flags_read_1894 iwl_legacy_dbgfs_rxon_flags_read 3 1894 NULL
++ext3_fiemap_1936 ext3_fiemap 4 1936 NULL
++ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
++__assign_irq_vector_1961 __assign_irq_vector 0 1961 NULL
++ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
++store_iwmct_log_level_fw_1974 store_iwmct_log_level_fw 4 1974 NULL
++sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
++xfs_trans_count_vecs_1991 xfs_trans_count_vecs 0 1991 NULL nohasharray
++gpio_power_write_1991 gpio_power_write 3 1991 &xfs_trans_count_vecs_1991
++__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
++atomic_read_unchecked_1995 atomic_read_unchecked 0 1995 NULL
++ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
++write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
++BcmCopySection_2035 BcmCopySection 0-5 2035 NULL
++ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
++ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
++subbuf_read_actor_2071 subbuf_read_actor 3 2071 NULL
++__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL
++diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL nohasharray
++alloc_retstack_tasklist_2077 alloc_retstack_tasklist 0 2077 &diva_set_driver_dbg_mask_2077
++iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
++get_unaligned_le32_2092 get_unaligned_le32 0 2092 NULL
++idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
++audit_expand_2098 audit_expand 2-0 2098 NULL
++num_pages_spanned_2105 num_pages_spanned 0 2105 NULL
++iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
++ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
++enable_read_2117 enable_read 3 2117 NULL
++pcf50633_write_block_2124 pcf50633_write_block 3 2124 NULL
++snd_interval_refine_last_2127 snd_interval_refine_last 0 2127 NULL
++check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
++mlx4_init_icm_table_2151 mlx4_init_icm_table 4-5 2151 NULL
++iov_iter_count_2152 iov_iter_count 0 2152 NULL
++__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL
++ocfs2_et_sanity_check_2164 ocfs2_et_sanity_check 0 2164 NULL
++_ore_get_io_state_2166 _ore_get_io_state 5-3-4 2166 NULL
++picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL
++page_cache_async_readahead_2219 page_cache_async_readahead 6-5 2219 NULL nohasharray
++u32_array_read_2219 u32_array_read 3 2219 &page_cache_async_readahead_2219
++vhci_write_2224 vhci_write 3 2224 NULL
++__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL
++mlx4_buddy_init_2244 mlx4_buddy_init 2 2244 NULL
++ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
++netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
++sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
++do_update_counters_2259 do_update_counters 4 2259 NULL
++ocfs2_shift_tree_depth_2292 ocfs2_shift_tree_depth 0 2292 NULL
++kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
++picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL
++gart_map_page_2325 gart_map_page 4-3 2325 NULL
++__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
++create_subvol_2347 create_subvol 4 2347 NULL
++zr364xx_read_2354 zr364xx_read 3 2354 NULL
++viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
++rose_recvmsg_2368 rose_recvmsg 4 2368 NULL
++rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
++isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
++hfcpci_empty_fifo_2427 hfcpci_empty_fifo 4 2427 NULL
++tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
++arch_msi_check_device_2449 arch_msi_check_device 0 2449 NULL
++__sock_recvmsg_2467 __sock_recvmsg 0 2467 NULL
++b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
++xfrm_spdinfo_msgsize_2474 xfrm_spdinfo_msgsize 0 2474 NULL
++fc_fcp_send_data_2479 fc_fcp_send_data 4-3 2479 NULL
++update_pmkid_2481 update_pmkid 4 2481 NULL
++wiphy_new_2482 wiphy_new 2 2482 NULL
++squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
++dm_write_2513 dm_write 3 2513 NULL
++v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
++ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
++ata_host_start_2545 ata_host_start 0 2545 NULL
++gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
++pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
++store_pwm1_enable_2577 store_pwm1_enable 4 2577 NULL
++smk_write_logging_2618 smk_write_logging 3 2618 NULL
++nlmsg_msg_size_2623 nlmsg_msg_size 0-1 2623 NULL
++iwl4965_ucode_general_stats_read_2639 iwl4965_ucode_general_stats_read 3 2639 NULL
++lro_gen_skb_2644 lro_gen_skb 6 2644 NULL
++ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL
++ocfs2_rotate_subtree_right_2674 ocfs2_rotate_subtree_right 0 2674 NULL
++oti6858_write_2692 oti6858_write 4 2692 NULL
++memcpy_fromiovecend_2707 memcpy_fromiovecend 4-3 2707 NULL
++hid_report_raw_event_2762 hid_report_raw_event 4 2762 NULL
++mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL nohasharray
++bictcp_update_2771 bictcp_update 2 2771 &mon_bin_ioctl_2771
++__next_cpu_2782 __next_cpu 1 2782 NULL
++usbatm_pdu_length_2786 usbatm_pdu_length 0-1 2786 NULL
++device_add_attrs_2789 device_add_attrs 0 2789 NULL
++iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL
++sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
++snd_pcm_reset_2829 snd_pcm_reset 0 2829 NULL
++wait_for_avail_2847 wait_for_avail 0 2847 NULL
++ufs_free_fragments_2857 ufs_free_fragments 2 2857 NULL
++move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
++nla_padlen_2883 nla_padlen 1 2883 NULL
++cmm_write_2896 cmm_write 3 2896 NULL
++count_esp_combs_2926 count_esp_combs 0 2926 NULL
++nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
++tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
++ppp_cp_event_2965 ppp_cp_event 6 2965 NULL
++ocfs2_find_branch_target_2989 ocfs2_find_branch_target 0 2989 NULL
++p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
++store_cardr_2997 store_cardr 4 2997 NULL
++spin_time_accum_spinning_3020 spin_time_accum_spinning 1 3020 NULL
++depth_write_3021 depth_write 3 3021 NULL
++snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
++xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
++lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 NULL nohasharray
++iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 &lpfc_idiag_mbxacc_write_3038
++nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
++calculate_min_size_3053 calculate_min_size 0 3053 NULL
++__blk_end_bidi_request_3070 __blk_end_bidi_request 4-3 3070 NULL
++dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
++ocfs2_get_right_path_3097 ocfs2_get_right_path 0 3097 NULL
++rb_alloc_3102 rb_alloc 1 3102 NULL
++simple_write_to_buffer_3122 simple_write_to_buffer 2-5 3122 NULL
++fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
++b1_get_slice_3145 b1_get_slice 0 3145 NULL
++CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
++compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
++encrypted_instantiate_3168 encrypted_instantiate 3 3168 NULL
++uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
++find_free_extent_3178 find_free_extent 5-7 3178 NULL
++compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
++alloc_context_3194 alloc_context 1 3194 NULL
++codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL
++ath6kl_mgmt_tx_3230 ath6kl_mgmt_tx 9 3230 NULL
++btrfs_next_leaf_3232 btrfs_next_leaf 0 3232 NULL
++kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
++write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
++ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
++key_key_read_3241 key_key_read 3 3241 NULL
++shrink_delalloc_3250 shrink_delalloc 0 3250 NULL
++__ilog2_u64_3284 __ilog2_u64 0-1 3284 NULL
++iwl_legacy_dbgfs_traffic_log_write_3296 iwl_legacy_dbgfs_traffic_log_write 3 3296 NULL
++arvo_sysfs_write_3311 arvo_sysfs_write 6 3311 NULL
++__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 0-4-3 3314 NULL
++i915_gem_gtt_bind_object_3319 i915_gem_gtt_bind_object 0 3319 NULL
++compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
++de600_read_byte_3332 de600_read_byte 0 3332 NULL
++sctp_make_init_ack_3335 sctp_make_init_ack 4 3335 NULL
++read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
++sysfs_create_group_3339 sysfs_create_group 0 3339 NULL
++noack_write_3343 noack_write 3 3343 NULL
++gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
++scnprintf_3360 scnprintf 0-2 3360 NULL
++ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL
++send_stream_3397 send_stream 4 3397 NULL
++isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
++pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
++crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
++pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
++percpu_modalloc_3448 percpu_modalloc 2-3 3448 NULL
++s3fb_ddc_read_3451 s3fb_ddc_read 0 3451 NULL
++softsynth_write_3455 softsynth_write 3 3455 NULL
++jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 NULL nohasharray
++snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 &jffs2_acl_setxattr_3464
++alloc_skb_fclone_3467 alloc_skb_fclone 1 3467 NULL
++security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
++xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1-0 3496 NULL
++ieee80211_wx_set_gen_ie_rsl_3521 ieee80211_wx_set_gen_ie_rsl 3 3521 NULL
++btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
++b43legacy_read16_3561 b43legacy_read16 0 3561 NULL
++get_interface_3562 get_interface 0 3562 NULL
++alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
++evtchn_read_3569 evtchn_read 3 3569 NULL
++vc_resize_3585 vc_resize 3-2 3585 NULL
++compat_sys_semtimedop_3606 compat_sys_semtimedop 3 3606 NULL
++sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
++aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
++cm_copy_private_data_3649 cm_copy_private_data 2 3649 NULL
++i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
++ntfs_attr_make_non_resident_3694 ntfs_attr_make_non_resident 0 3694 NULL
++btmrvl_psmode_write_3703 btmrvl_psmode_write 3 3703 NULL nohasharray
++snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 &btmrvl_psmode_write_3703 nohasharray
++create_irq_3703 create_irq 0 3703 &snd_m3_assp_read_3703
++ci_ll_write_3740 ci_ll_write 4 3740 NULL
++ping_sendmsg_3782 ping_sendmsg 4 3782 NULL
++sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
++btrfs_alloc_chunk_3808 btrfs_alloc_chunk 0 3808 NULL
++ncp_file_write_3813 ncp_file_write 3 3813 NULL
++llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
++read_file_tx_chainmask_3829 read_file_tx_chainmask 3 3829 NULL
++stringify_nodemap_3842 stringify_nodemap 2 3842 NULL
++__buf_prepare_3846 __buf_prepare 0 3846 NULL
++ubi_eba_read_leb_3847 ubi_eba_read_leb 0 3847 NULL
++smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
++get_fd_set_3866 get_fd_set 1 3866 NULL
++apei_res_sub_3873 apei_res_sub 0 3873 NULL
++garp_attr_create_3883 garp_attr_create 3 3883 NULL
++uea_send_modem_cmd_3888 uea_send_modem_cmd 3 3888 NULL
++nvram_write_3894 nvram_write 3 3894 NULL
++comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
++vcs_write_3910 vcs_write 3 3910 NULL
++pm860x_read_device_3958 pm860x_read_device 3 3958 NULL
++i915_gem_object_get_fence_3981 i915_gem_object_get_fence 0 3981 NULL
++do_add_counters_3992 do_add_counters 3 3992 NULL
++userspace_status_4004 userspace_status 4 4004 NULL
++mei_write_4005 mei_write 3 4005 NULL nohasharray
++xfs_check_block_4005 xfs_check_block 4 4005 &mei_write_4005
++snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
++i915_gem_object_unbind_4016 i915_gem_object_unbind 0 4016 NULL
++blk_end_request_4024 blk_end_request 3 4024 NULL
++ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
++b1_get_word_4035 b1_get_word 0 4035 NULL
++i915_gpu_idle_4062 i915_gpu_idle 0 4062 NULL
++get_dmabuf_4065 get_dmabuf 2 4065 NULL
++sctp_make_asconf_4078 sctp_make_asconf 3 4078 NULL
++fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
++ab8500_address_write_4099 ab8500_address_write 3 4099 NULL
++tm6000_read_4151 tm6000_read 3 4151 NULL
++mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
++msg_bits_4158 msg_bits 0-3-4 4158 NULL
++get_alua_req_4166 get_alua_req 3 4166 NULL
++blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
++read_file_bool_4180 read_file_bool 3 4180 NULL
++ocfs2_find_cpos_for_right_leaf_4194 ocfs2_find_cpos_for_right_leaf 0 4194 NULL
++f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
++_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
++__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
++ext4_new_inode_4247 ext4_new_inode 5 4247 NULL
++dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 2-3-5 4303 NULL
++ath6kl_wmi_tcmd_test_report_rx_4314 ath6kl_wmi_tcmd_test_report_rx 3 4314 NULL
++count_strings_4315 count_strings 0 4315 NULL
++snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
++__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 NULL nohasharray
++lookup_string_4365 lookup_string 0 4365 &__copy_from_user_inatomic_4365
++sys_setdomainname_4373 sys_setdomainname 2 4373 NULL
++irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
++access_process_vm_4412 access_process_vm 0 4412 NULL nohasharray
++cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
++libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
++do_pages_stat_4437 do_pages_stat 2 4437 NULL
++memparse_4444 memparse 0 4444 NULL
++dn_alloc_send_pskb_4465 dn_alloc_send_pskb 2 4465 NULL
++at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
++recv_control_msg_4476 recv_control_msg 5 4476 NULL
++snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL
++sys_semtimedop_4486 sys_semtimedop 3 4486 NULL
++ocfs2_grow_tree_4492 ocfs2_grow_tree 0 4492 NULL nohasharray
++udp_sendmsg_4492 udp_sendmsg 4 4492 &ocfs2_grow_tree_4492
++vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
++l1oip_socket_parse_4507 l1oip_socket_parse 4 4507 NULL
++sys_llistxattr_4532 sys_llistxattr 3 4532 NULL
++Read_4560 Read 0 4560 NULL
++btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
++bch_alloc_4593 bch_alloc 1 4593 NULL
++ocfs2_refcount_lock_4595 ocfs2_refcount_lock 0 4595 NULL
++rbd_create_rw_ops_4605 rbd_create_rw_ops 2 4605 NULL
++iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
++virtqueue_add_buf_gfp_4662 virtqueue_add_buf_gfp 4-3 4662 NULL
++map_addr_4666 map_addr 6 4666 NULL
++skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
++cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
++short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
++kone_receive_4690 kone_receive 4 4690 NULL
++round_pipe_size_4701 round_pipe_size 1-0 4701 NULL
++cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
++trusted_instantiate_4710 trusted_instantiate 3 4710 NULL
++btmrvl_gpiogap_read_4718 btmrvl_gpiogap_read 3 4718 NULL
++ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
++show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
++find_next_best_node_4774 find_next_best_node 1-0 4774 NULL
++ip6_ufo_append_data_4780 ip6_ufo_append_data 5-6-7 4780 NULL
++ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
++__iio_allocate_sw_ring_buffer_4843 __iio_allocate_sw_ring_buffer 3-2 4843 NULL
++gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
++key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
++ocfs2_defrag_extent_4873 ocfs2_defrag_extent 2-3 4873 NULL
++hid_register_field_4874 hid_register_field 2-3 4874 NULL
++vga_arb_read_4886 vga_arb_read 3 4886 NULL
++sys_ipc_4889 sys_ipc 3 4889 NULL
++del_ptr_4894 del_ptr 0 4894 NULL
++sys_process_vm_writev_4928 sys_process_vm_writev 5-3 4928 NULL
++ocfs2_readahead_for_cow_4932 ocfs2_readahead_for_cow 4-3 4932 NULL
++ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
++devm_kzalloc_4966 devm_kzalloc 2 4966 NULL
++compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
++skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
++do_mincore_5018 do_mincore 0-2-1 5018 NULL
++mtd_device_parse_register_5024 mtd_device_parse_register 5 5024 NULL
++ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 3-2 5066 NULL
++__kmalloc_track_caller_5071 __kmalloc_track_caller 1 5071 NULL
++snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
++snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
++get_random_bytes_5091 get_random_bytes 2 5091 NULL nohasharray
++kfifo_copy_from_user_5091 kfifo_copy_from_user 4-3-0 5091 &get_random_bytes_5091 nohasharray
++blk_rq_sectors_5091 blk_rq_sectors 0 5091 &kfifo_copy_from_user_5091
++sound_write_5102 sound_write 3 5102 NULL
++qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL
++ufs_add_fragments_5144 ufs_add_fragments 2 5144 NULL
++ocfs2_inode_lock_full_nested_5148 ocfs2_inode_lock_full_nested 0 5148 NULL
++__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
++iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
++acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
++ds1wm_read_5200 ds1wm_read 0 5200 NULL
++pipe_set_size_5204 pipe_set_size 2 5204 NULL
++ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
++isdn_ppp_skb_push_5236 isdn_ppp_skb_push 2 5236 NULL
++iommu_domain_identity_map_5284 iommu_domain_identity_map 2-3 5284 NULL
++usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL
++r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL
++pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
++ad714x_i2c_read_5345 ad714x_i2c_read 4 5345 NULL
++ata_tlink_add_5349 ata_tlink_add 0 5349 NULL
++ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
++cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 2-3 5368 NULL
++bitmap_fold_5396 bitmap_fold 4 5396 NULL
++nilfs_palloc_entries_per_group_5418 nilfs_palloc_entries_per_group 0 5418 NULL
++xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
++xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
++cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
++tty_write_5494 tty_write 3 5494 NULL
++tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
++ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
++__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
++spidev_message_5518 spidev_message 3 5518 NULL
++sctp_make_op_error_space_5528 sctp_make_op_error_space 3 5528 NULL
++ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
++iommu_prepare_identity_map_5540 iommu_prepare_identity_map 2-3 5540 NULL
++brcmu_pkt_buf_get_skb_5556 brcmu_pkt_buf_get_skb 1 5556 NULL
++le_readq_5557 le_readq 0 5557 NULL
++inw_5558 inw 0 5558 NULL
++__first_dma_cap_5560 __first_dma_cap 0 5560 NULL
++fir16_create_5574 fir16_create 3 5574 NULL
++bioset_create_5580 bioset_create 1 5580 NULL
++domain_sg_mapping_5586 domain_sg_mapping 4 5586 NULL
++do_msgrcv_5590 do_msgrcv 4 5590 NULL
++ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
++hidp_output_raw_report_5629 hidp_output_raw_report 3 5629 NULL
++parse_arg_5657 parse_arg 2 5657 NULL
++ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
++posix_clock_register_5662 posix_clock_register 2 5662 NULL
++get_arg_5694 get_arg 3 5694 NULL
++ntfs_attr_record_resize_5720 ntfs_attr_record_resize 0 5720 NULL
++vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
++rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
++get_packet_5747 get_packet 3 5747 NULL
++sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
++mlx4_alloc_resize_buf_5778 mlx4_alloc_resize_buf 3 5778 NULL
++compat_sys_writev_5784 compat_sys_writev 3 5784 NULL
++__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
++lpfc_sli_issue_mbox_5792 lpfc_sli_issue_mbox 0 5792 NULL
++skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
++ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
++ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
++ixgb_check_copybreak_5847 ixgb_check_copybreak 3 5847 NULL
++setup_req_5848 setup_req 3 5848 NULL
++rx_q_entry_to_length_5855 rx_q_entry_to_length 0-1 5855 NULL
++compat_sys_move_pages_5861 compat_sys_move_pages 2 5861 NULL
++config_buf_5862 config_buf 0 5862 NULL
++ext4_ext_correct_indexes_5865 ext4_ext_correct_indexes 0 5865 NULL
++port_show_regs_5904 port_show_regs 3 5904 NULL
++uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
++lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
++edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
++tipc_subseq_alloc_5957 tipc_subseq_alloc 1 5957 NULL
++__apu_get_register_5967 __apu_get_register 0 5967 NULL
++ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
++jbd2_journal_stop_5979 jbd2_journal_stop 0 5979 NULL
++device_add_attributes_6058 device_add_attributes 0 6058 NULL
++send_video_command_6073 send_video_command 4 6073 NULL nohasharray
++sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 &send_video_command_6073
++logarithmic_accumulation_6094 logarithmic_accumulation 0-2-1 6094 NULL
++ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
++dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
++netfs_trans_alloc_6136 netfs_trans_alloc 2-4 6136 NULL
++ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
++wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
++i915_gem_execbuffer_move_to_gpu_6197 i915_gem_execbuffer_move_to_gpu 0 6197 NULL
++nfc_alloc_skb_6216 nfc_alloc_skb 1 6216 NULL
++v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
++mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
++ata_host_register_6229 ata_host_register 0 6229 NULL
++f_hidg_read_6238 f_hidg_read 3 6238 NULL
++fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
++ext4_ext_split_6249 ext4_ext_split 0 6249 NULL
++pcpu_next_pop_6277 pcpu_next_pop 4 6277 NULL
++snd_hda_override_conn_list_6282 snd_hda_override_conn_list 0 6282 NULL nohasharray
++xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
++iwl4965_rs_sta_dbgfs_stats_table_read_6289 iwl4965_rs_sta_dbgfs_stats_table_read 3 6289 NULL
++set_local_name_6310 set_local_name 4 6310 NULL
++hfa384x_inw_6329 hfa384x_inw 0 6329 NULL
++_proc_do_string_6376 _proc_do_string 2 6376 NULL
++global_reclaimable_pages_6378 global_reclaimable_pages 0 6378 NULL nohasharray
++osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 &global_reclaimable_pages_6378
++BcmFlash2xBulkRead_6395 BcmFlash2xBulkRead 0 6395 NULL
++bt_skb_alloc_6404 bt_skb_alloc 1 6404 NULL
++l2up_create_6430 l2up_create 3 6430 NULL
++ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
++__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
++ceph_sync_write_6466 ceph_sync_write 3 6466 NULL
++ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
++cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
++dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
++ttm_get_pages_6504 ttm_get_pages 4 6504 NULL
++mei_read_6507 mei_read 3 6507 NULL
++read_file_disable_ani_6536 read_file_disable_ani 3 6536 NULL
++rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
++wdm_read_6549 wdm_read 3 6549 NULL
++fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
++bt_skb_send_alloc_6581 bt_skb_send_alloc 2 6581 NULL
++snd_pcm_hw_refine_old_user_6586 snd_pcm_hw_refine_old_user 0 6586 NULL
++snmp_mib_init_6604 snmp_mib_init 2-3 6604 NULL
++ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
++dn_alloc_skb_6631 dn_alloc_skb 2 6631 NULL
++process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
++iwl_dbgfs_clear_traffic_statistics_write_6681 iwl_dbgfs_clear_traffic_statistics_write 3 6681 NULL
++ql_process_mac_rx_skb_6689 ql_process_mac_rx_skb 4 6689 NULL nohasharray
++pvscsi_allocate_rings_6689 pvscsi_allocate_rings 0 6689 &ql_process_mac_rx_skb_6689
++ieee80211_build_preq_ies_6691 ieee80211_build_preq_ies 0 6691 NULL
++btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
++ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
++mpeg_read_6708 mpeg_read 3 6708 NULL
++ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL
++video_proc_write_6724 video_proc_write 3 6724 NULL
++posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
++rds_rdma_pages_6735 rds_rdma_pages 0 6735 NULL
++ocfs2_insert_extent_6737 ocfs2_insert_extent 0 6737 NULL
++device_queue_depth_6771 device_queue_depth 0 6771 NULL
++kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
++iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
++ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
++hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
++ocfs2_calc_refcount_meta_credits_6802 ocfs2_calc_refcount_meta_credits 0 6802 NULL
++lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL
++make_8259A_irq_6828 make_8259A_irq 1 6828 NULL
++calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
++mon_bin_read_6841 mon_bin_read 3 6841 NULL
++snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
++ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 NULL nohasharray
++raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 &ieee80211_if_fmt_path_refresh_time_6888
++dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
++spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
++proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911
++__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
++ieee80211_rx_mgmt_probe_resp_6918 ieee80211_rx_mgmt_probe_resp 3 6918 NULL
++ieee80211_send_probe_req_6924 ieee80211_send_probe_req 6-4 6924 NULL
++cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
++ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
++qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
++ab3100_get_register_page_interruptible_6951 ab3100_get_register_page_interruptible 4 6951 NULL
++dn_ifaddr_nlmsg_size_6955 dn_ifaddr_nlmsg_size 0 6955 NULL nohasharray
++tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 &dn_ifaddr_nlmsg_size_6955
++pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
++crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
++request_key_async_6990 request_key_async 4 6990 NULL
++r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
++cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
++tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
++ld_usb_write_7022 ld_usb_write 3 7022 NULL
++wimax_msg_7030 wimax_msg 4 7030 NULL
++ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
++snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
++sctp_make_op_error_7057 sctp_make_op_error 6-5 7057 NULL
++hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
++event_enable_read_7074 event_enable_read 3 7074 NULL
++beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
++check_header_7108 check_header 0 7108 NULL
++do_async_mmap_readahead_7123 do_async_mmap_readahead 5 7123 NULL
++qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL
++send_mpa_reject_7135 send_mpa_reject 3 7135 NULL
++ipv6_recv_rxpmtu_7142 ipv6_recv_rxpmtu 3 7142 NULL
++ocfs2_get_left_path_7159 ocfs2_get_left_path 0 7159 NULL
++__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL nohasharray
++utf16_strsize_7203 utf16_strsize 0 7203 &__alloc_objio_seg_7203
++sys32_ipc_7238 sys32_ipc 3 7238 NULL
++hdlc_loop_7255 hdlc_loop 0 7255 NULL
++snd_mask_refine_7267 snd_mask_refine 0 7267 NULL
++f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL
++dma_ops_alloc_addresses_7272 dma_ops_alloc_addresses 3-4-5-0 7272 NULL
++get_string_7302 get_string 0 7302 NULL
++ieee80211_compatible_rates_7318 ieee80211_compatible_rates 0 7318 NULL
++wait_on_sync_kiocb_7327 wait_on_sync_kiocb 0 7327 NULL
++mgmt_control_7349 mgmt_control 3 7349 NULL
++t1_get_slice_7350 t1_get_slice 0 7350 NULL
++ext3_free_blocks_7362 ext3_free_blocks 3-4 7362 NULL
++ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
++schedule_timeout_7371 schedule_timeout 0 7371 NULL
++hweight_long_7388 hweight_long 1-0 7388 NULL
++sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
++readb_7401 readb 0 7401 NULL
++drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
++kvm_pv_mmu_op_7436 kvm_pv_mmu_op 3-2 7436 NULL
++ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
++rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL
++ms_rw_multi_sector_7459 ms_rw_multi_sector 3-4 7459 NULL
++__mutex_lock_common_7469 __mutex_lock_common 0 7469 NULL
++garp_request_join_7471 garp_request_join 4 7471 NULL
++compat_sys_msgrcv_7482 compat_sys_msgrcv 2 7482 NULL
++get_stats_7483 get_stats 0 7483 NULL
++snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
++sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL nohasharray
++ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 &sdhci_alloc_host_7509
++goal_in_my_reservation_7553 goal_in_my_reservation 3 7553 NULL
++ext4_ext_insert_extent_7576 ext4_ext_insert_extent 0 7576 NULL
++ext3_try_to_allocate_7590 ext3_try_to_allocate 3-5-0 7590 NULL
++create_dir_7614 create_dir 0 7614 NULL nohasharray
++groups_alloc_7614 groups_alloc 1 7614 &create_dir_7614
++set_connectable_7649 set_connectable 4 7649 NULL
++skb_copy_expand_7685 skb_copy_expand 3-2 7685 NULL nohasharray
++acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 1-2 7685 &skb_copy_expand_7685
++acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
++dev_write_7708 dev_write 3 7708 NULL
++dbg_check_cats_7713 dbg_check_cats 0 7713 NULL
++pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL
++manip_pkt_7741 manip_pkt 3 7741 NULL
++vxge_device_register_7752 vxge_device_register 4 7752 NULL
++pohmelfs_path_length_7758 pohmelfs_path_length 0 7758 NULL
++btrfs_force_ra_7761 btrfs_force_ra 5-4 7761 NULL
++osdv2_attr_list_elem_size_7763 osdv2_attr_list_elem_size 0-1 7763 NULL
++ubi_io_read_vid_hdr_7766 ubi_io_read_vid_hdr 0 7766 NULL
++paths_from_inode_7774 paths_from_inode 0 7774 NULL
++alloc_candev_7776 alloc_candev 1-2 7776 NULL
++dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
++bnx2_nvram_write_7790 bnx2_nvram_write 2-4 7790 NULL
++diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL
++ubifs_leb_read_7828 ubifs_leb_read 0 7828 NULL
++btrfs_find_space_for_alloc_7876 btrfs_find_space_for_alloc 2 7876 NULL
++config_desc_7878 config_desc 0 7878 NULL
++dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
++xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
++gfs2_tune_get_i_7903 gfs2_tune_get_i 0 7903 NULL
++ext3_group_extend_7911 ext3_group_extend 3 7911 NULL
++libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
++do_surface_dirty_sou_7920 do_surface_dirty_sou 7 7920 NULL
++f_hidg_write_7932 f_hidg_write 3 7932 NULL
++io_apic_setup_irq_pin_once_7934 io_apic_setup_irq_pin_once 1 7934 NULL
++smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
++sys_mbind_7990 sys_mbind 5 7990 NULL
++sep_lock_user_pages_8000 sep_lock_user_pages 2-3 8000 NULL
++extend_or_restart_transaction_8008 extend_or_restart_transaction 0 8008 NULL
++vcs_read_8017 vcs_read 3 8017 NULL
++normalize_up_8037 normalize_up 0-2-1 8037 NULL
++vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
++iser_rcv_completion_8048 iser_rcv_completion 2 8048 NULL
++ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
++leb_read_lock_8070 leb_read_lock 0 8070 NULL
++ext4_ext_map_blocks_8078 ext4_ext_map_blocks 0 8078 NULL
++venus_lookup_8121 venus_lookup 4 8121 NULL
++ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
++CalcCalPLL_8136 CalcCalPLL 0 8136 NULL
++ext_sd_execute_write_data_8175 ext_sd_execute_write_data 9 8175 NULL
++dma_map_area_8178 dma_map_area 3-2-5-0 8178 NULL
++__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
++ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
++__nf_nat_mangle_tcp_packet_8190 __nf_nat_mangle_tcp_packet 5-7 8190 NULL
++recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
++__ocfs2_lock_refcount_tree_8207 __ocfs2_lock_refcount_tree 0 8207 NULL
++rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
++ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
++play_iframe_8219 play_iframe 3 8219 NULL
++sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
++check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
++add_rx_skb_8257 add_rx_skb 3 8257 NULL
++t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
++init_cdev_8274 init_cdev 1 8274 NULL
++qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
++snd_pcm_update_state_8320 snd_pcm_update_state 0 8320 NULL
++construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
++ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
++__c4iw_init_resource_fifo_8334 __c4iw_init_resource_fifo 3 8334 NULL
++tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
++ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
++ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL
++ocfs2_decrease_refcount_rec_8385 ocfs2_decrease_refcount_rec 0 8385 NULL
++xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
++zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
++uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 NULL
++snd_usb_ctl_msg_8436 snd_usb_ctl_msg 8 8436 NULL
++generic_bin_search_8440 generic_bin_search 0 8440 NULL
++afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
++fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL
++dev_config_8506 dev_config 3 8506 NULL
++ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
++utf16_strnlen_8513 utf16_strnlen 0 8513 NULL
++snd_malloc_sgbuf_pages_8532 snd_malloc_sgbuf_pages 2 8532 NULL
++ocfs2_read_virt_blocks_8538 ocfs2_read_virt_blocks 2-3 8538 NULL
++profile_remove_8556 profile_remove 3 8556 NULL
++pci_msi_check_device_8570 pci_msi_check_device 0 8570 NULL nohasharray
++cache_slow_downcall_8570 cache_slow_downcall 2 8570 &pci_msi_check_device_8570
++isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
++tower_write_8580 tower_write 3 8580 NULL
++ocfs2_reserve_local_alloc_bits_8581 ocfs2_reserve_local_alloc_bits 0 8581 NULL
++tsi721_open_inb_mbox_8598 tsi721_open_inb_mbox 4 8598 NULL
++rtllib_MFIE_rate_len_8606 rtllib_MFIE_rate_len 0 8606 NULL
++shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
++it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
++scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
++fuse_send_write_pages_8636 fuse_send_write_pages 0 8636 NULL
++nf_nat_mangle_tcp_packet_8643 nf_nat_mangle_tcp_packet 5-7 8643 NULL
++generic_acl_set_8658 generic_acl_set 4 8658 NULL
++ath6kl_tm_rx_report_event_8660 ath6kl_tm_rx_report_event 3 8660 NULL
++lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
++skb_frag_size_8695 skb_frag_size 0 8695 NULL
++arcfb_write_8702 arcfb_write 3 8702 NULL
++i_size_read_8703 i_size_read 0 8703 NULL nohasharray
++init_header_8703 init_header 0 8703 &i_size_read_8703
++cifs_writedata_alloc_8710 cifs_writedata_alloc 1 8710 NULL
++ctrl_out_8712 ctrl_out 5-3 8712 NULL
++tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL
++jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
++ocfs2_find_path_8754 ocfs2_find_path 0 8754 NULL
++em28xx_init_isoc_8755 em28xx_init_isoc 3-2-4-0 8755 NULL
++yurex_write_8761 yurex_write 3 8761 NULL
++joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
++kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL
++aligned_nrpages_8791 aligned_nrpages 0-1-2 8791 NULL
++__bitmap_weight_8796 __bitmap_weight 2-0 8796 NULL
++cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
++intel_ring_begin_8808 intel_ring_begin 0 8808 NULL
++metronomefb_write_8823 metronomefb_write 3 8823 NULL
++get_queue_depth_8833 get_queue_depth 0 8833 NULL
++dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
++usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
++wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
++iwl_dbgfs_debug_level_write_8871 iwl_dbgfs_debug_level_write 3 8871 NULL
++compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
++ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray
++tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890
++sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
++sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL
++write_file_ani_8918 write_file_ani 3 8918 NULL
++layout_commit_8926 layout_commit 3 8926 NULL
++adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
++driver_stats_read_8944 driver_stats_read 3 8944 NULL
++read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
++seq_bitmap_list_8963 seq_bitmap_list 3 8963 NULL
++usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
++qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
++venus_mkdir_8967 venus_mkdir 4 8967 NULL
++vol_cdev_read_8968 vol_cdev_read 3 8968 NULL nohasharray
++seq_open_net_8968 seq_open_net 4 8968 &vol_cdev_read_8968
++bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
++btrfs_alloc_free_block_8986 btrfs_alloc_free_block 8 8986 NULL
++get_pipes_9008 get_pipes 0 9008 NULL
++snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
++fd_ioctl_9028 fd_ioctl 3 9028 NULL
++nla_put_9042 nla_put 3 9042 NULL
++snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 5-3 9061 NULL
++snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
++fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
++create_queues_9088 create_queues 3-2 9088 NULL
++ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
++caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL
++pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
++brcmf_sdbrcm_send_buf_9129 brcmf_sdbrcm_send_buf 6 9129 NULL
++apei_resources_merge_9149 apei_resources_merge 0 9149 NULL
++dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
++isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
++alloc_group_attrs_9194 alloc_group_attrs 2 9194 NULL nohasharray
++altera_swap_ir_9194 altera_swap_ir 2 9194 &alloc_group_attrs_9194
++sep_prepare_input_output_dma_table_9200 sep_prepare_input_output_dma_table 4-3-2 9200 NULL
++snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
++l2cap_create_connless_pdu_9222 l2cap_create_connless_pdu 3 9222 NULL
++sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
++ext4_mark_iloc_dirty_9239 ext4_mark_iloc_dirty 0 9239 NULL
++schedule_erase_9240 schedule_erase 0 9240 NULL
++cmtp_add_msgpart_9252 cmtp_add_msgpart 4 9252 NULL
++ocfs2_clear_ext_refcount_9256 ocfs2_clear_ext_refcount 0-4 9256 NULL
++tcf_csum_ipv4_icmp_9258 tcf_csum_ipv4_icmp 3 9258 NULL
++btrfs_search_slot_9264 btrfs_search_slot 0 9264 NULL
++ocfs2_merge_rec_right_9267 ocfs2_merge_rec_right 0 9267 NULL
++sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
++hdpvr_read_9273 hdpvr_read 3 9273 NULL
++iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
++ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
++sk_rmem_schedule_9331 sk_rmem_schedule 2 9331 NULL
++ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
++get_request_type_9393 get_request_type 0 9393 NULL nohasharray
++mlx4_bitmap_init_9393 mlx4_bitmap_init 5-2 9393 &get_request_type_9393
++read_9397 read 3 9397 NULL
++set_gpio_9412 set_gpio 0 9412 NULL
++bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
++ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
++kmalloc_array_9444 kmalloc_array 1-2 9444 NULL
++ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
++mcs_unwrap_mir_9455 mcs_unwrap_mir 3 9455 NULL
++ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
++agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL
++rbd_coll_end_req_9472 rbd_coll_end_req 3 9472 NULL
++__alloc_preds_9492 __alloc_preds 2 9492 NULL
++sock_recvmsg_9500 sock_recvmsg 0 9500 NULL
++lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL
++lp_write_9511 lp_write 3 9511 NULL
++mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL
++scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
++read_file_dma_9530 read_file_dma 3 9530 NULL
++ext3_alloc_branch_9534 ext3_alloc_branch 5 9534 NULL
++nlmsg_parse_9536 nlmsg_parse 2 9536 NULL
++pohmelfs_send_readpages_9537 pohmelfs_send_readpages 3 9537 NULL
++audit_log_n_untrustedstring_9548 audit_log_n_untrustedstring 3 9548 NULL
++readl_9557 readl 0 9557 NULL
++fw_node_create_9559 fw_node_create 2 9559 NULL
++kobj_map_9566 kobj_map 3-2 9566 NULL
++biovec_create_pools_9575 biovec_create_pools 2 9575 NULL
++ieee80211_tdls_mgmt_9581 ieee80211_tdls_mgmt 8 9581 NULL
++lguest_setup_irq_9587 lguest_setup_irq 1 9587 NULL
++do_sync_9604 do_sync 1 9604 NULL
++snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
++ocfs2_claim_suballoc_bits_9615 ocfs2_claim_suballoc_bits 0 9615 NULL
++saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
++acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL
++compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
++ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
++queue_received_packet_9657 queue_received_packet 5 9657 NULL
++snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
++ks8842_read16_9676 ks8842_read16 0 9676 NULL nohasharray
++dns_query_9676 dns_query 3-0 9676 &ks8842_read16_9676
++ea_get_unstuffed_9677 ea_get_unstuffed 0 9677 NULL
++qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
++__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
++is_hole_9694 is_hole 2 9694 NULL
++vx_transfer_end_9701 vx_transfer_end 0 9701 NULL
++fnb_9703 fnb 2-3-0 9703 NULL
++ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 NULL
++ddb_input_read_9743 ddb_input_read 3-0 9743 NULL
++__alloc_percpu_9764 __alloc_percpu 2-1 9764 NULL
++do_sigpending_9766 do_sigpending 2 9766 NULL
++btrfs_write_and_wait_transaction_9768 btrfs_write_and_wait_transaction 0 9768 NULL
++__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
++snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
++qlcnic_validate_ringparam_9794 qlcnic_validate_ringparam 1-2-3 9794 NULL
++parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL
++tpm_data_in_9802 tpm_data_in 0 9802 NULL
++ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
++udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
++ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
++cfg80211_send_deauth_9862 cfg80211_send_deauth 3 9862 NULL
++get_blk_table_len_9863 get_blk_table_len 0 9863 NULL
++pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
++mlx4_bitmap_alloc_range_9876 mlx4_bitmap_alloc_range 2-3 9876 NULL
++bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
++snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
++snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 5-3 9895 NULL
++iwm_rx_packet_alloc_9898 iwm_rx_packet_alloc 3 9898 NULL
++receive_DataRequest_9904 receive_DataRequest 3 9904 NULL
++norm_maxw_9907 norm_maxw 0 9907 NULL
++ext4_map_blocks_9916 ext4_map_blocks 0 9916 NULL
++root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL
++read_file_misc_9948 read_file_misc 3 9948 NULL
++set_rxd_buffer_pointer_9950 set_rxd_buffer_pointer 8 9950 NULL
++ext2_new_blocks_9954 ext2_new_blocks 2-0 9954 NULL
++csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 4-3 9957 NULL
++btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
++nfs_readdata_alloc_9990 nfs_readdata_alloc 1 9990 NULL
++kovaplus_send_10009 kovaplus_send 4 10009 NULL
++aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
++handle_request_10024 handle_request 9 10024 NULL
++rbd_coll_end_req_index_10041 rbd_coll_end_req_index 5 10041 NULL
++userpolicy_type_attrsize_10067 userpolicy_type_attrsize 0 10067 NULL
++cifs_llseek_10091 cifs_llseek 2 10091 NULL
++ufs_bitmap_search_10105 ufs_bitmap_search 0-3 10105 NULL
++get_elem_size_10110 get_elem_size 0-2 10110 NULL
++gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL
++offset_to_bit_10134 offset_to_bit 0 10134 NULL
++aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
++rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
++ol_chunk_entries_10159 ol_chunk_entries 0 10159 NULL
++asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
++kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL
++proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
++mlx4_ib_create_cq_10177 mlx4_ib_create_cq 2 10177 NULL
++jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
++register_ftrace_function_10218 register_ftrace_function 0 10218 NULL
++cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
++snd_pcm_lib_preallocate_pages1_10273 snd_pcm_lib_preallocate_pages1 2 10273 NULL
++snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
++highmem_dirtyable_memory_10301 highmem_dirtyable_memory 0-1 10301 NULL
++read_emulate_10310 read_emulate 2-4 10310 NULL
++ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
++ubi_leb_read_10328 ubi_leb_read 0 10328 NULL
++tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
++em28xx_read_reg_req_len_10340 em28xx_read_reg_req_len 0 10340 NULL
++ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
++whci_add_cap_10350 whci_add_cap 0 10350 NULL
++dbAllocAny_10354 dbAllocAny 0 10354 NULL
++ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
++sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
++ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
++led_classdev_register_10384 led_classdev_register 0 10384 NULL
++do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
++event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
++qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
++sel_write_disable_10511 sel_write_disable 3 10511 NULL nohasharray
++rbd_get_segment_10511 rbd_get_segment 0-3-4 10511 &sel_write_disable_10511
++osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
++rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
++snd_pcm_hw_params_user_10520 snd_pcm_hw_params_user 0 10520 NULL
++ocfs2_add_refcounted_extent_10526 ocfs2_add_refcounted_extent 6 10526 NULL
++snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 NULL nohasharray
++kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 &snd_pcm_lib_read_10536
++bcm_ioctl_fw_download_10548 bcm_ioctl_fw_download 0 10548 NULL
++i915_write_fence_reg_10551 i915_write_fence_reg 0 10551 NULL
++otp_read_10594 otp_read 5-4-2 10594 NULL
++supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
++ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
++cxgb3_get_cpl_reply_skb_10620 cxgb3_get_cpl_reply_skb 2 10620 NULL
++nes_alloc_resource_10624 nes_alloc_resource 3 10624 NULL
++write_file_rx_chainmask_10636 write_file_rx_chainmask 3 10636 NULL
++devm_request_irq_10640 devm_request_irq 0 10640 NULL
++__qbuf_mmap_10642 __qbuf_mmap 0 10642 NULL
++br_nlmsg_size_10645 br_nlmsg_size 0 10645 NULL
++ubi_io_write_vid_hdr_10660 ubi_io_write_vid_hdr 0 10660 NULL
++efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL
++ni65_alloc_mem_10664 ni65_alloc_mem 3 10664 NULL
++parport_write_10669 parport_write 0 10669 NULL
++tcp_push_10680 tcp_push 3 10680 NULL
++edge_write_10692 edge_write 4 10692 NULL
++inl_10708 inl 0 10708 NULL nohasharray
++selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 &inl_10708
++pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
++shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
++__iscsi_complete_pdu_10726 __iscsi_complete_pdu 4 10726 NULL
++spi_sync_10731 spi_sync 0 10731 NULL
++sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL nohasharray
++apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
++compat_sys_msgsnd_10738 compat_sys_msgsnd 2 10738 NULL
++ttm_ref_object_add_10748 ttm_ref_object_add 0 10748 NULL
++vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
++kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
++posix_acl_to_xattr_10767 posix_acl_to_xattr 0 10767 NULL
++loopback_bytepos_update_10776 loopback_bytepos_update 2 10776 NULL
++i915_gem_wait_for_error_10791 i915_gem_wait_for_error 0 10791 NULL
++snd_mask_value_10794 snd_mask_value 0 10794 NULL
++sys_bind_10799 sys_bind 3 10799 NULL
++aun_incoming_10814 aun_incoming 3 10814 NULL
++diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
++send_command_10832 send_command 4 10832 NULL
++lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
++ida_get_new_above_10853 ida_get_new_above 2 10853 NULL
++fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
++ol_chunk_blocks_10864 ol_chunk_blocks 0 10864 NULL
++snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
++drm_ht_insert_item_10877 drm_ht_insert_item 0 10877 NULL
++get_scq_10897 get_scq 2 10897 NULL
++cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
++tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
++__copy_from_user_10918 __copy_from_user 3-0 10918 NULL
++kobject_add_10919 kobject_add 0 10919 NULL
++iwl_calib_set_10944 iwl_calib_set 3 10944 NULL
++bm_entry_read_10976 bm_entry_read 3 10976 NULL
++sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
++xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
++mb_find_next_bit_11037 mb_find_next_bit 0-2-3 11037 NULL
++tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
++carl9170_handle_mpdu_11056 carl9170_handle_mpdu 3 11056 NULL
++tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
++snd_pcm_delay_11081 snd_pcm_delay 0 11081 NULL
++count_argc_11083 count_argc 0 11083 NULL
++kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
++tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
++page_offset_11120 page_offset 0 11120 NULL
++tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
++ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL nohasharray
++snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 &ioat2_alloc_ring_11172
++__swab16p_11220 __swab16p 0 11220 NULL
++hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
++ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
++construct_key_11329 construct_key 3 11329 NULL nohasharray
++__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
++next_segment_11330 next_segment 0-2-1 11330 NULL
++i915_max_freq_write_11350 i915_max_freq_write 3 11350 NULL
++sel_write_create_11353 sel_write_create 3 11353 NULL
++drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
++qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
++dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
++___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
++str_to_user_11411 str_to_user 2 11411 NULL
++trace_options_read_11419 trace_options_read 3 11419 NULL
++__irq_set_trigger_11422 __irq_set_trigger 0 11422 NULL nohasharray
++xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 &__irq_set_trigger_11422
++prepare_image_11424 prepare_image 0 11424 NULL
++bttv_read_11432 bttv_read 3 11432 NULL
++__bm_find_next_11447 __bm_find_next 2 11447 NULL
++pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
++sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
++sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
++kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
++setup_IO_APIC_irq_extra_11537 setup_IO_APIC_irq_extra 1 11537 NULL
++skb_cow_data_11565 skb_cow_data 0-2 11565 NULL
++mlx4_init_cmpt_table_11569 mlx4_init_cmpt_table 3 11569 NULL
++lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL
++oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
++snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
++fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
++hycapi_rx_capipkt_11602 hycapi_rx_capipkt 3 11602 NULL
++sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
++nla_total_size_11658 nla_total_size 0-1 11658 NULL
++ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
++btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
++iwm_ntf_calib_res_11686 iwm_ntf_calib_res 3 11686 NULL
++sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
++split_11691 split 2 11691 NULL
++snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
++blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
++i2c_master_recv_11734 i2c_master_recv 0-3 11734 NULL
++tcf_csum_ipv6_icmp_11738 tcf_csum_ipv6_icmp 4 11738 NULL
++nfsd4_get_drc_mem_11748 nfsd4_get_drc_mem 0-1-2 11748 NULL
++iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
++intel_map_page_11762 intel_map_page 4-3 11762 NULL
++rd_regl_11767 rd_regl 0 11767 NULL
++ocfs2_relink_block_group_11769 ocfs2_relink_block_group 0 11769 NULL
++ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
++btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
++pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
++umc_device_register_11824 umc_device_register 0 11824 NULL
++zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
++sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
++rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
++shmem_xattr_set_11843 shmem_xattr_set 4 11843 NULL
++unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
++ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
++l2cap_chan_send_11878 l2cap_chan_send 3 11878 NULL
++_l2_alloc_skb_11883 _l2_alloc_skb 1 11883 NULL
++xstateregs_get_11906 xstateregs_get 4 11906 NULL
++ti_write_11916 ti_write 4 11916 NULL
++kmalloc_slab_11917 kmalloc_slab 1 11917 NULL
++fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
++bitmap_remap_11929 bitmap_remap 5 11929 NULL
++atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
++dccp_feat_clone_sp_val_11942 dccp_feat_clone_sp_val 3 11942 NULL
++kvm_set_msr_common_11953 kvm_set_msr_common 3 11953 NULL
++f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
++usc_InReg_11976 usc_InReg 0 11976 NULL nohasharray
++split_node_11976 split_node 0 11976 &usc_InReg_11976
++BeceemFlashBulkRead_11979 BeceemFlashBulkRead 0 11979 NULL
++atmel_read16_11981 atmel_read16 0 11981 NULL
++ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
++pyra_send_12061 pyra_send 4 12061 NULL
++ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
++i915_gem_object_pin_12083 i915_gem_object_pin 0 12083 NULL
++alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
++xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
++rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
++generic_file_llseek_12139 generic_file_llseek 2 12139 NULL
++iwl4965_ucode_tx_stats_read_12143 iwl4965_ucode_tx_stats_read 3 12143 NULL
++rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
++get_idx_gc_leb_12148 get_idx_gc_leb 0 12148 NULL
++btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
++ocfs2_local_alloc_new_window_12153 ocfs2_local_alloc_new_window 0 12153 NULL
++vmbus_open_12154 vmbus_open 2-3 12154 NULL
++tt_update_changes_12155 tt_update_changes 3 12155 NULL
++dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
++ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
++compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
++ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
++pair_device_12188 pair_device 4 12188 NULL
++qt2160_read_block_12198 qt2160_read_block 4 12198 NULL
++bl_is_sector_init_12199 bl_is_sector_init 2 12199 NULL
++receive_copy_12216 receive_copy 3 12216 NULL
++snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
++aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 NULL
++ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
++ath_descdma_setup_12257 ath_descdma_setup 5 12257 NULL
++shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
++add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
++roccat_common_send_12284 roccat_common_send 4 12284 NULL
++note_last_dentry_12285 note_last_dentry 3 12285 NULL
++roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL
++iwm_notif_send_12295 iwm_notif_send 6 12295 NULL
++wrap_min_12303 wrap_min 0-1-2 12303 NULL
++__einj_error_trigger_12304 __einj_error_trigger 0 12304 NULL
++bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
++alloc_trace_probe_12323 alloc_trace_probe 6 12323 NULL
++tipc_msg_build_12326 tipc_msg_build 4 12326 NULL
++pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
++mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
++receive_packet_12367 receive_packet 2 12367 NULL
++xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
++btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL nohasharray
++populate_dir_12391 populate_dir 0 12391 &btrfs_file_extent_ram_bytes_12391
++ntfs_get_size_for_mapping_pairs_12413 ntfs_get_size_for_mapping_pairs 0 12413 NULL
++gfs2_llseek_12464 gfs2_llseek 2 12464 NULL
++skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
++x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
++rtllib_auth_challenge_12493 rtllib_auth_challenge 3 12493 NULL
++nfs_readdir_make_qstr_12509 nfs_readdir_make_qstr 3 12509 NULL
++qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
++xfs_get_extsz_hint_12531 xfs_get_extsz_hint 0 12531 NULL
++iwl_legacy_dbgfs_rx_statistics_read_12545 iwl_legacy_dbgfs_rx_statistics_read 3 12545 NULL
++WriteRegs_12569 WriteRegs 0 12569 NULL
++ceph_osdc_wait_request_12572 ceph_osdc_wait_request 0 12572 NULL
++hvc_alloc_12579 hvc_alloc 4 12579 NULL
++pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
++vhci_put_user_12604 vhci_put_user 4 12604 NULL
++fc_fcp_frame_alloc_12624 fc_fcp_frame_alloc 2 12624 NULL
++pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
++nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
++ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
++trusted_update_12664 trusted_update 3 12664 NULL
++sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
++sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
++ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
++inet6_prefix_nlmsg_size_12722 inet6_prefix_nlmsg_size 0 12722 NULL
++key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
++ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
++listxattr_12769 listxattr 3 12769 NULL
++sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
++ip_ufo_append_data_12775 ip_ufo_append_data 6-7-8 12775 NULL
++platform_create_bundle_12785 platform_create_bundle 4-6 12785 NULL
++scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
++xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
++v4l_bound_align_image_12833 v4l_bound_align_image 2-3-6-7 12833 NULL
++TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
++spidev_sync_12842 spidev_sync 0 12842 NULL
++twl4030_init_irq_12843 twl4030_init_irq 2 12843 NULL
++spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
++xfs_rtallocate_extent_exact_12865 xfs_rtallocate_extent_exact 3-5-9 12865 NULL
++get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
++ocfs2_hamming_encode_block_12904 ocfs2_hamming_encode_block 2 12904 NULL
++get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
++cxgbi_create_session_12922 cxgbi_create_session 2 12922 NULL nohasharray
++rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 &cxgbi_create_session_12922
++free_tind_blocks_12926 free_tind_blocks 0 12926 NULL
++iwl_legacy_dbgfs_sram_write_12932 iwl_legacy_dbgfs_sram_write 3 12932 NULL
++do_inode_permission_12946 do_inode_permission 0 12946 NULL
++bcsp_prepare_pkt_12961 bcsp_prepare_pkt 3 12961 NULL
++bm_status_write_12964 bm_status_write 3 12964 NULL
++sctp_make_chunk_12986 sctp_make_chunk 4 12986 NULL
++TransmitTcb_12989 TransmitTcb 4 12989 NULL
++__get_extent_inline_ref_13021 __get_extent_inline_ref 0 13021 NULL
++subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
++generic_segment_checks_13041 generic_segment_checks 0 13041 NULL nohasharray
++ubi_eba_atomic_leb_change_13041 ubi_eba_atomic_leb_change 0 13041 &generic_segment_checks_13041
++ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
++ctnetlink_timestamp_size_13060 ctnetlink_timestamp_size 0 13060 NULL nohasharray
++__dn_setsockopt_13060 __dn_setsockopt 5 13060 &ctnetlink_timestamp_size_13060
++sandybridge_write_fence_reg_13080 sandybridge_write_fence_reg 0 13080 NULL
++_ocfs2_free_suballoc_bits_13085 _ocfs2_free_suballoc_bits 0 13085 NULL
++irq_set_chip_and_handler_13088 irq_set_chip_and_handler 1 13088 NULL
++xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
++blk_rq_map_sg_13092 blk_rq_map_sg 0 13092 NULL
++mb_find_next_zero_bit_13100 mb_find_next_zero_bit 2-3-0 13100 NULL
++snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
++snd_pcm_lib_preallocate_pages_for_all_13112 snd_pcm_lib_preallocate_pages_for_all 4 13112 NULL
++xen_allocate_irq_dynamic_13116 xen_allocate_irq_dynamic 0 13116 NULL
++bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
++ip_make_skb_13129 ip_make_skb 5-6 13129 NULL
++blk_update_request_13146 blk_update_request 3 13146 NULL
++caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
++pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
++__cmpxchg64_13187 __cmpxchg64 0 13187 NULL
++comedi_read_13199 comedi_read 3 13199 NULL
++mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
++__nodes_fold_13215 __nodes_fold 4 13215 NULL
++svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
++wait_events_13243 wait_events 0 13243 NULL
++asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
++snd_emu10k1_fx8010_tram_setup_13248 snd_emu10k1_fx8010_tram_setup 2 13248 NULL
++fw_download_code_13249 fw_download_code 3 13249 NULL
++init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
++hostap_80211_get_hdrlen_13255 hostap_80211_get_hdrlen 0 13255 NULL
++bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
++simple_attr_write_13260 simple_attr_write 3 13260 NULL
++smctr_process_rx_packet_13270 smctr_process_rx_packet 2 13270 NULL
++carl9170_rx_13272 carl9170_rx 3 13272 NULL
++pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
++lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
++platform_device_add_resources_13289 platform_device_add_resources 3 13289 NULL
++find_get_pages_contig_13317 find_get_pages_contig 0 13317 NULL
++nf_nat_mangle_udp_packet_13321 nf_nat_mangle_udp_packet 5-7 13321 NULL
++us122l_ctl_msg_13330 us122l_ctl_msg 8 13330 NULL
++kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
++iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
++wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
++BcmSetActiveSection_13389 BcmSetActiveSection 0 13389 NULL
++sky2_receive_13407 sky2_receive 2 13407 NULL
++encrypted_update_13414 encrypted_update 3 13414 NULL nohasharray
++ocfs2_inode_lock_update_13414 ocfs2_inode_lock_update 0 13414 &encrypted_update_13414
++netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL nohasharray
++i915_gem_execbuffer_sync_rings_13417 i915_gem_execbuffer_sync_rings 0 13417 &netxen_alloc_sds_rings_13417
++keyring_read_13438 keyring_read 3 13438 NULL
++sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
++ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 7-8-9 13443 NULL
++ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 2 13512 NULL
++core_status_13515 core_status 4 13515 NULL
++sctp_tsnmap_mark_13527 sctp_tsnmap_mark 2 13527 NULL
++bm_init_13529 bm_init 2 13529 NULL
++request_any_context_irq_13530 request_any_context_irq 0 13530 NULL
++usb_hcd_link_urb_to_ep_13560 usb_hcd_link_urb_to_ep 0 13560 NULL
++ubifs_get_idx_gc_leb_13566 ubifs_get_idx_gc_leb 0 13566 NULL
++read_file_antenna_13574 read_file_antenna 3 13574 NULL
++cache_write_13589 cache_write 3 13589 NULL
++mpt_lan_receive_post_turbo_13592 mpt_lan_receive_post_turbo 2 13592 NULL
++irias_new_octseq_value_13596 irias_new_octseq_value 2 13596 NULL
++Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
++wm8994_bulk_write_13615 wm8994_bulk_write 3 13615 NULL
++pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
++iio_device_add_event_sysfs_13627 iio_device_add_event_sysfs 0 13627 NULL
++packet_snd_13634 packet_snd 3 13634 NULL
++__qbuf_userptr_13636 __qbuf_userptr 0 13636 NULL
++blk_msg_write_13655 blk_msg_write 3 13655 NULL
++nfs_idmap_lookup_id_13665 nfs_idmap_lookup_id 2 13665 NULL
++cache_downcall_13666 cache_downcall 3 13666 NULL
++ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
++usb_get_string_13693 usb_get_string 0 13693 NULL
++atomic_cmpxchg_13700 atomic_cmpxchg 0 13700 NULL
++ocfs2_cache_block_dealloc_13731 ocfs2_cache_block_dealloc 0 13731 NULL
++cfg80211_testmode_alloc_event_skb_13739 cfg80211_testmode_alloc_event_skb 2 13739 NULL
++audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
++ufs_dtog_13750 ufs_dtog 0-2 13750 NULL
++fb_sys_read_13778 fb_sys_read 3 13778 NULL
++CalcMainPLL_13811 CalcMainPLL 0 13811 NULL
++bat_ogm_aggregate_new_13813 bat_ogm_aggregate_new 2 13813 NULL
++random_read_13815 random_read 3 13815 NULL
++mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
++mtd_do_readoob_13850 mtd_do_readoob 4 13850 NULL
++evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
++compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
++snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
++qla2x00_get_ctx_sp_13912 qla2x00_get_ctx_sp 3 13912 NULL
++ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
++ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 NULL
++iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
++ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
++lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
++snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
++ext4_ind_calc_metadata_amount_13975 ext4_ind_calc_metadata_amount 2 13975 NULL
++qcam_read_13977 qcam_read 3 13977 NULL
++dsp_read_13980 dsp_read 2 13980 NULL
++bm_block_bits_13981 bm_block_bits 0 13981 NULL nohasharray
++dvb_demux_read_13981 dvb_demux_read 3 13981 &bm_block_bits_13981
++ieee80211_bss_info_update_13991 ieee80211_bss_info_update 4 13991 NULL
++create_files_14003 create_files 0 14003 NULL
++sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
++btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
++_rtl92s_firmware_downloadcode_14021 _rtl92s_firmware_downloadcode 3 14021 NULL
++dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
++read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
++ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
++utf8_to_utf16le_14057 utf8_to_utf16le 0 14057 NULL
++sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
++do_tcp_sendpages_14083 do_tcp_sendpages 4-3 14083 NULL
++do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
++compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
++nlmsg_len_14115 nlmsg_len 0 14115 NULL
++gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
++print_input_mask_14168 print_input_mask 3-0 14168 NULL
++ocfs2_split_and_insert_14171 ocfs2_split_and_insert 0 14171 NULL
++ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
++datafab_read_data_14186 datafab_read_data 4 14186 NULL
++tcp_manip_pkt_14202 tcp_manip_pkt 2 14202 NULL
++alloc_async_14208 alloc_async 1 14208 NULL
++ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
++sys_kexec_load_14222 sys_kexec_load 2 14222 NULL
++dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
++snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 4 14245 NULL
++ext4_journal_restart_14251 ext4_journal_restart 0 14251 NULL
++radix_tree_prev_hole_14252 radix_tree_prev_hole 0-2 14252 NULL
++ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
++add_numbered_child_14273 add_numbered_child 5 14273 NULL
++OS_mem_token_alloc_14276 OS_mem_token_alloc 1 14276 NULL
++em28xx_i2c_eeprom_14280 em28xx_i2c_eeprom 3 14280 NULL
++snd_seq_oss_readq_new_14283 snd_seq_oss_readq_new 2 14283 NULL
++audit_send_reply_14292 audit_send_reply 7 14292 NULL
++rr_status_14293 rr_status 5 14293 NULL
++read_default_ldt_14302 read_default_ldt 2 14302 NULL
++xfs_qm_qino_alloc_14309 xfs_qm_qino_alloc 3 14309 NULL
++i915_gem_object_finish_gpu_14312 i915_gem_object_finish_gpu 0 14312 NULL
++oo_objects_14319 oo_objects 0 14319 NULL
++iwl_legacy_dbgfs_interrupt_read_14324 iwl_legacy_dbgfs_interrupt_read 3 14324 NULL
++p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
++snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
++acpi_get_override_irq_14381 acpi_get_override_irq 1 14381 NULL
++ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
++smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
++first_logical_byte_14403 first_logical_byte 0 14403 NULL
++mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
++get_kcore_size_14425 get_kcore_size 0 14425 NULL
++gart_alloc_coherent_14437 gart_alloc_coherent 2 14437 NULL
++check_lpt_crc_14442 check_lpt_crc 0 14442 NULL
++block_size_14443 block_size 0 14443 NULL
++snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
++udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
++ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
++cmd_complete_14502 cmd_complete 5 14502 NULL
++ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
++prepare_data_14536 prepare_data 3 14536 NULL nohasharray
++ep0_write_14536 ep0_write 3 14536 &prepare_data_14536 nohasharray
++dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
++register_trace_sched_switch_14545 register_trace_sched_switch 0 14545 NULL
++l2cap_send_cmd_14548 l2cap_send_cmd 4 14548 NULL
++picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
++nfqnl_mangle_14583 nfqnl_mangle 2 14583 NULL
++idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
++ocfs2_steal_meta_14602 ocfs2_steal_meta 0 14602 NULL
++ocfs2_trim_group_14641 ocfs2_trim_group 4-3 14641 NULL
++dbJoin_14644 dbJoin 0 14644 NULL
++profile_replace_14652 profile_replace 3 14652 NULL
++min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
++ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
++u_audio_playback_14709 u_audio_playback 3 14709 NULL
++vfd_write_14717 vfd_write 3 14717 NULL
++ext4_da_map_blocks_14723 ext4_da_map_blocks 2 14723 NULL nohasharray
++inet_listen_14723 inet_listen 2 14723 &ext4_da_map_blocks_14723
++__blk_end_request_14729 __blk_end_request 3 14729 NULL nohasharray
++do_mmap_14729 do_mmap 0 14729 &__blk_end_request_14729
++rh_urb_enqueue_14733 rh_urb_enqueue 0 14733 NULL
++store_camera_14751 store_camera 4 14751 NULL
++sta_dev_read_14782 sta_dev_read 3 14782 NULL
++keys_proc_write_14792 keys_proc_write 3 14792 NULL nohasharray
++cp_tm1217_read_14792 cp_tm1217_read 3 14792 &keys_proc_write_14792
++ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
++__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
++nfs_parse_server_name_14800 nfs_parse_server_name 2 14800 NULL
++snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 NULL nohasharray
++hpet_readl_14801 hpet_readl 0 14801 &snd_als300_gcr_read_14801
++__i2400ms_rx_get_size_14826 __i2400ms_rx_get_size 0 14826 NULL
++do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
++__mutex_fastpath_lock_retval_14844 __mutex_fastpath_lock_retval 0 14844 NULL
++lcd_write_14857 lcd_write 3 14857 NULL nohasharray
++__krealloc_14857 __krealloc 2 14857 &lcd_write_14857
++get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
++acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
++ubi_leb_change_14899 ubi_leb_change 0 14899 NULL
++krealloc_14908 krealloc 2 14908 NULL
++__arch_hweight64_14923 __arch_hweight64 0 14923 NULL
++store_sys_wmi_14934 store_sys_wmi 4 14934 NULL
++ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
++queue_cnt_14951 queue_cnt 0 14951 NULL
++unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
++videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
++help_14971 help 4 14971 NULL
++mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
++setkey_14987 setkey 3 14987 NULL
++store_touchpad_15003 store_touchpad 4 15003 NULL
++blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
++irq_get_next_irq_15053 irq_get_next_irq 1-0 15053 NULL
++store_lslvl_15059 store_lslvl 4 15059 NULL
++nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
++ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
++pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
++hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
++start_port_15124 start_port 0 15124 NULL
++perf_ctx_adjust_freq_15133 perf_ctx_adjust_freq 2 15133 NULL
++ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
++iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
++reserve_resources_15194 reserve_resources 3 15194 NULL
++bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
++udf_bitmap_new_block_15230 udf_bitmap_new_block 5 15230 NULL nohasharray
++nlmsg_total_size_15230 nlmsg_total_size 0-1 15230 &udf_bitmap_new_block_15230
++variax_alloc_sysex_buffer_15237 variax_alloc_sysex_buffer 3 15237 NULL
++iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
++simple_strtol_15273 simple_strtol 0 15273 NULL
++fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
++sys_connect_15291 sys_connect 3 15291 NULL
++arch_enable_uv_irq_15294 arch_enable_uv_irq 2 15294 NULL
++acpi_ev_create_gpe_block_15297 acpi_ev_create_gpe_block 5 15297 NULL
++ocfs2_read_refcount_block_15305 ocfs2_read_refcount_block 0 15305 NULL
++fcoe_ctlr_send_keep_alive_15308 fcoe_ctlr_send_keep_alive 3 15308 NULL
++__ocfs2_remove_xattr_range_15330 __ocfs2_remove_xattr_range 4-3-5 15330 NULL
++ioread16_15342 ioread16 0 15342 NULL
++alloc_ring_15345 alloc_ring 4-2 15345 NULL
++acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
++compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
++domain_flush_pages_15379 domain_flush_pages 2-3 15379 NULL
++alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
++get_modalias_15406 get_modalias 2 15406 NULL
++__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
++tcp_mtu_to_mss_15438 tcp_mtu_to_mss 0-2 15438 NULL
++hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
++__mutex_lock_killable_slowpath_15472 __mutex_lock_killable_slowpath 0 15472 NULL
++iwl_legacy_dbgfs_wd_timeout_write_15478 iwl_legacy_dbgfs_wd_timeout_write 3 15478 NULL
++insert_old_idx_znode_15500 insert_old_idx_znode 0 15500 NULL
++zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
++ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
++p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
++ql_process_mac_rx_page_15543 ql_process_mac_rx_page 4 15543 NULL
++xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
++ieee80211_amsdu_to_8023s_15561 ieee80211_amsdu_to_8023s 5 15561 NULL
++mlx4_buf_alloc_15572 mlx4_buf_alloc 2 15572 NULL nohasharray
++snd_pcm_channel_info_15572 snd_pcm_channel_info 0 15572 &mlx4_buf_alloc_15572
++persistent_status_15574 persistent_status 4 15574 NULL
++bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
++vme_user_write_15587 vme_user_write 3 15587 NULL
++ocfs2_truncate_rec_15595 ocfs2_truncate_rec 0-7 15595 NULL
++get_event_length_15598 get_event_length 0 15598 NULL
++sx150x_install_irq_chip_15609 sx150x_install_irq_chip 3 15609 NULL
++iommu_device_max_index_15620 iommu_device_max_index 0-3-2-1 15620 NULL nohasharray
++compat_fillonedir_15620 compat_fillonedir 3 15620 &iommu_device_max_index_15620
++dsp_cmx_send_member_15625 dsp_cmx_send_member 2 15625 NULL
++proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
++tomoyo_scan_bprm_15642 tomoyo_scan_bprm 4-2 15642 NULL
++joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
++xsd_read_15653 xsd_read 3 15653 NULL
++compat_sys_fcntl_15654 compat_sys_fcntl 3 15654 NULL
++unix_bind_15668 unix_bind 3 15668 NULL
++dm_read_15674 dm_read 3 15674 NULL
++i915_gem_object_set_to_cpu_domain_15705 i915_gem_object_set_to_cpu_domain 0 15705 NULL
++inet6_if_nlmsg_size_15711 inet6_if_nlmsg_size 0 15711 NULL
++ocfs2_split_tree_15716 ocfs2_split_tree 0-5 15716 NULL
++HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
++sk_wmem_schedule_15759 sk_wmem_schedule 2 15759 NULL
++ftrace_profile_init_cpu_15761 ftrace_profile_init_cpu 0 15761 NULL
++bitmap_search_next_usable_block_15762 bitmap_search_next_usable_block 3-1-0 15762 NULL
++msi_alloc_irte_15798 msi_alloc_irte 3 15798 NULL
++smk_read_direct_15803 smk_read_direct 3 15803 NULL
++gnttab_expand_15817 gnttab_expand 1 15817 NULL
++afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
++table_size_15851 table_size 0-1-2 15851 NULL
++ubi_io_write_15870 ubi_io_write 0 15870 NULL nohasharray
++media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
++ddr_init_15874 ddr_init 0 15874 NULL
++__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
++nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
++native_read_msr_15905 native_read_msr 0 15905 NULL
++parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
++power_read_15939 power_read 3 15939 NULL
++lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL nohasharray
++i2c_write_15948 i2c_write 0 15948 &lpfc_idiag_drbacc_read_15948
++snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
++calculate_max_size_15977 calculate_max_size 0 15977 NULL
++get_entry_16003 get_entry 4 16003 NULL
++hdpvr_register_videodev_16010 hdpvr_register_videodev 3 16010 NULL
++viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
++got_frame_16028 got_frame 2 16028 NULL
++dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
++snd_sgbuf_aligned_pages_16063 snd_sgbuf_aligned_pages 0-1 16063 NULL
++ocfs2_sync_local_to_main_16076 ocfs2_sync_local_to_main 0 16076 NULL
++isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
++dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
++isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
++irq_set_chip_and_handler_name_16111 irq_set_chip_and_handler_name 1 16111 NULL
++rd_mem_16117 rd_mem 0 16117 NULL
++snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
++compat_sys_select_16131 compat_sys_select 1 16131 NULL
++fsm_init_16134 fsm_init 2 16134 NULL
++hysdn_rx_netpkt_16136 hysdn_rx_netpkt 3 16136 NULL
++ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
++cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
++create_table_16213 create_table 2 16213 NULL
++atomic_read_file_16227 atomic_read_file 3 16227 NULL
++BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
++btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL
++mark_written_sectors_16262 mark_written_sectors 2 16262 NULL
++reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
++ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
++drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
++nand_bch_init_16280 nand_bch_init 2-3 16280 &drbd_setsockopt_16280
++account_16283 account 0-4-2 16283 NULL
++jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
++stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
++rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
++sst_allocate_decode_buf_16349 sst_allocate_decode_buf 3 16349 NULL
++total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
++iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
++nl80211_send_unprot_deauth_16378 nl80211_send_unprot_deauth 4 16378 NULL
++i8042_create_kbd_port_16379 i8042_create_kbd_port 0 16379 NULL
++scsi_nl_send_vendor_msg_16394 scsi_nl_send_vendor_msg 5 16394 NULL
++alloc_trdev_16399 alloc_trdev 1 16399 NULL
++ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
++rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
++calculate_inocache_hashsize_16449 calculate_inocache_hashsize 0-1 16449 NULL
++ocfs2_expand_refcount_tree_16455 ocfs2_expand_refcount_tree 0 16455 NULL
++netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
++sock_wmalloc_16472 sock_wmalloc 2 16472 NULL
++ab8500_val_write_16473 ab8500_val_write 3 16473 NULL
++ocfs2_block_group_set_bits_16488 ocfs2_block_group_set_bits 0 16488 NULL
++tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
++filemap_write_and_wait_16506 filemap_write_and_wait 0 16506 NULL
++start_this_handle_16519 start_this_handle 0 16519 NULL
++snd_interval_max_16529 snd_interval_max 0 16529 NULL
++lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
++agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
++__cfg80211_testmode_alloc_skb_16611 __cfg80211_testmode_alloc_skb 2 16611 NULL
++btrfs_wait_marked_extents_16615 btrfs_wait_marked_extents 0 16615 NULL
++bnx2i_session_create_16624 bnx2i_session_create 2 16624 NULL
++packet_recv_error_16669 packet_recv_error 3 16669 NULL
++dlm_new_lockspace_16688 dlm_new_lockspace 2 16688 NULL
++calc_layout_16690 calc_layout 4 16690 NULL
++em28xx_v4l2_read_16701 em28xx_v4l2_read 3 16701 NULL
++iscsi_recv_pdu_16755 iscsi_recv_pdu 4 16755 NULL
++arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
++blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
++i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
++get_server_iovec_16804 get_server_iovec 2 16804 NULL
++tipc_send2name_16809 tipc_send2name 6 16809 NULL
++mled_proc_write_16831 mled_proc_write 3 16831 NULL nohasharray
++drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 &mled_proc_write_16831
++scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
++hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
++alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
++carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
++st_write_16874 st_write 3 16874 NULL
++__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
++ext4_ext_zeroout_16895 ext4_ext_zeroout 0 16895 NULL
++mwifiex_update_curr_bss_params_16908 mwifiex_update_curr_bss_params 5 16908 NULL
++ivtv_v4l2_ioctl_16915 ivtv_v4l2_ioctl 2 16915 NULL
++psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
++snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
++paranoid_check_vid_hdr_16932 paranoid_check_vid_hdr 0 16932 NULL
++random32_16937 random32 0 16937 NULL
++ip_append_data_16942 ip_append_data 5-6 16942 NULL
++_sp2d_alloc_16944 _sp2d_alloc 3-2-1 16944 NULL
++squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
++cfg80211_send_unprot_disassoc_16951 cfg80211_send_unprot_disassoc 3 16951 NULL
++wrm_16966 wrm 0 16966 NULL
++keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
++ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
++snd_mask_refine_first_17026 snd_mask_refine_first 0 17026 NULL
++copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL nohasharray
++iwm_if_alloc_17027 iwm_if_alloc 1 17027 &copy_counters_to_user_17027
++jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
++__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
++sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
++dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
++simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
++expand_files_17080 expand_files 2 17080 NULL
++carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
++entry_length_17093 entry_length 0 17093 NULL
++sys_preadv_17100 sys_preadv 3 17100 NULL
++ocfs2_get_refcount_cpos_end_17113 ocfs2_get_refcount_cpos_end 0 17113 NULL
++write_mem_17114 write_mem 3 17114 NULL
++pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
++mwifiex_get_common_rates_17131 mwifiex_get_common_rates 3 17131 NULL
++wrmaltWithLock_17139 wrmaltWithLock 0 17139 NULL
++jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
++befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
++pm860x_page_bulk_read_17174 pm860x_page_bulk_read 3 17174 NULL
++access_remote_vm_17189 access_remote_vm 0 17189 NULL nohasharray
++iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189
++driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
++iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
++dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
++ms_rw_17220 ms_rw 3-4 17220 NULL
++__be16_to_cpup_17261 __be16_to_cpup 0 17261 NULL
++alloc_ep_17269 alloc_ep 1 17269 NULL
++pg_read_17276 pg_read 3 17276 NULL
++raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
++neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
++minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
++skb_pad_17302 skb_pad 2 17302 NULL
++mb_cache_create_17307 mb_cache_create 2 17307 NULL
++iwm_umac_set_config_var_17320 iwm_umac_set_config_var 4 17320 NULL
++ata_host_alloc_pinfo_17325 ata_host_alloc_pinfo 3 17325 NULL
++alloc_fdtable_17389 alloc_fdtable 1 17389 NULL
++lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
++compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
++sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
++snd_hammerfall_get_buffer_17441 snd_hammerfall_get_buffer 3 17441 NULL
++nla_get_u32_17455 nla_get_u32 0 17455 NULL
++__send_request_17461 __send_request 0 17461 NULL nohasharray
++__ref_totlen_17461 __ref_totlen 0 17461 &__send_request_17461
++probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
++TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
++bitmap_pos_to_ord_17503 bitmap_pos_to_ord 3 17503 NULL
++lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
++restore_i387_fxsave_17528 restore_i387_fxsave 2 17528 NULL
++__cfg80211_roamed_17529 __cfg80211_roamed 5-7 17529 NULL
++xlog_do_log_recovery_17550 xlog_do_log_recovery 3 17550 NULL
++__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
++copy_from_user_17559 copy_from_user 3-0 17559 NULL
++snd_pcm_action_lock_irq_17569 snd_pcm_action_lock_irq 0 17569 NULL
++acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
++neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
++rts51x_write_mem_17598 rts51x_write_mem 4 17598 NULL
++brcmf_process_nvram_vars_17601 brcmf_process_nvram_vars 0 17601 NULL nohasharray
++iwl_dump_nic_event_log_17601 iwl_dump_nic_event_log 0 17601 &brcmf_process_nvram_vars_17601
++__inode_info_17603 __inode_info 0 17603 NULL
++osst_execute_17607 osst_execute 7-6 17607 NULL
++ocfs2_mark_extent_written_17615 ocfs2_mark_extent_written 6 17615 NULL
++dma_map_page_17628 dma_map_page 0 17628 NULL
++ocfs2_rotate_subtree_left_17634 ocfs2_rotate_subtree_left 0 17634 NULL
++packet_setsockopt_17662 packet_setsockopt 5 17662 NULL nohasharray
++ubi_io_read_data_17662 ubi_io_read_data 0 17662 &packet_setsockopt_17662
++dsp_tone_hw_message_17678 dsp_tone_hw_message 3 17678 NULL
++pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
++filemap_fdatawait_17688 filemap_fdatawait 0 17688 NULL
++venus_rename_17707 venus_rename 4-5 17707 NULL
++intel_wait_ring_buffer_17727 intel_wait_ring_buffer 0 17727 NULL
++exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
++sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
++mark_unsafe_pages_17759 mark_unsafe_pages 0 17759 NULL
++perf_clock_17787 perf_clock 0 17787 NULL
++get_unaligned_be64_17794 get_unaligned_be64 0 17794 NULL
++gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
++cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
++count_leafs_17842 count_leafs 0 17842 NULL
++tcp_left_out_17860 tcp_left_out 0 17860 NULL
++sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
++alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
++ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
++orinoco_set_key_17878 orinoco_set_key 7-5 17878 NULL
++init_per_cpu_17880 init_per_cpu 1 17880 NULL
++ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
++compat_sys_pwritev_17886 compat_sys_pwritev 3 17886 NULL
++ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
++ocfs2_clusters_to_blocks_17896 ocfs2_clusters_to_blocks 0-2 17896 NULL
++dccp_feat_register_sp_17914 dccp_feat_register_sp 5 17914 NULL
++xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
++xfs_rtallocate_extent_near_17916 xfs_rtallocate_extent_near 3-5-9 17916 NULL
++srp_iu_pool_alloc_17920 srp_iu_pool_alloc 2 17920 NULL
++scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
++beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
++ufs_free_blocks_17963 ufs_free_blocks 3-2 17963 NULL
++calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
++smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
++ext4_num_overhead_clusters_18001 ext4_num_overhead_clusters 2 18001 NULL
++pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
++alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
++fill_read_18019 fill_read 0 18019 NULL
++o2hb_highest_node_18034 o2hb_highest_node 2 18034 NULL
++ocfs2_cache_cluster_dealloc_18043 ocfs2_cache_cluster_dealloc 0 18043 NULL
++cryptd_alloc_instance_18048 cryptd_alloc_instance 3-2 18048 NULL
++ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
++fpregs_get_18066 fpregs_get 4 18066 NULL
++packet_came_18072 packet_came 3 18072 NULL
++kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
++pvclock_get_nsec_offset_18104 pvclock_get_nsec_offset 0 18104 NULL
++netlink_kernel_create_18110 netlink_kernel_create 3 18110 NULL
++iwch_create_cq_18115 iwch_create_cq 2 18115 NULL
++dfs_file_read_18116 dfs_file_read 3 18116 NULL
++svc_getnl_18120 svc_getnl 0 18120 NULL
++selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
++is_idx_node_in_use_18165 is_idx_node_in_use 0 18165 NULL
++_has_tag_18169 _has_tag 2 18169 NULL
++pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
++cfpkt_create_18197 cfpkt_create 1 18197 NULL
++snd_pcm_hw_refine_user_18204 snd_pcm_hw_refine_user 0 18204 NULL
++orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
++gsm_control_message_18209 gsm_control_message 4 18209 NULL
++ocfs2_divide_leaf_refcount_block_18214 ocfs2_divide_leaf_refcount_block 0 18214 NULL
++do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
++koneplus_send_18226 koneplus_send 4 18226 NULL
++gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
++rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
++__sysfs_add_one_18258 __sysfs_add_one 0 18258 NULL
++qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
++call_usermodehelper_18268 call_usermodehelper 0 18268 NULL
++gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
++alloc_ring_18278 alloc_ring 4-2 18278 NULL
++find_dirty_idx_leb_18280 find_dirty_idx_leb 0 18280 NULL
++mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
++um_idi_write_18293 um_idi_write 3 18293 NULL
++ip6ip6_err_18308 ip6ip6_err 5 18308 NULL
++vga_r_18310 vga_r 0 18310 NULL
++alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
++ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
++bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
++lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
++pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
++xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
++ep_io_18367 ep_io 0 18367 NULL
++__video_register_device_18399 __video_register_device 3 18399 NULL
++crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
++snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
++fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
++regset_tls_set_18459 regset_tls_set 4 18459 NULL
++write_file_tx_chainmask_18487 write_file_tx_chainmask 3 18487 NULL nohasharray
++udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 &write_file_tx_chainmask_18487
++__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
++snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
++snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
++seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
++acpi_register_gsi_ioapic_18550 acpi_register_gsi_ioapic 2 18550 NULL
++sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
++vb2_streamon_18562 vb2_streamon 0 18562 NULL
++debug_output_18575 debug_output 3 18575 NULL
++check_lpt_type_18577 check_lpt_type 0 18577 NULL
++__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
++iowarrior_write_18604 iowarrior_write 3 18604 NULL
++from_buffer_18625 from_buffer 3 18625 NULL
++f1x_map_sysaddr_to_csrow_18628 f1x_map_sysaddr_to_csrow 2 18628 NULL
++cfg80211_send_rx_assoc_18638 cfg80211_send_rx_assoc 3 18638 NULL
++snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
++unmap_page_18665 unmap_page 2-3 18665 NULL
++edge_tty_recv_18667 edge_tty_recv 4 18667 NULL nohasharray
++xfs_iext_insert_18667 xfs_iext_insert 3 18667 &edge_tty_recv_18667
++iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
++ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
++ocfs2_trim_extent_18711 ocfs2_trim_extent 3-4 18711 NULL
++blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
++snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
++o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
++__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
++wep_packets_read_18751 wep_packets_read 3 18751 NULL
++read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
++ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
++alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
++ieee80211_auth_challenge_18810 ieee80211_auth_challenge 3 18810 NULL
++setup_ioapic_irq_18813 setup_ioapic_irq 1 18813 NULL
++iio_allocate_device_18821 iio_allocate_device 1 18821 NULL
++sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
++mtf_test_write_18844 mtf_test_write 3 18844 NULL
++drm_ht_create_18853 drm_ht_create 2 18853 NULL
++sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
++ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
++xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
++width_to_agaw_18883 width_to_agaw 0-1 18883 NULL
++ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
++snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
++sctp_tsnmap_num_gabs_18952 sctp_tsnmap_num_gabs 0 18952 NULL
++fdb_nlmsg_size_18957 fdb_nlmsg_size 0 18957 NULL
++__nla_reserve_18974 __nla_reserve 3 18974 NULL
++alc_auto_create_extra_outs_18975 alc_auto_create_extra_outs 2 18975 NULL
++find_dirtiest_idx_leb_19001 find_dirtiest_idx_leb 0 19001 NULL
++layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
++huge_page_size_19008 huge_page_size 0 19008 NULL
++push_leaf_right_19017 push_leaf_right 0 19017 NULL
++prepare_highmem_image_19028 prepare_highmem_image 0 19028 NULL
++ocfs2_steal_resource_19036 ocfs2_steal_resource 0 19036 NULL
++revalidate_19043 revalidate 2 19043 NULL
++drm_fb_helper_init_19044 drm_fb_helper_init 4-3 19044 NULL
++afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
++do_vm86_irq_handling_19056 do_vm86_irq_handling 2 19056 NULL
++create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
++ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
++alloc_pbl_19075 alloc_pbl 2 19075 NULL
++copy_and_check_19089 copy_and_check 3 19089 NULL
++sys_process_vm_readv_19090 sys_process_vm_readv 5-3 19090 NULL
++sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
++cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
++skb_gro_offset_19123 skb_gro_offset 0 19123 NULL
++ext4_inode_table_19125 ext4_inode_table 0 19125 NULL
++snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
++alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
++count_history_pages_19171 count_history_pages 0-3 19171 NULL
++iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
++vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
++__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
++dev_counters_read_19216 dev_counters_read 3 19216 NULL
++gsi_to_irq_19220 gsi_to_irq 0-1 19220 NULL
++snd_mask_max_19224 snd_mask_max 0 19224 NULL
++snd_pcm_capture_rewind_19229 snd_pcm_capture_rewind 0-2 19229 NULL
++sys_fcntl_19267 sys_fcntl 3 19267 NULL
++setup_shmem_window_19292 setup_shmem_window 2-3 19292 NULL
++qc_capture_19298 qc_capture 3 19298 NULL
++ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 4-3 19303 NULL
++event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
++debug_read_19322 debug_read 3 19322 NULL
++cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL nohasharray
++lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 &cfg80211_inform_bss_19332
++firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
++read_zero_19366 read_zero 3 19366 NULL
++interpret_user_input_19393 interpret_user_input 2 19393 NULL
++get_unaligned_be16_19400 get_unaligned_be16 0 19400 NULL
++get_n_events_by_type_19401 get_n_events_by_type 0 19401 NULL
++pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
++dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
++__phys_addr_19434 __phys_addr 0 19434 NULL
++xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
++gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
++sky2_read16_19475 sky2_read16 0 19475 NULL
++refill_pool_19477 refill_pool 2 19477 NULL
++efivar_create_sysfs_entry_19485 efivar_create_sysfs_entry 2 19485 NULL
++__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
++kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
++v4l2_event_subscribe_19510 v4l2_event_subscribe 3 19510 NULL
++skb_realloc_headroom_19516 skb_realloc_headroom 2 19516 NULL
++atm_alloc_charge_19517 atm_alloc_charge 2 19517 NULL nohasharray
++dev_alloc_skb_19517 dev_alloc_skb 1 19517 &atm_alloc_charge_19517
++apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL
++ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
++ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
++nfsd_read_19568 nfsd_read 5 19568 NULL
++cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
++bm_status_read_19583 bm_status_read 3 19583 NULL
++load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
++buffRdbkVerify_19644 buffRdbkVerify 0 19644 NULL
++alloc_coherent_19649 alloc_coherent 2 19649 NULL
++LoadBitmap_19658 LoadBitmap 2 19658 NULL
++rbd_snap_add_19678 rbd_snap_add 4 19678 NULL
++delay_status_19685 delay_status 4 19685 NULL
++btrfs_write_marked_extents_19720 btrfs_write_marked_extents 0 19720 NULL
++read_reg_19723 read_reg 0 19723 NULL
++memcpy_toiovecend_19736 memcpy_toiovecend 3-4 19736 NULL
++snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
++p9_client_read_19750 p9_client_read 0-5 19750 NULL
++pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
++jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
++ocfs2_read_group_descriptor_19771 ocfs2_read_group_descriptor 0 19771 NULL
++__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
++saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
++madgemc_sifreadw_19811 madgemc_sifreadw 0 19811 NULL
++irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
++ubi_eba_write_leb_19826 ubi_eba_write_leb 0 19826 NULL
++pcpu_next_unpop_19831 pcpu_next_unpop 4 19831 NULL
++vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
++key_validate_19834 key_validate 0 19834 NULL
++security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
++cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
++__nla_put_19857 __nla_put 3 19857 NULL
++aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
++ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
++cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
++iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
++ax25_send_frame_19964 ax25_send_frame 2 19964 NULL
++dbg_leb_change_19969 dbg_leb_change 0 19969 NULL
++attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
++ip_send_reply_19987 ip_send_reply 5 19987 NULL
++diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
++jbd2_journal_create_slab_20043 jbd2_journal_create_slab 1 20043 NULL
++__be32_to_cpup_20056 __be32_to_cpup 0 20056 NULL
++alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
++rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
++fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
++aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
++qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
++root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
++hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
++kmem_cache_create_20124 kmem_cache_create 3 20124 NULL
++tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
++read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
++wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
++create_trace_probe_20175 create_trace_probe 1 20175 NULL
++crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
++ext4_llseek_20183 ext4_llseek 2 20183 NULL
++pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
++rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
++tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
++pcpu_alloc_20255 pcpu_alloc 1-2 20255 NULL
++resource_size_20256 resource_size 0 20256 NULL
++_rtl92s_get_h2c_cmdlen_20312 _rtl92s_get_h2c_cmdlen 0 20312 NULL
++vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
++snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
++ocfs2_et_insert_check_20341 ocfs2_et_insert_check 0 20341 NULL
++gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
++snd_pcm_stop_20376 snd_pcm_stop 0 20376 NULL
++block_read_full_page_20380 block_read_full_page 0 20380 NULL
++snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL
++__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL
++interface_rx_20404 interface_rx 4 20404 NULL
++find_skb_20431 find_skb 2 20431 NULL
++fmc_send_cmd_20435 fmc_send_cmd 5 20435 NULL
++tcp_fragment_20436 tcp_fragment 3 20436 NULL
++nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
++ip_vs_icmp_xmit_v6_20464 ip_vs_icmp_xmit_v6 4 20464 NULL
++compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
++read_buf_20469 read_buf 2 20469 NULL
++ocfs2_db_frozen_trigger_20503 ocfs2_db_frozen_trigger 4 20503 NULL nohasharray
++hidraw_report_event_20503 hidraw_report_event 3 20503 &ocfs2_db_frozen_trigger_20503
++pcpu_alloc_area_20511 pcpu_alloc_area 0-3 20511 NULL
++pcpu_depopulate_chunk_20517 pcpu_depopulate_chunk 3-2 20517 NULL
++xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
++drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
++amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
++venus_create_20555 venus_create 4 20555 NULL
++crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
++i915_max_freq_read_20581 i915_max_freq_read 3 20581 NULL
++ocfs2_cluster_lock_20588 ocfs2_cluster_lock 0 20588 NULL
++lirc_write_20604 lirc_write 3 20604 NULL
++qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
++regcache_lzo_block_count_20628 regcache_lzo_block_count 0 20628 NULL
++snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
++kfifo_copy_to_user_20646 kfifo_copy_to_user 4-3 20646 NULL
++cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
++ceph_osdc_new_request_20654 ceph_osdc_new_request 15-4 20654 NULL
++snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
++dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
++cpumask_size_20683 cpumask_size 0 20683 NULL
++btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
++read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
++__maestro_read_20700 __maestro_read 0 20700 NULL
++cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
++page_cache_sync_readahead_20706 page_cache_sync_readahead 5-4 20706 NULL
++pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
++ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
++security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
++vring_add_indirect_20737 vring_add_indirect 4-3 20737 NULL
++io_apic_set_pci_routing_20740 io_apic_set_pci_routing 2 20740 NULL
++fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
++vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
++ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
++ubi_io_read_20767 ubi_io_read 0 20767 NULL
++fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
++iommu_range_alloc_20794 iommu_range_alloc 3 20794 NULL
++iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
++sys_sendto_20809 sys_sendto 6 20809 NULL
++ext4_convert_unwritten_extents_endio_20812 ext4_convert_unwritten_extents_endio 0 20812 NULL
++strndup_user_20819 strndup_user 2 20819 NULL
++iwl_legacy_dbgfs_qos_read_20825 iwl_legacy_dbgfs_qos_read 3 20825 NULL
++wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
++uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
++p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
++snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
++ocfs2_bmap_20874 ocfs2_bmap 2 20874 NULL
++iwl3945_ucode_tx_stats_read_20879 iwl3945_ucode_tx_stats_read 3 20879 NULL
++rb_simple_write_20890 rb_simple_write 3 20890 NULL
++sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
++key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
++ext4_calc_metadata_amount_20905 ext4_calc_metadata_amount 2 20905 NULL
++compat_sys_readv_20911 compat_sys_readv 3 20911 NULL
++ixj_write_20912 ixj_write 3 20912 NULL
++lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
++htable_bits_20933 htable_bits 0 20933 NULL
++check_eofblocks_fl_20942 check_eofblocks_fl 0 20942 NULL
++altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
++get_init_ra_size_20955 get_init_ra_size 1 20955 NULL
++insert_ptr_20961 insert_ptr 0 20961 NULL
++snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
++brcmf_tx_frame_20978 brcmf_tx_frame 3 20978 NULL
++alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
++qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
++ocfs2_free_clusters_21001 ocfs2_free_clusters 4 21001 NULL
++btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
++snd_pcm_lib_preallocate_pages_21031 snd_pcm_lib_preallocate_pages 4 21031 NULL
++lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
++proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
++rose_create_facilities_21067 rose_create_facilities 0 21067 NULL
++event_calibration_read_21083 event_calibration_read 3 21083 NULL
++__cfg80211_send_disassoc_21096 __cfg80211_send_disassoc 3 21096 NULL
++ext2_valid_block_bitmap_21101 ext2_valid_block_bitmap 3 21101 NULL
++ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
++i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
++new_skb_21148 new_skb 1 21148 NULL
++ocfs2_block_check_validate_21149 ocfs2_block_check_validate 2 21149 NULL
++setup_msi_irq_21169 setup_msi_irq 3 21169 NULL
++cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
++ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
++_ocfs2_free_clusters_21220 _ocfs2_free_clusters 0-4 21220 NULL
++get_numpages_21227 get_numpages 0-1-2 21227 NULL
++input_ff_create_21240 input_ff_create 2 21240 NULL
++cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
++sock_alloc_send_pskb_21246 sock_alloc_send_pskb 2 21246 NULL
++ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
++store_bluetooth_21320 store_bluetooth 4 21320 NULL
++get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
++ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
++iwl_legacy_tx_queue_init_21332 iwl_legacy_tx_queue_init 3 21332 NULL
++gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
++alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
++split_leaf_21378 split_leaf 0 21378 NULL
++video_ioctl2_21380 video_ioctl2 2 21380 NULL
++diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
++snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
++snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
++tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
++tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
++concat_writev_21451 concat_writev 3 21451 NULL
++ReadISAR_21453 ReadISAR 0 21453 NULL
++read_file_xmit_21487 read_file_xmit 3 21487 NULL
++mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
++dma_skb_copy_datagram_iovec_21516 dma_skb_copy_datagram_iovec 3-5 21516 NULL
++btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
++cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
++rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
++snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
++ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
++xlog_do_recovery_pass_21618 xlog_do_recovery_pass 3 21618 NULL
++ndisc_addr_option_pad_21630 ndisc_addr_option_pad 0 21630 NULL
++__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
++validate_nnode_21638 validate_nnode 0 21638 NULL
++__irq_alloc_descs_21639 __irq_alloc_descs 2-1-3-0 21639 NULL
++ocfs2_lock_refcount_allocators_21646 ocfs2_lock_refcount_allocators 0 21646 NULL
++carl9170_rx_copy_data_21656 carl9170_rx_copy_data 2 21656 NULL
++hpet_setup_msi_irq_21662 hpet_setup_msi_irq 1 21662 NULL
++atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
++ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
++rtllib_alloc_txb_21687 rtllib_alloc_txb 1-2 21687 NULL
++kobject_uevent_env_21703 kobject_uevent_env 0 21703 NULL
++evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
++drm_sman_init_21710 drm_sman_init 2-4-3 21710 NULL
++ocfs2_remove_rightmost_path_21729 ocfs2_remove_rightmost_path 0 21729 NULL
++ext4_split_extent_at_21732 ext4_split_extent_at 0 21732 NULL
++mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
++l2down_create_21755 l2down_create 4 21755 NULL
++usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
++gen_pool_add_21776 gen_pool_add 3 21776 NULL
++atomic64_cmpxchg_21782 atomic64_cmpxchg 0 21782 NULL
++xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
++kmalloc_order_trace_21788 kmalloc_order_trace 1 21788 NULL
++libipw_get_hdrlen_21792 libipw_get_hdrlen 0 21792 NULL
++dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
++__ocfs2_cluster_lock_21812 __ocfs2_cluster_lock 0 21812 NULL
++lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
++msix_capability_init_21870 msix_capability_init 0 21870 NULL
++sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL nohasharray
++tcp_cookie_size_check_21873 tcp_cookie_size_check 0-1 21873 &sisusbcon_bmove_21873
++__alloc_reserved_percpu_21895 __alloc_reserved_percpu 2-1 21895 NULL
++dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
++qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
++__ocfs2_claim_clusters_21936 __ocfs2_claim_clusters 0 21936 NULL
++rbd_req_read_21952 rbd_req_read 4-5 21952 NULL
++rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL
++ocfs2_reserve_cluster_bitmap_bits_22016 ocfs2_reserve_cluster_bitmap_bits 0 22016 NULL
++ti_recv_22027 ti_recv 4 22027 NULL
++zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
++atalk_recvmsg_22053 atalk_recvmsg 4 22053 NULL
++ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
++iwl_legacy_dbgfs_clear_ucode_statistics_write_22072 iwl_legacy_dbgfs_clear_ucode_statistics_write 3 22072 NULL
++btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
++mem_rw_22085 mem_rw 3 22085 NULL
++snd_pcm_xrun_22088 snd_pcm_xrun 0 22088 NULL
++sched_clock_cpu_22098 sched_clock_cpu 0 22098 NULL
++rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
++sys_remap_file_pages_22124 sys_remap_file_pages 1 22124 NULL
++snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
++__kfifo_alloc_22173 __kfifo_alloc 3-2 22173 NULL
++fls_22210 fls 0 22210 NULL nohasharray
++snd_soc_lzo_block_count_22210 snd_soc_lzo_block_count 0 22210 &fls_22210
++bio_chain_clone_22227 bio_chain_clone 4 22227 NULL nohasharray
++do_sync_mmap_readahead_22227 do_sync_mmap_readahead 4 22227 &bio_chain_clone_22227 nohasharray
++rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 &do_sync_mmap_readahead_22227 nohasharray
++bitmap_clear_bits_22227 bitmap_clear_bits 3 22227 &rfcomm_sock_recvmsg_22227
++mem_write_22232 mem_write 3 22232 NULL
++p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
++compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
++__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
++__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL
++mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
++udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
++atomic_read_22342 atomic_read 0 22342 NULL
++mlx4_db_alloc_22358 mlx4_db_alloc 3 22358 NULL
++irq_reserve_irq_22360 irq_reserve_irq 1 22360 NULL
++snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
++iwch_alloc_fast_reg_mr_22368 iwch_alloc_fast_reg_mr 2 22368 NULL
++evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
++ocfs2_assign_bh_22392 ocfs2_assign_bh 0 22392 NULL
++btmrvl_psmode_read_22395 btmrvl_psmode_read 3 22395 NULL
++ubifs_leb_change_22399 ubifs_leb_change 0 22399 NULL nohasharray
++alloc_private_22399 alloc_private 2 22399 &ubifs_leb_change_22399
++zoran_write_22404 zoran_write 3 22404 NULL
++queue_reply_22416 queue_reply 3 22416 NULL
++__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
++queue_max_segments_22441 queue_max_segments 0 22441 NULL
++handle_received_packet_22457 handle_received_packet 3 22457 NULL
++rt6_nlmsg_size_22473 rt6_nlmsg_size 0 22473 NULL
++ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
++cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
++mp_find_ioapic_pin_22499 mp_find_ioapic_pin 0-2 22499 NULL
++mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
++pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
++ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
++agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
++snd_pcm_hw_params_choose_22560 snd_pcm_hw_params_choose 0 22560 NULL
++dbFindCtl_22587 dbFindCtl 0 22587 NULL
++snapshot_read_22601 snapshot_read 3 22601 NULL
++ocfs2_get_refcount_block_22610 ocfs2_get_refcount_block 0 22610 NULL
++btrfs_delalloc_reserve_space_22617 btrfs_delalloc_reserve_space 0 22617 NULL
++sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
++ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
++pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
++sysfs_attr_ns_22645 sysfs_attr_ns 0 22645 NULL
++fill_gap_22681 fill_gap 0 22681 NULL nohasharray
++l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 &fill_gap_22681
++ocfs2_get_block_22687 ocfs2_get_block 2 22687 NULL
++sys_ppoll_22688 sys_ppoll 2 22688 NULL
++alloc_libipw_22708 alloc_libipw 1 22708 NULL
++brcmf_sdbrcm_read_control_22721 brcmf_sdbrcm_read_control 3 22721 NULL
++aa_features_read_22730 aa_features_read 3 22730 NULL
++cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
++ax25_output_22736 ax25_output 2 22736 NULL
++ceph_decode_32_22738 ceph_decode_32 0 22738 NULL
++print_frame_22769 print_frame 0 22769 NULL
++ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
++ocfs2_block_group_alloc_22774 ocfs2_block_group_alloc 0 22774 NULL
++__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
++can_nocow_odirect_22854 can_nocow_odirect 3-4 22854 NULL nohasharray
++read_file_rcstat_22854 read_file_rcstat 3 22854 &can_nocow_odirect_22854
++create_attr_set_22861 create_attr_set 1 22861 NULL
++usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
++mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
++virtqueue_add_buf_22924 virtqueue_add_buf 3-4 22924 NULL
++xstateregs_set_22932 xstateregs_set 4 22932 NULL
++pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
++alloc_sglist_22960 alloc_sglist 1-3-2 22960 NULL
++caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
++vme_get_size_22964 vme_get_size 0 22964 NULL
++usb_get_langid_22983 usb_get_langid 0 22983 NULL
++remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
++viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
++ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0 23029 NULL
++st_status_23032 st_status 5 23032 NULL
++xfs_qm_write_sb_changes_23039 xfs_qm_write_sb_changes 2 23039 NULL
++reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
++unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
++vivi_read_23073 vivi_read 3 23073 NULL
++kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
++raw_sendmsg_23078 raw_sendmsg 4 23078 NULL
++isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
++rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
++fls_long_23096 fls_long 0-1 23096 NULL
++ntfs_ucstonls_23097 ntfs_ucstonls 5-3 23097 NULL
++pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
++dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
++ip_recv_error_23109 ip_recv_error 3 23109 NULL
++msix_setup_entries_23110 msix_setup_entries 0 23110 NULL nohasharray
++mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 &msix_setup_entries_23110
++nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
++__clear_user_23118 __clear_user 0-2 23118 NULL
++iwl_legacy_dbgfs_interrupt_write_23122 iwl_legacy_dbgfs_interrupt_write 3 23122 NULL nohasharray
++drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 &iwl_legacy_dbgfs_interrupt_write_23122
++ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
++cfg80211_rx_mgmt_23138 cfg80211_rx_mgmt 4 23138 NULL nohasharray
++em28xx_write_regs_req_23138 em28xx_write_regs_req 0 23138 &cfg80211_rx_mgmt_23138
++read_file_ani_23161 read_file_ani 3 23161 NULL
++usblp_write_23178 usblp_write 3 23178 NULL
++gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
++ieee80211_get_mesh_hdrlen_23183 ieee80211_get_mesh_hdrlen 0 23183 NULL
++__next_dma_cap_23195 __next_dma_cap 1-0 23195 NULL
++tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
++__read_status_pci_23229 __read_status_pci 0 23229 NULL nohasharray
++xlog_get_bp_23229 xlog_get_bp 2 23229 &__read_status_pci_23229
++__kmalloc_23231 __kmalloc 1 23231 NULL
++ft1000_read_dpram_mag_32_23232 ft1000_read_dpram_mag_32 0 23232 NULL
++rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
++sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL nohasharray
++ad799x_single_channel_from_ring_23265 ad799x_single_channel_from_ring 2 23265 &sctp_recvmsg_23265
++uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
++diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
++i2cdev_write_23310 i2cdev_write 3 23310 NULL
++nl_pid_hash_zalloc_23314 nl_pid_hash_zalloc 1 23314 NULL
++try_context_readahead_23342 try_context_readahead 4-3 23342 NULL
++page_readlink_23346 page_readlink 3 23346 NULL
++get_dst_timing_23358 get_dst_timing 0 23358 NULL
++ip_nat_sdp_media_23386 ip_nat_sdp_media 8 23386 NULL
++iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
++vga_mm_r_23419 vga_mm_r 0 23419 NULL
++ulog_alloc_skb_23427 ulog_alloc_skb 1 23427 NULL
++__cxio_init_resource_fifo_23447 __cxio_init_resource_fifo 3 23447 NULL nohasharray
++ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 &__cxio_init_resource_fifo_23447
++hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
++__ata_change_queue_depth_23484 __ata_change_queue_depth 3-0 23484 NULL
++linear_conf_23485 linear_conf 2 23485 NULL
++si4713_send_command_23493 si4713_send_command 6 23493 NULL
++event_filter_read_23494 event_filter_read 3 23494 NULL
++ext4_remove_blocks_23497 ext4_remove_blocks 0 23497 NULL
++write_led_23517 write_led 2 23517 NULL
++__fill_vb2_buffer_23521 __fill_vb2_buffer 0 23521 NULL
++ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
++xen_allocate_irq_gsi_23546 xen_allocate_irq_gsi 1-0 23546 NULL
++tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
++tcp_match_skb_to_sack_23568 tcp_match_skb_to_sack 4-3 23568 NULL
++venus_symlink_23570 venus_symlink 6-4 23570 NULL
++iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
++l2cap_parse_conf_req_23575 l2cap_parse_conf_req 0 23575 NULL
++xfpregs_get_23586 xfpregs_get 4 23586 NULL
++cifs_spnego_key_instantiate_23588 cifs_spnego_key_instantiate 3 23588 NULL
++snd_interval_min_23590 snd_interval_min 0 23590 NULL
++cfpkt_create_pfx_23594 cfpkt_create_pfx 2-1 23594 NULL
++_alloc_cdb_cont_23609 _alloc_cdb_cont 2 23609 NULL
++islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
++ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
++__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
++sInW_23663 sInW 0 23663 NULL
++nftl_partscan_23688 nftl_partscan 0 23688 NULL
++cx18_read_23699 cx18_read 3 23699 NULL
++sock_alloc_send_skb_23720 sock_alloc_send_skb 2 23720 NULL
++snd_pcm_hw_refine_23721 snd_pcm_hw_refine 0 23721 NULL
++mp_config_acpi_gsi_23728 mp_config_acpi_gsi 2 23728 NULL
++pack_sg_list_p_23739 pack_sg_list_p 0-2 23739 NULL
++__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
++tt_save_orig_buffer_23779 tt_save_orig_buffer 4 23779 NULL
++security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
++rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
++ocfs2_replace_cow_23803 ocfs2_replace_cow 0 23803 NULL
++__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
++xfs_dir2_leaf_getdents_23841 xfs_dir2_leaf_getdents 3 23841 NULL
++iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
++p54_init_common_23850 p54_init_common 1 23850 NULL
++ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
++ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
++ieee80211_if_read_channel_type_23884 ieee80211_if_read_channel_type 3 23884 NULL
++iwch_reject_cr_23901 iwch_reject_cr 3 23901 NULL
++device_create_bin_file_23914 device_create_bin_file 0 23914 NULL
++ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL
++i915_gem_object_bind_to_gtt_23921 i915_gem_object_bind_to_gtt 0 23921 NULL
++kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
++__alloc_skb_23940 __alloc_skb 1 23940 NULL
++cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
++ixj_enhanced_write_23973 ixj_enhanced_write 3 23973 NULL
++sddr55_write_data_23983 sddr55_write_data 4 23983 NULL
++zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
++brcmf_sdcard_recv_buf_24006 brcmf_sdcard_recv_buf 6 24006 NULL
++cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
++ocfs2_mark_extent_refcounted_24035 ocfs2_mark_extent_refcounted 6 24035 NULL
++afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
++blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
++request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
++iwl_legacy_dbgfs_stations_read_24121 iwl_legacy_dbgfs_stations_read 3 24121 NULL
++mpu401_read_24126 mpu401_read 3-0 24126 NULL
++_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL
++irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
++UpdateReg_24148 UpdateReg 0 24148 NULL
++adu_read_24177 adu_read 3 24177 NULL
++safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
++shrink_tnc_24190 shrink_tnc 0 24190 NULL
++get_order_24203 get_order 0 24203 NULL
++ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
++tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
++pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL
++pci_num_vf_24235 pci_num_vf 0 24235 NULL
++sel_read_bool_24236 sel_read_bool 3 24236 NULL
++esp6_get_mtu_24264 esp6_get_mtu 0-2 24264 NULL
++calculate_sizes_24273 calculate_sizes 2 24273 NULL
++msg_size_24288 msg_size 0 24288 NULL
++ext2_free_blocks_24292 ext2_free_blocks 3-2 24292 NULL
++map_page_24298 map_page 3-4 24298 NULL
++gserial_connect_24302 gserial_connect 0 24302 NULL
++btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
++ocfs2_direct_IO_get_blocks_24333 ocfs2_direct_IO_get_blocks 2 24333 NULL
++kzalloc_node_24352 kzalloc_node 1 24352 NULL
++qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
++cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
++btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
++igetword_24373 igetword 0 24373 NULL
++max_io_len_24384 max_io_len 0-1 24384 NULL
++pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
++getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398
++blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
++b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
++xenbus_file_read_24427 xenbus_file_read 3 24427 NULL
++ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
++__push_leaf_left_24456 __push_leaf_left 0 24456 NULL
++evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
++lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL
++skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL nohasharray
++ocfs2_write_cluster_by_desc_24466 ocfs2_write_cluster_by_desc 6-5 24466 &skb_copy_and_csum_datagram_iovec_24466
++snd_pcm_hw_param_first_24487 snd_pcm_hw_param_first 0 24487 NULL
++push_nodes_for_insert_24496 push_nodes_for_insert 0 24496 NULL
++pd_video_read_24510 pd_video_read 3 24510 NULL
++request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
++named_prepare_buf_24532 named_prepare_buf 2 24532 NULL
++rtnl_port_size_24537 rtnl_port_size 0 24537 NULL
++write_cache_pages_24562 write_cache_pages 0 24562 NULL
++printer_set_config_24568 printer_set_config 0 24568 NULL
++netlbl_domhsh_init_24576 netlbl_domhsh_init 1 24576 NULL
++ath6kl_wmi_startscan_cmd_24580 ath6kl_wmi_startscan_cmd 7 24580 NULL
++udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
++ip6addrlbl_msgsize_24595 ip6addrlbl_msgsize 0 24595 NULL
++count_preds_24600 count_preds 0 24600 NULL
++alloc_wr_24635 alloc_wr 2-1 24635 NULL
++context_alloc_24645 context_alloc 3 24645 NULL
++blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
++btrfs_check_data_free_space_24692 btrfs_check_data_free_space 0 24692 NULL
++datafab_write_data_24696 datafab_write_data 4 24696 NULL
++ext4_da_reserve_space_24702 ext4_da_reserve_space 2 24702 NULL
++simple_attr_read_24738 simple_attr_read 3 24738 NULL
++qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
++ath_rxbuf_alloc_24745 ath_rxbuf_alloc 2 24745 NULL
++get_dma_residue_24749 get_dma_residue 0 24749 NULL
++kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
++nfsd4_sanitize_slot_size_24756 nfsd4_sanitize_slot_size 0-1 24756 NULL
++mI_alloc_skb_24770 mI_alloc_skb 1 24770 NULL
++i915_cache_sharing_read_24775 i915_cache_sharing_read 3 24775 NULL
++ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
++skb_make_writable_24783 skb_make_writable 2 24783 NULL
++datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
++cache_read_24790 cache_read 3 24790 NULL
++user_regset_copyout_24796 user_regset_copyout 7 24796 NULL
++unpack_str_24798 unpack_str 0 24798 NULL
++kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 NULL
++__next_cpu_nr_24805 __next_cpu_nr 1 24805 NULL
++ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL
++net2272_read_24825 net2272_read 0 24825 NULL
++copy_for_split_24826 copy_for_split 0 24826 NULL
++c4iw_alloc_fast_reg_mr_24838 c4iw_alloc_fast_reg_mr 2 24838 NULL nohasharray
++free_coherent_24838 free_coherent 4-2 24838 &c4iw_alloc_fast_reg_mr_24838
++snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
++snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
++pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
++put_data_to_circ_buf_24869 put_data_to_circ_buf 3 24869 &pnp_alloc_24869
++bnx2fc_cmd_mgr_alloc_24873 bnx2fc_cmd_mgr_alloc 2-3 24873 NULL
++queues_read_24877 queues_read 3 24877 NULL
++iwm_rx_handle_24899 iwm_rx_handle 3 24899 NULL
++codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
++ocfs2_fiemap_24949 ocfs2_fiemap 4-3 24949 NULL
++packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
++sys_rt_sigpending_24961 sys_rt_sigpending 2 24961 NULL
++llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
++key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
++iwl3945_ucode_general_stats_read_25009 iwl3945_ucode_general_stats_read 3 25009 NULL
++ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
++gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
++cxio_hal_init_rhdl_resource_25104 cxio_hal_init_rhdl_resource 1 25104 NULL
++ubifs_dir_llseek_25106 ubifs_dir_llseek 2 25106 NULL nohasharray
++snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 &ubifs_dir_llseek_25106
++oom_adjust_read_25127 oom_adjust_read 3 25127 NULL
++fs32_to_cpu_25143 fs32_to_cpu 0 25143 NULL
++sys_fgetxattr_25166 sys_fgetxattr 4 25166 NULL
++ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL nohasharray
++sethdraddr_25167 sethdraddr 0 25167 &ipath_init_qp_table_25167
++sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
++ks8851_rdreg32_25187 ks8851_rdreg32 0 25187 NULL
++ocfs2_block_check_compute_25223 ocfs2_block_check_compute 2 25223 NULL
++mon_stat_read_25238 mon_stat_read 3 25238 NULL
++tcf_csum_ipv6_udp_25241 tcf_csum_ipv6_udp 4 25241 NULL
++compat_rw_copy_check_uvector_25242 compat_rw_copy_check_uvector 0-3 25242 NULL
++nilfs_palloc_find_available_slot_25245 nilfs_palloc_find_available_slot 5-3 25245 NULL
++snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
++crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
++vfs_writev_25278 vfs_writev 3 25278 NULL
++sys_dup2_25284 sys_dup2 2 25284 NULL
++l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
++ceph_calc_object_layout_25305 ceph_calc_object_layout 0 25305 NULL
++ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL
++rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
++wait_for_completion_killable_25352 wait_for_completion_killable 0 25352 NULL
++i915_gem_execbuffer_relocate_slow_25355 i915_gem_execbuffer_relocate_slow 7-0 25355 NULL
++unix_mkname_25368 unix_mkname 0-2 25368 NULL
++sel_read_mls_25369 sel_read_mls 3 25369 NULL
++rh_queue_status_25378 rh_queue_status 0 25378 NULL
++ThermometerRead_25393 ThermometerRead 0 25393 NULL
++et61x251_read_25420 et61x251_read 3 25420 NULL
++dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
++generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
++ipath_decode_err_25468 ipath_decode_err 3 25468 NULL
++crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
++ocfs2_hamming_encode_25501 ocfs2_hamming_encode 3 25501 NULL
++ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL
++snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
++ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
++ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
++c4iw_init_resource_fifo_random_25547 c4iw_init_resource_fifo_random 3 25547 NULL
++wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
++taskstats_packet_size_25553 taskstats_packet_size 0 25553 NULL
++ht_print_chan_25556 ht_print_chan 0 25556 NULL
++skb_tailroom_25567 skb_tailroom 0 25567 NULL
++realloc_packet_buffer_25569 realloc_packet_buffer 2 25569 NULL
++ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
++__devres_alloc_25598 __devres_alloc 2 25598 NULL
++ddp_ppod_write_idata_25610 ddp_ppod_write_idata 5 25610 NULL
++copy_user_generic_25611 copy_user_generic 0 25611 NULL
++proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
++__get_user_pages_25628 __get_user_pages 0 25628 NULL nohasharray
++befs_utf2nls_25628 befs_utf2nls 3 25628 &__get_user_pages_25628
++ext2_try_to_allocate_25667 ext2_try_to_allocate 2-4-0 25667 NULL
++aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
++lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL
++sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
++ebitmap_start_positive_25703 ebitmap_start_positive 0 25703 NULL
++ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
++rx_queue_entry_next_25715 rx_queue_entry_next 0 25715 NULL
++sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
++__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
++mcs_unwrap_fir_25733 mcs_unwrap_fir 3 25733 NULL
++ext2_find_near_25734 ext2_find_near 0 25734 NULL
++cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
++iommu_flush_iotlb_psi_25780 iommu_flush_iotlb_psi 4 25780 NULL
++event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
++sg_read_25799 sg_read 3 25799 NULL
++sys32_rt_sigpending_25814 sys32_rt_sigpending 2 25814 NULL
++system_enable_read_25815 system_enable_read 3 25815 NULL
++realloc_buffer_25816 realloc_buffer 2 25816 NULL
++ftrace_profile_init_25821 ftrace_profile_init 0 25821 NULL
++pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
++parport_read_25855 parport_read 0 25855 NULL
++xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
++ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
++run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL
++sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
++lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
++do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
++rcname_read_25919 rcname_read 3 25919 NULL
++_get_word_25929 _get_word 0 25929 NULL
++snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
++key_flags_read_25931 key_flags_read 3 25931 NULL
++copy_play_buf_25932 copy_play_buf 3 25932 NULL
++video_register_device_25971 video_register_device 3 25971 NULL
++udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
++cap_file_mmap_26018 cap_file_mmap 0 26018 NULL
++xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
++mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
++selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
++security_file_mmap_26056 security_file_mmap 0 26056 NULL
++keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
++intel_wrap_ring_buffer_26117 intel_wrap_ring_buffer 0 26117 NULL nohasharray
++__strnlen_user_26117 __strnlen_user 0-2 26117 &intel_wrap_ring_buffer_26117
++user_instantiate_26131 user_instantiate 3 26131 NULL
++skb_cow_26138 skb_cow 2 26138 NULL
++__fswab64_26155 __fswab64 0 26155 NULL
++copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
++gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL
++disk_devt_26180 disk_devt 0 26180 NULL
++get_registers_26187 get_registers 3 26187 NULL
++ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
++xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
++mce_write_26201 mce_write 3 26201 NULL
++mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL nohasharray
++store_sys_hwmon_26225 store_sys_hwmon 3 26225 &mwifiex_regrdwr_write_26225
++_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
++cxio_num_stags_26233 cxio_num_stags 0 26233 NULL nohasharray
++rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 &cxio_num_stags_26233
++bio_split_26235 bio_split 2 26235 NULL
++crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
++apei_resources_request_26279 apei_resources_request 0 26279 NULL
++ext2_find_goal_26306 ext2_find_goal 0 26306 NULL
++snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
++pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
++tled_proc_write_26315 tled_proc_write 3 26315 NULL
++pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
++tcp_sacktag_walk_26339 tcp_sacktag_walk 5-6 26339 NULL
++snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL
++ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 0-6-3-5 26357 NULL
++dup_to_netobj_26363 dup_to_netobj 3 26363 NULL
++invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
++ntty_write_26404 ntty_write 3 26404 NULL
++tcp_shift_skb_data_26405 tcp_shift_skb_data 5 26405 NULL
++iwl_legacy_dbgfs_sram_read_26419 iwl_legacy_dbgfs_sram_read 3 26419 NULL
++__vb2_get_done_vb_26426 __vb2_get_done_vb 0 26426 NULL
++pagemap_read_26441 pagemap_read 3 26441 NULL
++tower_read_26461 tower_read 3 26461 NULL
++ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
++ulong_write_file_26485 ulong_write_file 3 26485 NULL
++dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
++read_vmcore_26501 read_vmcore 3 26501 NULL
++l2cap_build_conf_req_26513 l2cap_build_conf_req 0 26513 NULL
++rds_message_inc_copy_to_user_26540 rds_message_inc_copy_to_user 3 26540 NULL
++__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
++rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL
++__unmap_single_26604 __unmap_single 2-3 26604 NULL
++iommu_alloc_26621 iommu_alloc 4 26621 NULL
++pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
++drm_ht_find_item_26637 drm_ht_find_item 0 26637 NULL
++mmap_region_26649 mmap_region 0-2 26649 NULL
++irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
++inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
++usb_reset_device_26661 usb_reset_device 0 26661 NULL
++cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
++__alloc_pred_stack_26687 __alloc_pred_stack 2 26687 NULL
++rtllib_authentication_req_26713 rtllib_authentication_req 3 26713 NULL
++bos_desc_26752 bos_desc 0 26752 NULL
++srp_ring_alloc_26760 srp_ring_alloc 2 26760 NULL
++snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
++dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL
++qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
++cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
++smk_write_load_26829 smk_write_load 3 26829 NULL
++__nodes_onto_26838 __nodes_onto 4 26838 NULL
++scnprint_id_26842 scnprint_id 3-0 26842 NULL
++ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
++svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
++ctnetlink_counters_size_26898 ctnetlink_counters_size 0 26898 NULL
++slhc_uncompress_26905 slhc_uncompress 0-3 26905 NULL
++x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
++scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
++pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
++sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
++create_bm_block_list_26940 create_bm_block_list 0 26940 NULL
++hecubafb_write_26942 hecubafb_write 3 26942 NULL
++extract_entropy_user_26952 extract_entropy_user 3 26952 NULL
++omfs_allocate_range_27034 omfs_allocate_range 3 27034 NULL
++ufs_alloc_fragments_27059 ufs_alloc_fragments 3-0-2 27059 NULL
++__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
++snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
++paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
++alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
++find_first_bit_27088 find_first_bit 0-2 27088 NULL
++btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
++__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
++spin_time_accum_total_27131 spin_time_accum_total 1 27131 NULL
++__ext4_handle_dirty_metadata_27137 __ext4_handle_dirty_metadata 0 27137 NULL
++drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
++pms_capture_27142 pms_capture 4 27142 NULL
++btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
++i2400m_net_rx_27170 i2400m_net_rx 5 27170 NULL
++ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
++get_unaligned_be32_27184 get_unaligned_be32 0 27184 NULL
++ocfs2_read_blocks_sync_27210 ocfs2_read_blocks_sync 0 27210 NULL
++write_kmem_27225 write_kmem 3 27225 NULL
++dbAllocAG_27228 dbAllocAG 0 27228 NULL
++rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
++ocfs2_journal_access_path_27243 ocfs2_journal_access_path 0 27243 NULL
++cfpkt_add_trail_27260 cfpkt_add_trail 3 27260 NULL
++nlmsg_new_27263 nlmsg_new 1 27263 NULL
++usb_submit_urb_27278 usb_submit_urb 0 27278 NULL
++__dma_map_cont_27289 __dma_map_cont 5 27289 NULL
++hpi_read_reg_27302 hpi_read_reg 0 27302 NULL
++copy_from_buf_27308 copy_from_buf 2-4 27308 NULL
++ath6kl_wmi_test_cmd_27312 ath6kl_wmi_test_cmd 3 27312 NULL
++ocfs2_blocks_to_clusters_27327 ocfs2_blocks_to_clusters 0-2 27327 NULL
++snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
++afs_cell_create_27346 afs_cell_create 2 27346 NULL
++iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL
++pcbit_stat_27364 pcbit_stat 2 27364 NULL
++if_nlmsg_size_27404 if_nlmsg_size 0 27404 NULL
++seq_read_27411 seq_read 3 27411 NULL
++ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
++ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0 27422 NULL
++cypress_write_27423 cypress_write 4 27423 NULL
++pack_sg_list_27425 pack_sg_list 0-2 27425 NULL
++sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
++hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
++ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
++garmin_read_process_27509 garmin_read_process 3 27509 NULL
++xfs_buf_read_uncached_27519 xfs_buf_read_uncached 4 27519 NULL
++ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
++intel_gtt_map_memory_27539 intel_gtt_map_memory 0 27539 NULL
++snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
++libipw_alloc_txb_27579 libipw_alloc_txb 1-3-2 27579 NULL
++tipc_cfg_reply_alloc_27606 tipc_cfg_reply_alloc 1 27606 NULL
++iwl4965_rs_sta_dbgfs_rate_scale_data_read_27619 iwl4965_rs_sta_dbgfs_rate_scale_data_read 3 27619 NULL
++read_flush_procfs_27642 read_flush_procfs 3 27642 NULL nohasharray
++nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &read_flush_procfs_27642 nohasharray
++ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 &nl80211_send_connect_result_27642
++add_new_gdb_27643 add_new_gdb 3 27643 NULL
++ieee80211_build_probe_req_27660 ieee80211_build_probe_req 7-5 27660 NULL
++cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
++qword_get_27670 qword_get 0 27670 NULL
++ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
++l2cap_sar_segment_sdu_27701 l2cap_sar_segment_sdu 3 27701 NULL
++cxio_hal_pblpool_alloc_27714 cxio_hal_pblpool_alloc 2 27714 NULL
++evm_write_key_27715 evm_write_key 3 27715 NULL
++ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
++pstore_write_27724 pstore_write 3 27724 NULL nohasharray
++iwl_dbgfs_traffic_log_write_27724 iwl_dbgfs_traffic_log_write 3 27724 &pstore_write_27724 nohasharray
++reg_w_buf_27724 reg_w_buf 3 27724 &iwl_dbgfs_traffic_log_write_27724
++xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
++kcalloc_27770 kcalloc 2-1 27770 NULL
++ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL
++hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL
++init_header_complete_27833 init_header_complete 0 27833 NULL nohasharray
++sys_listxattr_27833 sys_listxattr 3 27833 &init_header_complete_27833
++read_profile_27859 read_profile 3 27859 NULL
++sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
++mangle_packet_27864 mangle_packet 6-8 27864 NULL
++ocfs2_file_splice_read_27870 ocfs2_file_splice_read 4 27870 NULL
++paranoid_check_ec_hdr_27872 paranoid_check_ec_hdr 0 27872 NULL
++unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
++ubi_eba_write_leb_st_27896 ubi_eba_write_leb_st 0 27896 NULL
++bm_find_next_27929 bm_find_next 2 27929 NULL
++check_mapped_name_27943 check_mapped_name 3 27943 NULL
++sctp_make_abort_violation_27959 sctp_make_abort_violation 4 27959 NULL
++tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
++device_register_27972 device_register 0 27972 NULL nohasharray
++mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972
++snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
++get_packet_pg_28023 get_packet_pg 4 28023 NULL
++raid_status_28025 raid_status 4 28025 NULL
++sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
++cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL
++init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL
++lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
++GetRecvByte_28082 GetRecvByte 0 28082 NULL
++mmc_test_alloc_mem_28102 mmc_test_alloc_mem 2-3 28102 NULL
++vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
++tipc_msg_init_28128 tipc_msg_init 4-2 28128 NULL
++video_read_28148 video_read 3 28148 NULL
++snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
++stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
++vread_28173 vread 0-3 28173 NULL
++c4iw_reject_cr_28174 c4iw_reject_cr 3 28174 NULL
++pipe_fcntl_28181 pipe_fcntl 3 28181 NULL
++macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
++ocfs2_cow_sync_writeback_28221 ocfs2_cow_sync_writeback 0 28221 NULL
++line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL
++amd_nb_num_28228 amd_nb_num 0 28228 NULL
++c4iw_rqtpool_alloc_28271 c4iw_rqtpool_alloc 2 28271 NULL
++usemap_size_28281 usemap_size 0 28281 NULL
++dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
++kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL
++__hidp_send_ctrl_message_28303 __hidp_send_ctrl_message 4 28303 NULL
++acpi_register_gsi_xen_28305 acpi_register_gsi_xen 2 28305 NULL nohasharray
++nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 &acpi_register_gsi_xen_28305
++snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
++bm_entry_write_28338 bm_entry_write 3 28338 NULL
++tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
++snapshot_write_28351 snapshot_write 3 28351 NULL
++orig_node_del_if_28371 orig_node_del_if 2 28371 NULL
++sys_writev_28384 sys_writev 3 28384 NULL
++dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
++subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
++get_extent_allocation_hint_28423 get_extent_allocation_hint 0 28423 NULL
++snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
++alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
++ocfs2_backup_super_blkno_28484 ocfs2_backup_super_blkno 0-2 28484 NULL
++__filemap_fdatawrite_28485 __filemap_fdatawrite 0 28485 NULL
++max_response_pages_28492 max_response_pages 0 28492 NULL
++__next_node_28521 __next_node 1-0 28521 NULL
++i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
++capinc_tty_write_28539 capinc_tty_write 3 28539 NULL
++sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
++run_delalloc_range_28545 run_delalloc_range 3-4 28545 NULL nohasharray
++mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 &run_delalloc_range_28545
++sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL
++b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
++i2o_msg_post_wait_mem_28558 i2o_msg_post_wait_mem 0 28558 NULL
++inet_dccp_listen_28565 inet_dccp_listen 2 28565 NULL
++cfg80211_send_rx_auth_28580 cfg80211_send_rx_auth 3 28580 NULL
++oxygen_read32_28582 oxygen_read32 0 28582 NULL
++ocfs2_read_dir_block_28587 ocfs2_read_dir_block 2 28587 NULL
++extract_entropy_28604 extract_entropy 5-3 28604 NULL
++kfifo_unused_28612 kfifo_unused 0 28612 NULL
++mp_override_legacy_irq_28618 mp_override_legacy_irq 4 28618 NULL
++snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
++_set_range_28627 _set_range 3 28627 NULL
++setup_usemap_28636 setup_usemap 3-4 28636 NULL
++qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL
++read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL
++btrfs_previous_item_28667 btrfs_previous_item 0 28667 NULL
++blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
++posix_acl_from_xattr_28675 posix_acl_from_xattr 2 28675 NULL
++__dev_alloc_skb_28681 __dev_alloc_skb 1 28681 NULL
++nl80211_send_new_peer_candidate_28692 nl80211_send_new_peer_candidate 5 28692 NULL
++balance_level_28707 balance_level 0 28707 NULL
++spi_execute_28736 spi_execute 5 28736 NULL
++snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
++cxio_init_resource_fifo_28764 cxio_init_resource_fifo 3 28764 NULL
++rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL
++atomic_inc_return_unchecked_28778 atomic_inc_return_unchecked 0 28778 NULL
++ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
++dvb_net_sec_callback_28786 dvb_net_sec_callback 2 28786 NULL
++sel_write_member_28800 sel_write_member 3 28800 NULL
++ocfs2_cow_contig_clusters_28803 ocfs2_cow_contig_clusters 0 28803 NULL
++cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
++memory_bm_create_28814 memory_bm_create 0 28814 NULL
++iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
++vp_request_msix_vectors_28849 vp_request_msix_vectors 2-0 28849 NULL
++paranoid_check_peb_vid_hdr_28866 paranoid_check_peb_vid_hdr 0 28866 NULL
++ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
++max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL
++iwl3945_sta_dbgfs_stats_table_read_28882 iwl3945_sta_dbgfs_stats_table_read 3 28882 NULL
++packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
++ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
++iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL
++ocfs2_frozen_trigger_28929 ocfs2_frozen_trigger 4 28929 NULL
++push_rx_28939 push_rx 3 28939 NULL
++btrfs_trim_block_group_28963 btrfs_trim_block_group 3 28963 NULL
++alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
++ext4_mb_add_groupinfo_28988 ext4_mb_add_groupinfo 2 28988 NULL
++hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
++bin_uuid_28999 bin_uuid 3 28999 NULL
++sys_fcntl64_29031 sys_fcntl64 3 29031 NULL
++rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL nohasharray
++ProcessGetHostMibs_29049 ProcessGetHostMibs 0 29049 &rxrpc_sendmsg_29049
++tso_fragment_29050 tso_fragment 3 29050 NULL
++split_bvec_29058 split_bvec 5 29058 NULL
++iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
++lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
++ieee80211_probereq_get_29069 ieee80211_probereq_get 4-6 29069 NULL
++mark_extents_written_29082 mark_extents_written 2-3 29082 NULL
++iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL
++isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
++rbd_req_sync_op_29115 rbd_req_sync_op 10-9 29115 NULL
++snprintf_29125 snprintf 0 29125 NULL
++iov_shorten_29130 iov_shorten 0 29130 NULL
++proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
++reshape_ring_29147 reshape_ring 2 29147 NULL
++wusb_prf_256_29203 wusb_prf_256 7 29203 NULL
++do_shrinker_shrink_29208 do_shrinker_shrink 0 29208 NULL
++rds_iw_inc_copy_to_user_29214 rds_iw_inc_copy_to_user 3 29214 NULL
++iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL nohasharray
++security_socket_recvmsg_29224 security_socket_recvmsg 0 29224 &iwl_dbgfs_temperature_read_29224
++recover_peb_29238 recover_peb 0 29238 NULL
++security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
++prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
++bitmap_ord_to_pos_29279 bitmap_ord_to_pos 3 29279 NULL
++ext4_fiemap_29296 ext4_fiemap 4 29296 NULL
++sn9c102_read_29305 sn9c102_read 3 29305 NULL
++__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
++ide_read_altstatus_29343 ide_read_altstatus 0 29343 NULL
++l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
++alloc_and_copy_ftrace_hash_29368 alloc_and_copy_ftrace_hash 1 29368 NULL
++pca953x_irq_setup_29407 pca953x_irq_setup 3 29407 NULL
++mempool_create_29437 mempool_create 1 29437 NULL
++iscsi_sw_tcp_session_create_29443 iscsi_sw_tcp_session_create 2 29443 NULL
++crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
++p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL
++validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
++ubi_scan_add_used_29468 ubi_scan_add_used 0 29468 NULL
++do_register_entry_29478 do_register_entry 4 29478 NULL
++simple_strtoul_29480 simple_strtoul 0 29480 NULL
++sched_clock_local_29498 sched_clock_local 0 29498 NULL
++btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
++btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
++write_file_regidx_29517 write_file_regidx 3 29517 NULL
++atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
++pci_enable_msix_29524 pci_enable_msix 0 29524 NULL
++idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
++leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
++kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
++lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
++iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL
++pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL
++slots_per_page_29601 slots_per_page 0 29601 NULL
++nla_get_u16_29624 nla_get_u16 0 29624 NULL
++sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
++br_send_bpdu_29669 br_send_bpdu 3 29669 NULL
++new_lockspace_29674 new_lockspace 2 29674 NULL
++sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
++tracepoint_probe_register_29688 tracepoint_probe_register 0 29688 NULL
++jbd2_journal_restart_29692 jbd2_journal_restart 0 29692 NULL
++sd_alloc_ctl_entry_29708 sd_alloc_ctl_entry 1 29708 NULL
++probes_write_29711 probes_write 3 29711 NULL
++emi62_writememory_29731 emi62_writememory 4 29731 NULL
++read_cis_cache_29735 read_cis_cache 4 29735 NULL
++cxio_hal_init_resource_29771 cxio_hal_init_resource 7-6-2 29771 NULL nohasharray
++ip_vs_conn_fill_param_sync_29771 ip_vs_conn_fill_param_sync 6 29771 &cxio_hal_init_resource_29771
++cifs_ucs2_bytes_29790 cifs_ucs2_bytes 0 29790 NULL
++dbAlloc_29794 dbAlloc 0 29794 NULL
++ext4_trim_all_free_29806 ext4_trim_all_free 4-3-2 29806 NULL
++efx_wanted_channels_29813 efx_wanted_channels 0 29813 NULL
++tcp_sendpage_29829 tcp_sendpage 4-3 29829 NULL
++scan_bitmap_block_29840 scan_bitmap_block 4 29840 NULL
++__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
++count_partial_29850 count_partial 0 29850 NULL
++ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
++scsi_end_request_29876 scsi_end_request 3 29876 NULL
++crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
++nfc_targets_found_29886 nfc_targets_found 3 29886 NULL
++pin_code_reply_29893 pin_code_reply 4 29893 NULL
++write_file_queue_29922 write_file_queue 3 29922 NULL
++ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
++__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
++ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
++xfrm_count_auth_supported_29957 xfrm_count_auth_supported 0 29957 NULL
++irias_add_octseq_attrib_29983 irias_add_octseq_attrib 4 29983 NULL
++arch_setup_dmar_msi_29992 arch_setup_dmar_msi 1-0 29992 NULL
++alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
++scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
++wrmalt_30043 wrmalt 0 30043 NULL
++__pci_request_selected_regions_30058 __pci_request_selected_regions 0 30058 NULL
++cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
++snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
++tg3_run_loopback_30093 tg3_run_loopback 2 30093 NULL
++skb_pagelen_30113 skb_pagelen 0 30113 NULL
++spi_async_locked_30117 spi_async_locked 0 30117 NULL
++calgary_unmap_page_30130 calgary_unmap_page 2-3 30130 NULL
++_osd_req_sizeof_alist_header_30134 _osd_req_sizeof_alist_header 0 30134 NULL
++recv_stream_30138 recv_stream 4 30138 NULL
++u_memcpya_30139 u_memcpya 2-3 30139 NULL
++i915_gem_object_get_pages_gtt_30154 i915_gem_object_get_pages_gtt 0 30154 NULL
++i915_gem_object_wait_rendering_30173 i915_gem_object_wait_rendering 0 30173 NULL
++cx25821_video_ioctl_30188 cx25821_video_ioctl 2 30188 NULL
++mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
++snd_pcm_playback_forward_30201 snd_pcm_playback_forward 0-2 30201 NULL
++usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
++preallocate_pcm_pages_30209 preallocate_pcm_pages 2 30209 NULL
++read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
++snd_ac97_pcm_assign_30218 snd_ac97_pcm_assign 2 30218 NULL
++dccp_manip_pkt_30229 dccp_manip_pkt 2 30229 NULL
++rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
++isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
++compat_readv_30273 compat_readv 3 30273 NULL
++lapic_register_intr_30279 lapic_register_intr 1 30279 NULL
++skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
++ir_create_table_30303 ir_create_table 4 30303 NULL
++ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
++resource_from_user_30341 resource_from_user 3 30341 NULL
++kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
++inet_getid_30365 inet_getid 2 30365 NULL
++sys_get_mempolicy_30379 sys_get_mempolicy 3 30379 NULL
++blkdev_issue_zeroout_30392 blkdev_issue_zeroout 0 30392 NULL
++c4iw_init_resource_30393 c4iw_init_resource 3-2 30393 NULL
++_drbd_bm_find_next_zero_30415 _drbd_bm_find_next_zero 2 30415 NULL
++ext4_ext_create_new_leaf_30428 ext4_ext_create_new_leaf 0 30428 NULL
++enable_write_30456 enable_write 3 30456 NULL
++urandom_read_30462 urandom_read 3 30462 NULL
++zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
++i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL
++i915_mutex_lock_interruptible_30474 i915_mutex_lock_interruptible 0 30474 NULL
++adu_write_30487 adu_write 3 30487 NULL
++dtim_interval_write_30489 dtim_interval_write 3 30489 NULL
++nouveau_vm_new_30495 nouveau_vm_new 3-2 30495 NULL
++set_config_30526 set_config 0 30526 NULL
++disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
++blk_init_tags_30592 blk_init_tags 1 30592 NULL
++sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
++macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
++compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
++nfsd_nrpools_30651 nfsd_nrpools 0 30651 NULL
++jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
++dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
++wled_proc_write_30709 wled_proc_write 3 30709 NULL
++lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL
++ocfs2_find_cpos_for_left_leaf_30713 ocfs2_find_cpos_for_left_leaf 0 30713 NULL
++l2cap_build_conf_rsp_30719 l2cap_build_conf_rsp 0 30719 NULL
++lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
++snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
++ath6kl_wmi_send_action_cmd_30735 ath6kl_wmi_send_action_cmd 6 30735 NULL
++fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL nohasharray
++tcf_csum_ipv4_udp_30777 tcf_csum_ipv4_udp 4 30777 &fuse_conn_limit_write_30777
++smk_read_doi_30813 smk_read_doi 3 30813 NULL
++get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
++sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
++ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
++pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
++f1x_match_to_this_node_30888 f1x_match_to_this_node 3 30888 NULL
++get_params_30899 get_params 0 30899 NULL
++fc_host_post_vendor_event_30903 fc_host_post_vendor_event 3 30903 NULL
++sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
++find_free_dev_extent_30963 find_free_dev_extent 0 30963 NULL
++tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
++huge_page_mask_30981 huge_page_mask 0 30981 NULL
++nlmsg_put_answer_30988 nlmsg_put_answer 4 30988 NULL
++i2400mu_rx_size_grow_30989 i2400mu_rx_size_grow 0 30989 NULL
++lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
++compat_sys_mq_timedsend_31060 compat_sys_mq_timedsend 3 31060 NULL
++lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
++find_next_bit_le_31064 find_next_bit_le 0-2-3 31064 NULL
++sys_mincore_31079 sys_mincore 2-1 31079 NULL
++sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
++find_mergeable_31093 find_mergeable 2 31093 NULL
++compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
++depth_read_31112 depth_read 3 31112 NULL
++kvm_mmu_pte_write_31120 kvm_mmu_pte_write 2-4 31120 NULL
++ssb_read16_31139 ssb_read16 0 31139 NULL
++kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
++size_inside_page_31141 size_inside_page 0-1-2 31141 NULL
++w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
++ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
++input_mt_init_slots_31183 input_mt_init_slots 2 31183 NULL
++r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
++cpumask_weight_31215 cpumask_weight 0 31215 NULL
++__read_reg_31216 __read_reg 0 31216 NULL
++atm_get_addr_31221 atm_get_addr 3 31221 NULL
++tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL
++cyy_readb_31240 cyy_readb 0 31240 NULL
++_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
++ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
++ceph_copy_page_vector_to_user_31270 ceph_copy_page_vector_to_user 0-4-3 31270 NULL
++sctp_tsnmap_find_gap_ack_31272 sctp_tsnmap_find_gap_ack 3-2 31272 NULL
++uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
++push_leaf_left_31306 push_leaf_left 0 31306 NULL
++sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
++command_file_write_31318 command_file_write 3 31318 NULL
++hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL
++__cpu_to_node_31345 __cpu_to_node 0 31345 NULL
++rbd_do_op_31366 rbd_do_op 8-9 31366 NULL
++native_setup_msi_irqs_31367 native_setup_msi_irqs 2 31367 NULL
++buffDnld_31372 buffDnld 0 31372 NULL nohasharray
++xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 &buffDnld_31372
++trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
++inb_31388 inb 0 31388 NULL
++key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
++i915_gem_object_put_fence_31413 i915_gem_object_put_fence 0 31413 NULL nohasharray
++mcs7830_set_reg_31413 mcs7830_set_reg 3 31413 &i915_gem_object_put_fence_31413
++TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
++snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
++acpi_sci_ioapic_setup_31445 acpi_sci_ioapic_setup 4 31445 NULL
++opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
++register_ftrace_graph_31456 register_ftrace_graph 0 31456 NULL
++do_fcntl_31468 do_fcntl 3 31468 NULL
++xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
++__ext4_journal_get_write_access_31482 __ext4_journal_get_write_access 0 31482 NULL
++alg_setkey_31485 alg_setkey 3 31485 NULL
++rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
++qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
++__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
++hidraw_write_31536 hidraw_write 3 31536 NULL
++normalize_31566 normalize 0-1-2 31566 NULL
++inet6_ifaddr_msgsize_31568 inet6_ifaddr_msgsize 0 31568 NULL
++osst_write_31581 osst_write 3 31581 NULL
++iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
++arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
++iwl_legacy_dbgfs_traffic_log_read_31625 iwl_legacy_dbgfs_traffic_log_read 3 31625 NULL
++xfs_log_move_tail_31628 xfs_log_move_tail 2 31628 NULL
++videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
++pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
++xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
++__lgread_31668 __lgread 4 31668 NULL
++xfs_ail_min_lsn_31684 xfs_ail_min_lsn 0 31684 NULL
++fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL
++handle_interrupt_31689 handle_interrupt 0 31689 NULL nohasharray
++reiserfs_in_journal_31689 reiserfs_in_journal 3 31689 &handle_interrupt_31689
++iwl_legacy_dbgfs_chain_noise_read_31692 iwl_legacy_dbgfs_chain_noise_read 3 31692 NULL
++audit_log_n_string_31705 audit_log_n_string 3 31705 NULL
++sctp_make_asconf_ack_31726 sctp_make_asconf_ack 3 31726 NULL
++ata_tport_add_31733 ata_tport_add 0 31733 NULL
++utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL
++NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL nohasharray
++input_abs_get_max_31742 input_abs_get_max 0 31742 &NCR_700_change_queue_depth_31742
++bcm_char_read_31750 bcm_char_read 3 31750 NULL
++snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
++usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
++get_count_order_31800 get_count_order 0 31800 NULL
++ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
++isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
++strnlen_user_31815 strnlen_user 0-2 31815 NULL
++sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
++iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL
++ddb_output_write_31902 ddb_output_write 3-0 31902 NULL
++xattr_permission_31907 xattr_permission 0 31907 NULL
++kmem_alloc_31920 kmem_alloc 1 31920 NULL
++guestwidth_to_adjustwidth_31937 guestwidth_to_adjustwidth 0-1 31937 NULL
++iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 NULL nohasharray
++read_mem_31942 read_mem 3 31942 &iov_iter_copy_from_user_31942
++vb2_write_31948 vb2_write 3 31948 NULL
++pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
++copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
++vx_read_status_31982 vx_read_status 0 31982 NULL
++find_next_zero_bit_31990 find_next_zero_bit 0-2-3 31990 NULL
++sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
++calc_hmac_32010 calc_hmac 3 32010 NULL
++aer_init_32021 aer_init 0 32021 NULL nohasharray
++aead_len_32021 aead_len 0 32021 &aer_init_32021
++ocfs2_remove_extent_32032 ocfs2_remove_extent 0-4-3 32032 NULL
++posix_acl_set_32037 posix_acl_set 4 32037 NULL
++ocfs2_update_edge_lengths_32046 ocfs2_update_edge_lengths 0 32046 NULL nohasharray
++sys_sched_setaffinity_32046 sys_sched_setaffinity 2 32046 &ocfs2_update_edge_lengths_32046
++proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
++nlmsg_put_32069 nlmsg_put 5 32069 NULL
++cfg80211_send_unprot_deauth_32080 cfg80211_send_unprot_deauth 3 32080 NULL
++ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
++set_discoverable_32102 set_discoverable 4 32102 NULL
++disk_status_32120 disk_status 4 32120 NULL
++kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
++alloc_tx_32143 alloc_tx 2 32143 NULL
++norm_maxh_32151 norm_maxh 0 32151 NULL
++venus_link_32165 venus_link 5 32165 NULL
++drbd_new_dev_size_32171 drbd_new_dev_size 0 32171 NULL
++do_writepages_32173 do_writepages 0 32173 NULL
++load_header_32183 load_header 0 32183 NULL
++ubi_wl_scrub_peb_32196 ubi_wl_scrub_peb 0 32196 NULL
++wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
++riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
++caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
++lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
++ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
++l3_alloc_skb_32289 l3_alloc_skb 1 32289 NULL
++cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
++nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
++t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
++dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
++rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
++f1x_translate_sysaddr_to_cs_32359 f1x_translate_sysaddr_to_cs 2 32359 NULL
++sel_read_initcon_32362 sel_read_initcon 3 32362 NULL
++send_mpa_reply_32372 send_mpa_reply 3 32372 NULL nohasharray
++_drbd_bm_find_next_32372 _drbd_bm_find_next 2 32372 &send_mpa_reply_32372
++variax_set_raw2_32374 variax_set_raw2 4 32374 NULL
++usbtmc_read_32377 usbtmc_read 3 32377 NULL
++intel_iommu_map_32384 intel_iommu_map 4-3 32384 NULL
++local_clock_32385 local_clock 0 32385 NULL
++xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
++hid_input_report_32458 hid_input_report 4 32458 NULL
++snd_pcm_sync_ptr_32461 snd_pcm_sync_ptr 0 32461 NULL
++fill_readbuf_32464 fill_readbuf 3 32464 NULL
++ieee80211_fill_mesh_addresses_32465 ieee80211_fill_mesh_addresses 0 32465 NULL
++ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
++ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
++ocfs2_local_alloc_reserve_for_window_32518 ocfs2_local_alloc_reserve_for_window 0 32518 NULL
++qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
++ilo_read_32531 ilo_read 3 32531 NULL
++ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
++format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
++__first_node_32558 __first_node 0 32558 NULL
++aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
++mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
++read_file_beacon_32595 read_file_beacon 3 32595 NULL
++ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
++sys_set_mempolicy_32608 sys_set_mempolicy 3 32608 NULL
++__iter_shared_inline_ref_32610 __iter_shared_inline_ref 0 32610 NULL
++irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
++cfg80211_roamed_32632 cfg80211_roamed 7-5 32632 NULL
++ieee80211_hdrlen_32637 ieee80211_hdrlen 0 32637 NULL
++ite_decode_bytes_32642 ite_decode_bytes 3 32642 NULL
++kvmalloc_32646 kvmalloc 1 32646 NULL
++ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
++generic_readlink_32654 generic_readlink 3 32654 NULL nohasharray
++ftrace_startup_32654 ftrace_startup 0 32654 &generic_readlink_32654
++move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
++apei_res_add_32674 apei_res_add 0 32674 NULL
++rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
++slhc_remember_32741 slhc_remember 3-0 32741 NULL
++megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
++stats_read_ul_32751 stats_read_ul 3 32751 NULL
++write_file_disable_ani_32761 write_file_disable_ani 3 32761 NULL
++sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
++ocfs2_read_inode_block_full_32790 ocfs2_read_inode_block_full 0 32790 NULL
++firmwareUpload_32794 firmwareUpload 3 32794 NULL
++get_register_page_interruptible_32809 get_register_page_interruptible 5 32809 NULL
++orig_node_add_if_32833 orig_node_add_if 2 32833 NULL
++nlmsg_validate_32861 nlmsg_validate 2 32861 NULL
++new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
++io_apic_setup_irq_pin_32868 io_apic_setup_irq_pin 1 32868 NULL
++blkio_fill_stat_32874 blkio_fill_stat 2 32874 NULL
++vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 4-6 32884 NULL
++zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
++irq_reserve_irqs_32946 irq_reserve_irqs 1-2 32946 NULL
++ext4_valid_block_bitmap_32958 ext4_valid_block_bitmap 3 32958 NULL
++compat_filldir_32999 compat_filldir 3 32999 NULL
++ext3_alloc_blocks_33007 ext3_alloc_blocks 3-0 33007 NULL
++br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
++snd_pcm_prepare_33036 snd_pcm_prepare 0 33036 NULL
++xfrm_mapping_msgsize_33044 xfrm_mapping_msgsize 0 33044 NULL
++ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
++stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
++sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
++acl_permission_check_33083 acl_permission_check 0 33083 NULL
++ieee80211_fragment_33112 ieee80211_fragment 4 33112 NULL
++fb_sys_write_33130 fb_sys_write 3 33130 NULL
++nfs4_init_slot_table_33152 nfs4_init_slot_table 2 33152 NULL
++tun_get_user_33178 tun_get_user 3 33178 NULL
++dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
++pp_read_33210 pp_read 3 33210 NULL
++xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
++__vb2_wait_for_done_vb_33246 __vb2_wait_for_done_vb 0 33246 NULL
++snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
++sched_find_first_bit_33270 sched_find_first_bit 0 33270 NULL
++cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
++i915_gem_object_flush_fence_33304 i915_gem_object_flush_fence 0 33304 NULL
++mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
++ceph_msgpool_init_33312 ceph_msgpool_init 3 33312 NULL
++vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
++gsm_mux_rx_netchar_33336 gsm_mux_rx_netchar 3 33336 NULL
++joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
++create_xattr_datum_33356 create_xattr_datum 5 33356 NULL
++pvscsi_allocate_sg_33357 pvscsi_allocate_sg 0 33357 NULL
++read_file_regidx_33370 read_file_regidx 3 33370 NULL
++ceph_osdc_writepages_33375 ceph_osdc_writepages 5 33375 NULL
++sctp_ulpevent_new_33377 sctp_ulpevent_new 1 33377 NULL
++ocfs2_quota_read_33382 ocfs2_quota_read 5 33382 NULL
++ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
++scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
++ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 3-2 33394 NULL
++snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 NULL
++ufs_getfrag_block_33409 ufs_getfrag_block 2 33409 NULL
++filemap_fdatawrite_33415 filemap_fdatawrite 0 33415 NULL
++sys_dup3_33421 sys_dup3 2 33421 NULL
++ubh_scanc_33436 ubh_scanc 0-3-4 33436 NULL
++create_entry_33479 create_entry 2 33479 NULL
++ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
++elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
++ol_dqblk_chunk_off_33489 ol_dqblk_chunk_off 2 33489 NULL
++res_counter_read_33499 res_counter_read 4 33499 NULL
++fb_read_33506 fb_read 3 33506 NULL
++ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
++nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
++acpi_gsi_to_irq_33533 acpi_gsi_to_irq 1 33533 NULL
++tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL
++dup_array_33551 dup_array 3 33551 NULL
++solo_enc_read_33553 solo_enc_read 3 33553 NULL
++scsi_execute_33596 scsi_execute 5 33596 NULL
++comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
++xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL nohasharray
++ip6_find_1stfragopt_33608 ip6_find_1stfragopt 0 33608 &xt_compat_target_offset_33608
++inw_p_33668 inw_p 0 33668 NULL
++arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
++rbd_alloc_coll_33678 rbd_alloc_coll 1 33678 NULL
++sys_keyctl_33708 sys_keyctl 4 33708 NULL nohasharray
++netlink_sendmsg_33708 netlink_sendmsg 4 33708 &sys_keyctl_33708
++get_free_de_33714 get_free_de 2 33714 NULL
++pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
++ocfs2_extent_map_get_blocks_33720 ocfs2_extent_map_get_blocks 2 33720 NULL
++ocfs2_lock_allocators_move_extents_33723 ocfs2_lock_allocators_move_extents 0 33723 NULL
++__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
++Read_hfc_33755 Read_hfc 0 33755 NULL
++hashtab_create_33769 hashtab_create 3 33769 NULL
++midibuf_message_length_33770 midibuf_message_length 0 33770 NULL
++i8042_create_aux_port_33777 i8042_create_aux_port 0 33777 NULL
++if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
++find_next_offset_33804 find_next_offset 3-0 33804 NULL nohasharray
++apei_estatus_len_33804 apei_estatus_len 0 33804 &find_next_offset_33804
++sky2_rx_pad_33819 sky2_rx_pad 0 33819 NULL nohasharray
++filter_write_33819 filter_write 3 33819 &sky2_rx_pad_33819
++ext4_journal_extend_33835 ext4_journal_extend 0 33835 NULL
++snd_pcm_action_nonatomic_33844 snd_pcm_action_nonatomic 0 33844 NULL
++get_user_pages_33908 get_user_pages 0 33908 NULL
++queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
++max8649_read_device_33930 max8649_read_device 3 33930 NULL
++sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
++lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
++read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
++ocfs2_create_new_meta_bhs_33955 ocfs2_create_new_meta_bhs 0 33955 NULL
++btrfs_delalloc_reserve_metadata_33963 btrfs_delalloc_reserve_metadata 0 33963 NULL
++vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
++snd_interval_refine_33987 snd_interval_refine 0 33987 NULL
++uio_dev_add_attributes_34003 uio_dev_add_attributes 0 34003 NULL
++select_size_34004 select_size 0 34004 NULL
++lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
++ppp_write_34034 ppp_write 3 34034 NULL
++tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
++__domain_flush_pages_34045 __domain_flush_pages 2-3 34045 NULL
++islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
++mtu2blksize_34139 mtu2blksize 0 34139 NULL
++ocfs2_xattr_list_entry_34165 ocfs2_xattr_list_entry 0 34165 NULL
++skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
++iwl_legacy_dbgfs_tx_queue_read_34192 iwl_legacy_dbgfs_tx_queue_read 3 34192 NULL
++mtd_write_34207 mtd_write 3 34207 NULL
++setup_nodes_for_search_34248 setup_nodes_for_search 0 34248 NULL
++bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
++ocfs2_dlm_lock_34265 ocfs2_dlm_lock 0 34265 NULL
++rw_copy_check_uvector_34271 rw_copy_check_uvector 3-0 34271 NULL
++device_private_init_34279 device_private_init 0 34279 NULL
++zone_spanned_pages_in_node_34299 zone_spanned_pages_in_node 0 34299 NULL
++iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 NULL nohasharray
++pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 &iov_iter_single_seg_count_34326
++crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL
++rngapi_reset_34366 rngapi_reset 3 34366 NULL nohasharray
++p54_alloc_skb_34366 p54_alloc_skb 3 34366 &rngapi_reset_34366
++ea_read_34378 ea_read 0 34378 NULL
++av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
++usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
++read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
++ivtv_read_pos_34400 ivtv_read_pos 3 34400 NULL
++sctp_make_heartbeat_ack_34411 sctp_make_heartbeat_ack 4 34411 NULL
++nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
++usbtest_alloc_urb_34446 usbtest_alloc_urb 5-3 34446 NULL
++sctp_make_abort_34459 sctp_make_abort 3 34459 NULL
++mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
++line6_dumpreq_init_34473 line6_dumpreq_init 3 34473 NULL
++skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
++i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
++ocfs2_block_group_clear_bits_34484 ocfs2_block_group_clear_bits 0 34484 NULL
++security_inode_permission_34488 security_inode_permission 0 34488 NULL
++snd_pcm_hw_param_value_34525 snd_pcm_hw_param_value 0 34525 NULL
++alloc_buf_34532 alloc_buf 1 34532 NULL
++tracing_stats_read_34537 tracing_stats_read 3 34537 NULL
++hugetlbfs_read_actor_34547 hugetlbfs_read_actor 4-5-2-0 34547 NULL
++intel_alloc_coherent_34551 intel_alloc_coherent 2 34551 NULL
++dbBackSplit_34561 dbBackSplit 0 34561 NULL
++alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
++velocity_rx_copy_34583 velocity_rx_copy 2 34583 NULL
++init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
++inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
++iwl_legacy_dbgfs_disable_ht40_write_34605 iwl_legacy_dbgfs_disable_ht40_write 3 34605 NULL
++__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
++__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
++cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
++isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
++alloc_irq_and_cfg_at_34706 alloc_irq_and_cfg_at 1 34706 NULL
++ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
++platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
++reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL nohasharray
++fib_rule_nlmsg_size_34736 fib_rule_nlmsg_size 0 34736 &reg_w_ixbuf_34736
++sctp_make_datafrag_empty_34737 sctp_make_datafrag_empty 3 34737 NULL
++solos_param_store_34755 solos_param_store 4 34755 NULL
++device_add_34766 device_add 0 34766 NULL
++qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
++tipc_log_resize_34803 tipc_log_resize 1 34803 NULL
++drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
++sep_prepare_input_dma_table_34832 sep_prepare_input_dma_table 3-2 34832 NULL
++ext4_groupinfo_create_slab_34837 ext4_groupinfo_create_slab 1 34837 NULL
++b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
++bl_mark_for_commit_34852 bl_mark_for_commit 3-2 34852 NULL
++acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
++usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
++ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
++write_msg_34916 write_msg 3 34916 NULL
++iwl_dbgfs_force_reset_write_34930 iwl_dbgfs_force_reset_write 3 34930 NULL
++snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
++skb_gro_header_slow_34958 skb_gro_header_slow 2 34958 NULL nohasharray
++i2c_transfer_34958 i2c_transfer 0 34958 &skb_gro_header_slow_34958
++Realloc_34961 Realloc 2 34961 NULL
++iwl_legacy_dbgfs_missed_beacon_write_34966 iwl_legacy_dbgfs_missed_beacon_write 3 34966 NULL
++l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
++sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
++snd_pcm_hw_params_35020 snd_pcm_hw_params 0 35020 NULL
++generic_file_llseek_size_35024 generic_file_llseek_size 2 35024 NULL
++paranoid_check_peb_ec_hdr_35027 paranoid_check_peb_ec_hdr 0 35027 NULL
++coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
++xfs_rtallocate_extent_35052 xfs_rtallocate_extent 2-4-8 35052 NULL
++btmrvl_gpiogap_write_35053 btmrvl_gpiogap_write 3 35053 NULL
++ext4_split_unwritten_extents_35063 ext4_split_unwritten_extents 0 35063 NULL
++store_ifalias_35088 store_ifalias 4 35088 NULL
++__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
++capi_write_35104 capi_write 3 35104 NULL
++ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
++ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
++gntdev_alloc_map_35145 gntdev_alloc_map 2 35145 NULL
++iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
++ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
++bat_ogm_aggr_packet_35202 bat_ogm_aggr_packet 3 35202 NULL
++unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
++_osd_req_alist_elem_size_35216 _osd_req_alist_elem_size 0-2 35216 NULL
++striped_read_35218 striped_read 0-3-2-8 35218 NULL nohasharray
++security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
++video_register_device_no_warn_35226 video_register_device_no_warn 3 35226 NULL
++may_commit_transaction_35234 may_commit_transaction 0 35234 NULL
++set_fd_set_35249 set_fd_set 1 35249 NULL
++ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
++jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
++dma_show_regs_35266 dma_show_regs 3 35266 NULL
++irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
++i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
++isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
++__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 NULL
++tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
++sys_setsockopt_35320 sys_setsockopt 5 35320 NULL
++new_bind_ctl_35324 new_bind_ctl 2 35324 NULL
++pskb_network_may_pull_35336 pskb_network_may_pull 2 35336 NULL
++mlx4_alloc_hwq_res_35339 mlx4_alloc_hwq_res 3 35339 NULL
++hpi_alloc_control_cache_35351 hpi_alloc_control_cache 1 35351 NULL
++compat_filldir64_35354 compat_filldir64 3 35354 NULL
++tt_update_orig_35361 tt_update_orig 4 35361 NULL
++read_kmem_35372 read_kmem 3 35372 NULL
++ocfs2_journal_access_di_35393 ocfs2_journal_access_di 0 35393 NULL
++rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
++buffer_to_user_35439 buffer_to_user 3 35439 NULL
++i915_wedged_read_35474 i915_wedged_read 3 35474 NULL
++async_setkey_35521 async_setkey 3 35521 NULL
++__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
++iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
++rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
++ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
++ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
++vb2_dqbuf_35559 vb2_dqbuf 0 35559 NULL
++sysfs_create_subdir_35567 sysfs_create_subdir 0 35567 NULL
++xfs_mount_log_sb_35576 xfs_mount_log_sb 2 35576 NULL
++ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
++ReadZReg_35604 ReadZReg 0 35604 NULL
++rbd_req_sync_read_35615 rbd_req_sync_read 6-5 35615 NULL
++kernel_readv_35617 kernel_readv 3 35617 NULL
++pci_request_regions_35635 pci_request_regions 0 35635 NULL
++scrub_stripe_35637 scrub_stripe 4-3 35637 NULL
++spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
++store_debug_level_35652 store_debug_level 3 35652 NULL
++rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
++compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
++rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
++btrfs_commit_transaction_35725 btrfs_commit_transaction 0 35725 NULL
++fixup_low_keys_35734 fixup_low_keys 0 35734 NULL
++ext4_truncate_restart_trans_35750 ext4_truncate_restart_trans 0 35750 NULL
++iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
++send_wqe_overhead_35780 send_wqe_overhead 0 35780 NULL
++udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
++store_fan1_input_35793 store_fan1_input 4 35793 NULL
++read_file_stations_35795 read_file_stations 3 35795 NULL
++pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
++vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
++mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
++fls64_35862 fls64 0-1 35862 NULL
++wait_mgsl_event_35872 wait_mgsl_event 0 35872 NULL
++kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
++ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
++uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
++tcp_mark_head_lost_35895 tcp_mark_head_lost 2 35895 NULL
++igmpv3_newpack_35912 igmpv3_newpack 2 35912 NULL
++kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
++dccp_listen_start_35918 dccp_listen_start 2 35918 NULL
++balance_node_right_35920 balance_node_right 0 35920 NULL
++put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
++ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL
++acl_alloc_35979 acl_alloc 1 35979 NULL
++device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
++generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
++koneplus_sysfs_write_35993 koneplus_sysfs_write 6 35993 NULL
++write_file_antenna_35998 write_file_antenna 3 35998 NULL
++console_store_36007 console_store 4 36007 NULL
++i965_write_fence_reg_36017 i965_write_fence_reg 0 36017 NULL
++sys_init_module_36047 sys_init_module 2 36047 NULL
++gpio_power_read_36059 gpio_power_read 3 36059 NULL
++snd_pcm_playback_hw_avail_36061 snd_pcm_playback_hw_avail 0 36061 NULL
++write_emulate_36065 write_emulate 2-4 36065 NULL
++stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
++ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
++ext3_new_blocks_36073 ext3_new_blocks 3-0 36073 NULL
++ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
++snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
++genlmsg_new_36094 genlmsg_new 1 36094 NULL
++vga_arb_write_36112 vga_arb_write 3 36112 NULL
++rx_enable_36125 rx_enable 0 36125 NULL
++iwl_trans_txq_alloc_36147 iwl_trans_txq_alloc 3 36147 NULL
++b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
++btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
++snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
++FTL_Get_Block_Table_Flash_Size_Bytes_36187 FTL_Get_Block_Table_Flash_Size_Bytes 0 36187 NULL
++__ip_append_data_36191 __ip_append_data 7-8 36191 NULL
++ubifs_read_nnode_36221 ubifs_read_nnode 0 36221 NULL
++atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
++viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
++compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
++usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL
++modem_input_wait_36278 modem_input_wait 0 36278 NULL
++mangle_sdp_packet_36279 mangle_sdp_packet 9 36279 NULL
++codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
++lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
++ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
++fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
++jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
++qla4xxx_session_create_36350 qla4xxx_session_create 2 36350 NULL
++ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
++v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL
++to_sector_36361 to_sector 0-1 36361 NULL
++mtd_do_writeoob_36373 mtd_do_writeoob 4 36373 NULL
++vring_new_virtqueue_36374 vring_new_virtqueue 1 36374 NULL
++tunables_read_36385 tunables_read 3 36385 NULL
++afs_alloc_flat_call_36399 afs_alloc_flat_call 3-2 36399 NULL
++sierra_write_36402 sierra_write 4 36402 NULL
++sys_vm86_36421 sys_vm86 2 36421 NULL
++rtnl_link_get_size_36436 rtnl_link_get_size 0 36436 NULL
++sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
++alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
++b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
++ip6_append_data_36490 ip6_append_data 4-5 36490 NULL
++cmd_loop_36491 cmd_loop 0 36491 NULL
++iwl_legacy_dbgfs_power_save_status_read_36492 iwl_legacy_dbgfs_power_save_status_read 3 36492 NULL
++__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
++mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
++ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
++crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
++cpu_type_read_36540 cpu_type_read 3 36540 NULL
++__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL nohasharray
++macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
++__erst_read_36579 __erst_read 0 36579 NULL
++put_cmsg_36589 put_cmsg 4 36589 NULL
++pcnet32_realloc_rx_ring_36598 pcnet32_realloc_rx_ring 3 36598 NULL
++fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
++vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
++cxio_hal_rqtpool_alloc_36648 cxio_hal_rqtpool_alloc 2 36648 NULL nohasharray
++lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 &cxio_hal_rqtpool_alloc_36648
++osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
++iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
++ext4_mb_discard_group_preallocations_36685 ext4_mb_discard_group_preallocations 2 36685 NULL
++get_txidle_36698 get_txidle 0 36698 NULL
++gsmtty_write_36702 gsmtty_write 3 36702 NULL
++sched_clock_36717 sched_clock 0 36717 NULL
++ocfs2_rotate_tree_right_36723 ocfs2_rotate_tree_right 0 36723 NULL
++saa7134_i2c_eeprom_36729 saa7134_i2c_eeprom 3 36729 NULL
++extract_icmp6_fields_36732 extract_icmp6_fields 2 36732 NULL
++snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
++cxgbi_device_register_36746 cxgbi_device_register 2-1 36746 NULL
++i915_gem_evict_inactive_36767 i915_gem_evict_inactive 0 36767 NULL
++ip4ip6_err_36772 ip4ip6_err 5 36772 NULL
++llc_mac_header_len_36776 llc_mac_header_len 0 36776 NULL
++proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
++do_dmabuf_dirty_sou_36807 do_dmabuf_dirty_sou 7 36807 NULL
++hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
++ocfs2_journal_access_rb_36823 ocfs2_journal_access_rb 0 36823 NULL
++int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
++fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
++keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
++cm_write_36858 cm_write 3 36858 NULL
++svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
++ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
++selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
++OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
++genlmsg_total_size_36938 genlmsg_total_size 0-1 36938 NULL
++crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
++div_u64_36951 div_u64 0 36951 NULL
++write_leb_36957 write_leb 0 36957 NULL
++call_usermodehelper_exec_36960 call_usermodehelper_exec 0 36960 NULL
++ntfs_external_attr_find_36963 ntfs_external_attr_find 0 36963 NULL
++sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
++setxattr_37006 setxattr 4 37006 NULL
++ondemand_readahead_37015 ondemand_readahead 6-5 37015 NULL
++command_file_read_37038 command_file_read 3 37038 NULL
++em28xx_gpio_set_37040 em28xx_gpio_set 0 37040 NULL
++ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL
++find_next_chunk_37067 find_next_chunk 0 37067 NULL
++parse_command_37079 parse_command 2 37079 NULL
++snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
++xfrm_expire_msgsize_37133 xfrm_expire_msgsize 0 37133 NULL
++msg_word_37164 msg_word 0 37164 NULL
++BeceemNVMRead_37166 BeceemNVMRead 0 37166 NULL
++can_set_xattr_37182 can_set_xattr 4 37182 NULL
++store_wimax_37196 store_wimax 4 37196 NULL
++vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
++sysfs_add_file_37200 sysfs_add_file 0 37200 NULL
++crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL
++uapsd_queues_read_37217 uapsd_queues_read 3 37217 NULL
++regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
++__do_replace_37227 __do_replace 5 37227 NULL
++produce_free_peb_37232 produce_free_peb 0 37232 NULL
++ctnetlink_secctx_size_37236 ctnetlink_secctx_size 0 37236 NULL
++ReadLEDInformationFromEEPROM_37247 ReadLEDInformationFromEEPROM 0 37247 NULL
++BeceemFlashBulkWrite_37255 BeceemFlashBulkWrite 0 37255 NULL
++prot_queue_del_37258 prot_queue_del 0 37258 NULL
++exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
++srp_target_alloc_37288 srp_target_alloc 3 37288 NULL
++request_threaded_irq_37303 request_threaded_irq 0 37303 NULL
++jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
++send_msg_37323 send_msg 4 37323 NULL
++brcmf_sdbrcm_membytes_37324 brcmf_sdbrcm_membytes 3-5 37324 NULL
++scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
++rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
++nf_bridge_pad_37351 nf_bridge_pad 0 37351 NULL
++security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
++iommu_num_pages_37391 iommu_num_pages 0-2-3-1 37391 NULL
++sys_getxattr_37418 sys_getxattr 4 37418 NULL
++hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
++acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL nohasharray
++find_next_bit_37422 find_next_bit 0-2-3 37422 &acpi_os_allocate_zeroed_37422
++ocfs2_insert_path_37425 ocfs2_insert_path 0 37425 NULL
++tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
++iwl_print_last_event_logs_37433 iwl_print_last_event_logs 7-9-0 37433 NULL
++tcp_established_options_37450 tcp_established_options 0 37450 NULL
++cmd_input_size_37457 cmd_input_size 0-1 37457 NULL
++ufs_data_ptr_to_cpu_37475 ufs_data_ptr_to_cpu 0 37475 NULL
++get_est_timing_37484 get_est_timing 0 37484 NULL
++kmem_realloc_37489 kmem_realloc 2 37489 NULL
++xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
++hdr_size_37536 hdr_size 0 37536 NULL
++xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
++ocfs2_add_branch_37588 ocfs2_add_branch 0 37588 NULL
++alloc_descs_37593 alloc_descs 0-1 37593 NULL
++qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
++btrfs_write_and_wait_marked_extents_37604 btrfs_write_and_wait_marked_extents 0 37604 NULL
++kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
++ocfs2_split_refcount_rec_37622 ocfs2_split_refcount_rec 0 37622 NULL
++alloc_fd_37637 alloc_fd 1 37637 NULL
++tcp_dma_try_early_copy_37651 tcp_dma_try_early_copy 3 37651 NULL
++bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
++rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
++vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
++iwl_legacy_dbgfs_rxon_filter_flags_read_37666 iwl_legacy_dbgfs_rxon_filter_flags_read 3 37666 NULL
++regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
++__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
++netxen_validate_ringparam_37740 netxen_validate_ringparam 1-2-3 37740 NULL
++read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
++ocfs2_duplicate_clusters_by_jbd_37749 ocfs2_duplicate_clusters_by_jbd 5-4-6 37749 NULL
++ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
++ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
++dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
++smk_read_logging_37804 smk_read_logging 3 37804 NULL
++deny_write_access_37813 deny_write_access 0 37813 NULL
++bitmap_find_next_zero_area_37827 bitmap_find_next_zero_area 2-3-0-5-4 37827 NULL
++jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
++o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
++xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
++iwmct_fw_parser_init_37876 iwmct_fw_parser_init 4 37876 NULL
++sys_setxattr_37880 sys_setxattr 4 37880 NULL
++dvb_net_sec_37884 dvb_net_sec 3 37884 NULL
++tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
++xfs_highbit32_37921 xfs_highbit32 0 37921 NULL
++pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL
++read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
++write_file_bool_37957 write_file_bool 3 37957 NULL
++ext3_free_blocks_sb_37967 ext3_free_blocks_sb 4-3 37967 NULL
++rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
++vfs_readv_38011 vfs_readv 3 38011 NULL
++aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
++store_wlan_38040 store_wlan 4 38040 NULL
++klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 NULL
++sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
++nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
++alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
++uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
++em28xx_set_mode_38088 em28xx_set_mode 0 38088 NULL
++request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
++proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
++ep0_read_38095 ep0_read 3 38095 NULL
++snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
++vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
++__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
++kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
++cdev_add_38176 cdev_add 2-3 38176 NULL
++rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
++get_ucode_user_38202 get_ucode_user 3 38202 NULL
++ext3_new_block_38208 ext3_new_block 3-0 38208 NULL
++osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL nohasharray
++xfs_rtallocate_range_38223 xfs_rtallocate_range 4-3 38223 &osd_req_list_partition_collections_38223
++inet_csk_listen_start_38233 inet_csk_listen_start 2 38233 NULL
++ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
++_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
++snd_pcm_playback_rewind_38249 snd_pcm_playback_rewind 0-2 38249 NULL
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 NULL nohasharray
++mthca_alloc_icm_table_38268 mthca_alloc_icm_table 3-4 38268 &ieee80211_if_read_auto_open_plinks_38268
++xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
++xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
++zd_mac_rx_38296 zd_mac_rx 3 38296 NULL
++isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
++ida_simple_get_38326 ida_simple_get 2 38326 NULL
++ocfs2_rotate_rightmost_leaf_left_38330 ocfs2_rotate_rightmost_leaf_left 0 38330 NULL
++__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
++ocfs2_replace_extent_rec_38357 ocfs2_replace_extent_rec 0 38357 NULL
++btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
++sctp_sf_abort_violation_38380 sctp_sf_abort_violation 6 38380 NULL
++norm_maxh_38387 norm_maxh 0 38387 NULL
++dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
++ttm_put_pages_38411 ttm_put_pages 2 38411 NULL
++get_valid_node_allowed_38412 get_valid_node_allowed 1-0 38412 NULL
++ocfs2_which_cluster_group_38413 ocfs2_which_cluster_group 0-2 38413 NULL
++iwm_wdev_alloc_38415 iwm_wdev_alloc 1 38415 NULL
++ht_destroy_irq_38418 ht_destroy_irq 1 38418 NULL
++ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
++pcnet32_realloc_tx_ring_38428 pcnet32_realloc_tx_ring 3 38428 NULL
++pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
++var_name_strnsize_38447 var_name_strnsize 0-2 38447 NULL
++kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
++blk_end_bidi_request_38482 blk_end_bidi_request 4-3 38482 NULL
++cpu_to_mem_38501 cpu_to_mem 0 38501 NULL
++dev_names_read_38509 dev_names_read 3 38509 NULL
++iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
++sdhci_resume_host_38512 sdhci_resume_host 0 38512 NULL
++event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
++ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
++cpu_to_node_38561 cpu_to_node 0 38561 NULL
++si_domain_work_fn_38562 si_domain_work_fn 1-2 38562 NULL
++irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
++_ipw_read32_38565 _ipw_read32 0 38565 NULL
++snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
++sctp_tsnmap_num_dups_38578 sctp_tsnmap_num_dups 0 38578 NULL
++copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
++cosa_net_setup_rx_38594 cosa_net_setup_rx 2 38594 NULL
++reportdesc_callback_38603 reportdesc_callback 3 38603 NULL
++pep_indicate_38611 pep_indicate 5 38611 NULL
++__css_put_38613 __css_put 2 38613 NULL
++icn_writecmd_38629 icn_writecmd 2 38629 NULL
++write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
++receive_extralen_38634 receive_extralen 0 38634 NULL
++audit_init_entry_38644 audit_init_entry 1 38644 NULL
++mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
++nfs_dns_resolve_name_38670 nfs_dns_resolve_name 2 38670 NULL
++snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
++cfg80211_send_disassoc_38678 cfg80211_send_disassoc 3 38678 NULL
++iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
++validate_vid_hdr_38699 validate_vid_hdr 0 38699 NULL
++find_next_usable_block_38716 find_next_usable_block 3-1-0 38716 NULL
++v4l2_ctrl_new_38725 v4l2_ctrl_new 7 38725 NULL
++w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
++iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
++snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
++gre_manip_pkt_38785 gre_manip_pkt 2 38785 NULL
++do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
++err_decode_38804 err_decode 2 38804 NULL
++ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
++sys_select_38827 sys_select 1 38827 NULL
++b43_txhdr_size_38832 b43_txhdr_size 0 38832 NULL
++direct_entry_38836 direct_entry 3 38836 NULL
++compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
++read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
++interfaces_38859 interfaces 2 38859 NULL
++pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
++sizeof_gpio_leds_priv_38882 sizeof_gpio_leds_priv 0-1 38882 NULL
++reserve_metadata_bytes_38886 reserve_metadata_bytes 0 38886 NULL
++dbgfs_state_38894 dbgfs_state 3 38894 NULL
++traverse_38897 traverse 0 38897 NULL
++__fswab16_38898 __fswab16 0 38898 NULL
++ext3_trim_all_free_38929 ext3_trim_all_free 2-4-3 38929 NULL
++usb_maxpacket_38977 usb_maxpacket 0 38977 NULL
++OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
++lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
++udf_new_block_38999 udf_new_block 4 38999 NULL
++t4vf_pktgl_to_skb_39005 t4vf_pktgl_to_skb 2 39005 NULL
++get_nodes_39012 get_nodes 3 39012 NULL
++disp_proc_write_39024 disp_proc_write 3 39024 NULL
++acpi_install_gpe_block_39031 acpi_install_gpe_block 4 39031 NULL
++_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
++do_write_kmem_39051 do_write_kmem 1-3-0 39051 NULL
++line6_midibuf_read_39067 line6_midibuf_read 0-3 39067 NULL
++ext4_init_block_bitmap_39071 ext4_init_block_bitmap 3 39071 NULL
++ReadHFC_39104 ReadHFC 0 39104 NULL
++tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
++leb_write_lock_39111 leb_write_lock 0 39111 NULL
++__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
++ttm_mem_global_alloc_zone_39125 ttm_mem_global_alloc_zone 0 39125 NULL
++i915_gem_evict_something_39130 i915_gem_evict_something 0 39130 NULL
++ea_foreach_39133 ea_foreach 0 39133 NULL
++generic_permission_39150 generic_permission 0 39150 NULL
++alloc_ring_39151 alloc_ring 4-2 39151 NULL
++proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
++ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
++init_list_set_39188 init_list_set 3-2 39188 NULL
++ubi_more_update_data_39189 ubi_more_update_data 4-0 39189 NULL
++qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
++ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
++drm_order_39244 drm_order 0 39244 NULL
++snd_pcm_capture_forward_39248 snd_pcm_capture_forward 0-2 39248 NULL
++r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL
++__skb_cow_39254 __skb_cow 2 39254 NULL
++bitmap_set_bits_39272 bitmap_set_bits 3 39272 NULL
++expand_fdtable_39273 expand_fdtable 2 39273 NULL
++pohmelfs_setxattr_39281 pohmelfs_setxattr 4 39281 NULL
++mei_registration_cdev_39284 mei_registration_cdev 2 39284 NULL
++__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
++wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
++__cfg80211_send_deauth_39344 __cfg80211_send_deauth 3 39344 NULL
++ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
++vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
++user_power_read_39414 user_power_read 3 39414 NULL
++alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
++sys_semop_39457 sys_semop 3 39457 NULL
++setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
++btrfs_mksubvol_39479 btrfs_mksubvol 3 39479 NULL
++ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
++atomic64_read_unchecked_39505 atomic64_read_unchecked 0 39505 NULL
++wm8350_i2c_read_device_39542 wm8350_i2c_read_device 3 39542 NULL nohasharray
++int_proc_write_39542 int_proc_write 3 39542 &wm8350_i2c_read_device_39542
++pp_write_39554 pp_write 3 39554 NULL
++ol_dqblk_block_39558 ol_dqblk_block 2-0-3 39558 NULL
++datablob_format_39571 datablob_format 2 39571 NULL nohasharray
++ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
++handle_response_icmp_39574 handle_response_icmp 7 39574 NULL
++ext_depth_39607 ext_depth 0 39607 NULL
++sdio_readb_39618 sdio_readb 0 39618 NULL
++fm_send_cmd_39639 fm_send_cmd 5 39639 NULL
++snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
++prism2_info_hostscanresults_39657 prism2_info_hostscanresults 3 39657 NULL
++pfkey_sockaddr_size_39661 pfkey_sockaddr_size 0 39661 NULL
++kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
++v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
++sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
++ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
++tcf_csum_ipv4_tcp_39713 tcf_csum_ipv4_tcp 4 39713 NULL
++mlx4_ib_resize_cq_39744 mlx4_ib_resize_cq 2 39744 NULL
++tcp_write_xmit_39755 tcp_write_xmit 2 39755 NULL
++usb_hcd_map_urb_for_dma_39774 usb_hcd_map_urb_for_dma 0 39774 NULL
++ocfs2_pages_per_cluster_39790 ocfs2_pages_per_cluster 0 39790 NULL
++security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
++snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
++sys_migrate_pages_39825 sys_migrate_pages 2 39825 NULL
++get_priv_size_39828 get_priv_size 0-1 39828 NULL
++beiscsi_process_async_pdu_39834 beiscsi_process_async_pdu 7 39834 NULL
++pkt_add_39897 pkt_add 3 39897 NULL
++read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
++gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
++dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
++dma_to_mm_pfn_39916 dma_to_mm_pfn 0-1 39916 NULL
++aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
++exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
++oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
++__spi_async_39932 __spi_async 0 39932 NULL
++iwl_legacy_dbgfs_missed_beacon_read_39939 iwl_legacy_dbgfs_missed_beacon_read 3 39939 NULL
++fwnet_pd_new_39947 fwnet_pd_new 4 39947 NULL
++tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
++dma_push_rx_39973 dma_push_rx 2 39973 NULL
++broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
++mthca_array_init_39987 mthca_array_init 2 39987 NULL
++fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
++i2c_readn_40001 i2c_readn 0 40001 NULL
++xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
++ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
++ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
++datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
++ocfs2_claim_clusters_40050 ocfs2_claim_clusters 0 40050 NULL
++atomic_xchg_40070 atomic_xchg 0 40070 NULL
++snd_pcm_sw_params_user_40095 snd_pcm_sw_params_user 0 40095 NULL
++netlink_broadcast_filtered_40105 netlink_broadcast_filtered 0 40105 NULL
++sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
++iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
++pt_write_40159 pt_write 3 40159 NULL
++scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
++ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL nohasharray
++devnode_find_40199 devnode_find 3-2 40199 &ipr_alloc_ucode_buffer_40199
++allocate_probes_40204 allocate_probes 1 40204 NULL
++au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
++compress_file_range_40225 compress_file_range 3-4 40225 NULL
++osst_read_40237 osst_read 3 40237 NULL
++brcmf_sdioh_request_buffer_40239 brcmf_sdioh_request_buffer 7 40239 NULL
++ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
++rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL nohasharray
++fuse_update_attributes_40262 fuse_update_attributes 0 40262 &rs_sta_dbgfs_scale_table_read_40262
++ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
++reqsk_queue_alloc_40272 reqsk_queue_alloc 2 40272 NULL
++rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
++ubi_io_write_data_40305 ubi_io_write_data 0 40305 NULL
++nfs_file_llseek_40306 nfs_file_llseek 2 40306 NULL
++ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
++bat_ogm_queue_add_40337 bat_ogm_queue_add 3 40337 NULL
++mmio_read_40348 mmio_read 4 40348 NULL
++ocfs2_release_clusters_40355 ocfs2_release_clusters 0-4 40355 NULL
++event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
++ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 3-2 40365 NULL
++get_chars_40373 get_chars 3 40373 NULL
++usb_gadget_config_buf_40374 usb_gadget_config_buf 0 40374 NULL
++fwnet_incoming_packet_40380 fwnet_incoming_packet 3 40380 NULL
++brcmf_sdbrcm_get_image_40397 brcmf_sdbrcm_get_image 0-2 40397 NULL
++fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
++atmel_rmem16_40450 atmel_rmem16 0 40450 NULL
++tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
++zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
++afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
++devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
++tty_write_room_40495 tty_write_room 0 40495 NULL
++sg_phys_40507 sg_phys 0 40507 NULL
++__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
++TSS_checkhmac2_40520 TSS_checkhmac2 7-5 40520 NULL
++i915_gem_execbuffer_relocate_object_slow_40546 i915_gem_execbuffer_relocate_object_slow 0 40546 NULL
++ima_write_policy_40548 ima_write_policy 3 40548 NULL
++esp_alloc_tmp_40558 esp_alloc_tmp 2-3 40558 NULL
++ufs_inode_getfrag_40560 ufs_inode_getfrag 2-4 40560 NULL
++arch_setup_hpet_msi_40584 arch_setup_hpet_msi 1 40584 NULL
++b1_get_byte_40597 b1_get_byte 0 40597 NULL
++skge_rx_get_40598 skge_rx_get 3 40598 NULL
++get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
++sctp_manip_pkt_40620 sctp_manip_pkt 2 40620 NULL
++pid_nr_ns_40654 pid_nr_ns 0 40654 NULL
++fops_read_40672 fops_read 3 40672 NULL
++ext4_mark_inode_dirty_40673 ext4_mark_inode_dirty 0 40673 NULL
++videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 4-3 40678 NULL
++pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
++regulator_enable_40689 regulator_enable 0 40689 NULL
++__seq_open_private_40715 __seq_open_private 3 40715 NULL
++xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL nohasharray
++find_next_zero_bit_le_40744 find_next_zero_bit_le 2-3-0 40744 &xfs_iext_remove_direct_40744
++security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
++card_send_command_40757 card_send_command 3 40757 NULL
++ad1889_readl_40765 ad1889_readl 0 40765 NULL
++pg_write_40766 pg_write 3 40766 NULL
++ecryptfs_readlink_40775 ecryptfs_readlink 3 40775 NULL nohasharray
++show_list_40775 show_list 3-0 40775 &ecryptfs_readlink_40775
++kfifo_out_copy_r_40784 kfifo_out_copy_r 3 40784 NULL
++bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
++idr_get_new_40797 idr_get_new 0 40797 NULL
++netdev_alloc_skb_ip_align_40811 netdev_alloc_skb_ip_align 2 40811 NULL nohasharray
++paranoid_check_not_bad_40811 paranoid_check_not_bad 0 40811 &netdev_alloc_skb_ip_align_40811
++nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
++nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
++__shared_list_add_40850 __shared_list_add 0 40850 NULL
++ocfs2_zero_partial_clusters_40856 ocfs2_zero_partial_clusters 2-3 40856 NULL
++v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
++iwch_alloc_pbl_40885 iwch_alloc_pbl 2 40885 NULL
++read_file_queue_40895 read_file_queue 3 40895 NULL
++waiters_read_40902 waiters_read 3 40902 NULL
++isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
++iwl_legacy_dbgfs_disable_ht40_read_40910 iwl_legacy_dbgfs_disable_ht40_read 3 40910 NULL
++gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
++vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
++__kfifo_init_40918 __kfifo_init 4-3 40918 NULL
++iterate_extent_inodes_40923 iterate_extent_inodes 0 40923 NULL
++btrfs_setsize_40931 btrfs_setsize 2 40931 NULL
++snd_vx_create_40948 snd_vx_create 4 40948 NULL
++tcp_skb_mss_40964 tcp_skb_mss 0 40964 NULL
++rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
++econet_recvmsg_40978 econet_recvmsg 4 40978 NULL
++insert_old_idx_40987 insert_old_idx 0 40987 NULL
++mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
++_req_append_segment_41031 _req_append_segment 2 41031 NULL
++mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
++ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
++BcmFlash2xBulkWrite_41054 BcmFlash2xBulkWrite 0 41054 NULL
++vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
++beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
++cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
++roccat_read_41093 roccat_read 3 41093 NULL
++provide_user_output_41105 provide_user_output 3 41105 NULL
++f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
++ocfs2_extend_trans_41116 ocfs2_extend_trans 0 41116 NULL nohasharray
++oom_adjust_write_41116 oom_adjust_write 3 41116 &ocfs2_extend_trans_41116
++dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
++ol_quota_chunk_block_41177 ol_quota_chunk_block 0-2 41177 NULL
++request_irq_41192 request_irq 0 41192 NULL
++compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
++dfs_file_write_41196 dfs_file_write 3 41196 NULL
++UpdateRegs_41200 UpdateRegs 0 41200 NULL nohasharray
++xfs_readdir_41200 xfs_readdir 3 41200 &UpdateRegs_41200
++ocfs2_read_quota_block_41207 ocfs2_read_quota_block 2 41207 NULL
++ceph_calc_raw_layout_41212 ceph_calc_raw_layout 4 41212 NULL
++tun_alloc_skb_41216 tun_alloc_skb 2-4-3 41216 NULL
++nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
++hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
++create_dir_41256 create_dir 0 41256 NULL
++erst_read_41260 erst_read 0 41260 NULL
++alloc_context_41283 alloc_context 1 41283 NULL
++ewma_init_41305 ewma_init 2-3 41305 NULL
++objio_alloc_io_state_41316 objio_alloc_io_state 7 41316 NULL
++create_bounce_buffer_41330 create_bounce_buffer 3 41330 NULL
++user_update_41332 user_update 3 41332 NULL
++twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
++irq_expand_nr_irqs_41351 irq_expand_nr_irqs 0 41351 NULL
++cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
++jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
++kmp_init_41373 kmp_init 2 41373 NULL
++tifm_add_adapter_41390 tifm_add_adapter 0 41390 NULL
++isr_commands_read_41398 isr_commands_read 3 41398 NULL
++sys_flistxattr_41407 sys_flistxattr 3 41407 NULL
++xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
++isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
++lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
++ext4_trim_extent_41436 ext4_trim_extent 4 41436 NULL
++ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4 41442 NULL
++pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
++layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
++snd_pcm_status_41472 snd_pcm_status 0 41472 NULL
++wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
++hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
++xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
++ldisc_receive_41516 ldisc_receive 4 41516 NULL
++rng_dev_read_41581 rng_dev_read 3 41581 NULL
++map_offset_to_paddr_41592 map_offset_to_paddr 0-1 41592 NULL
++read_file_rx_chainmask_41605 read_file_rx_chainmask 3 41605 NULL
++vga_io_r_41609 vga_io_r 0 41609 NULL
++tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
++lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 NULL nohasharray
++usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 &lbs_bcnmiss_write_41613
++lis3l02dq_read_accel_from_buffer_41615 lis3l02dq_read_accel_from_buffer 2 41615 NULL
++mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
++get_std_timing_41654 get_std_timing 0 41654 NULL
++start_graph_tracing_41656 start_graph_tracing 0 41656 NULL nohasharray
++squashfs_cache_init_41656 squashfs_cache_init 2 41656 &start_graph_tracing_41656
++ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
++uapsd_max_sp_len_write_41683 uapsd_max_sp_len_write 3 41683 NULL
++apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
++sys_pwritev_41722 sys_pwritev 3 41722 NULL
++hc_gpa_41744 hc_gpa 0-2-3 41744 NULL
++fillonedir_41746 fillonedir 3 41746 NULL
++get_slab_41770 get_slab 1 41770 NULL
++ocfs2_dx_dir_rebalance_41793 ocfs2_dx_dir_rebalance 7 41793 NULL
++bat_socket_read_41813 bat_socket_read 3 41813 NULL
++sco_send_frame_41815 sco_send_frame 3 41815 NULL
++do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
++tcp_packets_in_flight_41853 tcp_packets_in_flight 0 41853 NULL
++keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
++spin_time_start_41857 spin_time_start 0 41857 NULL
++pci_map_single_41869 pci_map_single 0 41869 NULL
++usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
++get_packet_41914 get_packet 3 41914 NULL
++get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
++ceph_get_direct_page_vector_41917 ceph_get_direct_page_vector 2 41917 NULL
++find_ge_pid_41918 find_ge_pid 1 41918 NULL
++build_inv_iotlb_pages_41922 build_inv_iotlb_pages 4-5 41922 NULL
++ReadConfigFileStructure_41929 ReadConfigFileStructure 0 41929 NULL
++nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
++iscsi_iser_recv_41948 iscsi_iser_recv 4 41948 NULL
++ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
++efx_tx_queue_insert_41955 efx_tx_queue_insert 2 41955 NULL
++portnames_read_41958 portnames_read 3 41958 NULL
++dst_mtu_41969 dst_mtu 0 41969 NULL
++cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
++ubi_io_is_bad_41983 ubi_io_is_bad 0 41983 NULL
++_get_slice_41991 _get_slice 0 41991 NULL
++em28xx_write_regs_41996 em28xx_write_regs 0 41996 NULL
++flakey_status_42000 flakey_status 4 42000 NULL
++pool_allocate_42012 pool_allocate 3 42012 NULL
++spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
++rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
++ensure_wear_leveling_42029 ensure_wear_leveling 0 42029 NULL
++acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
++__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
++irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
++jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
++InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
++brcmf_sdbrcm_downloadvars_42064 brcmf_sdbrcm_downloadvars 3 42064 NULL
++scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
++sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
++submit_inquiry_42108 submit_inquiry 3 42108 NULL
++sysfs_read_file_42113 sysfs_read_file 3 42113 NULL
++store_gps_42118 store_gps 4 42118 NULL
++ext4_do_update_inode_42127 ext4_do_update_inode 0 42127 NULL
++tipc_createport_raw_42129 tipc_createport_raw 4 42129 NULL
++Read_hfc16_stable_42131 Read_hfc16_stable 0 42131 NULL
++ttm_agp_populate_42144 ttm_agp_populate 2 42144 NULL
++v9fs_alloc_rdir_buf_42150 v9fs_alloc_rdir_buf 2 42150 NULL
++mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
++read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
++oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
++write_file_beacon_42185 write_file_beacon 3 42185 NULL
++get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
++xfs_rtfree_range_42244 xfs_rtfree_range 4-3 42244 NULL
++btmrvl_hsmode_write_42252 btmrvl_hsmode_write 3 42252 NULL
++find_last_bit_42260 find_last_bit 2 42260 NULL
++ctnetlink_proto_size_42270 ctnetlink_proto_size 0 42270 NULL
++__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
++snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
++rtnl_link_get_af_size_42296 rtnl_link_get_af_size 0 42296 NULL
++sel_read_perm_42302 sel_read_perm 3 42302 NULL nohasharray
++crypt_status_42302 crypt_status 4 42302 &sel_read_perm_42302
++sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
++ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
++tracing_ctrl_write_42324 tracing_ctrl_write 3 42324 NULL nohasharray
++hysdn_conf_read_42324 hysdn_conf_read 3 42324 &tracing_ctrl_write_42324 nohasharray
++lpfc_config_msi_42324 lpfc_config_msi 0 42324 &hysdn_conf_read_42324
++tcp_sync_mss_42330 tcp_sync_mss 0-2 42330 NULL
++ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
++msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
++brn_proc_write_42407 brn_proc_write 3 42407 NULL
++krng_get_random_42420 krng_get_random 3 42420 NULL
++gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
++key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
++snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
++tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
++ext3_valid_block_bitmap_42459 ext3_valid_block_bitmap 3 42459 NULL
++neigh_nlmsg_size_42464 neigh_nlmsg_size 0 42464 NULL
++kernel_recvmsg_42482 kernel_recvmsg 0 42482 NULL
++follow_hugetlb_page_42486 follow_hugetlb_page 0-7 42486 NULL
++brcmf_sdbrcm_bus_txctl_42492 brcmf_sdbrcm_bus_txctl 3 42492 NULL
++jbd2_log_wait_commit_42519 jbd2_log_wait_commit 0 42519 NULL
++kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
++smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
++snd_emux_create_port_42533 snd_emux_create_port 3 42533 NULL
++__register_ftrace_function_42543 __register_ftrace_function 0 42543 NULL
++dbAllocNear_42546 dbAllocNear 0 42546 NULL
++udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
++iwl_print_event_log_42566 iwl_print_event_log 7-5-0 42566 NULL
++ocfs2_reserve_suballoc_bits_42569 ocfs2_reserve_suballoc_bits 0 42569 NULL
++xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
++oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
++map_state_42602 map_state 1 42602 NULL nohasharray
++__pskb_pull_42602 __pskb_pull 2 42602 &map_state_42602
++sys_move_pages_42626 sys_move_pages 2 42626 NULL
++ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
++scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
++br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
++parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
++_regmap_raw_write_42652 _regmap_raw_write 4 42652 NULL
++ocfs2_search_chain_42655 ocfs2_search_chain 0 42655 NULL
++l2tp_xmit_skb_42672 l2tp_xmit_skb 3 42672 NULL
++request_key_and_link_42693 request_key_and_link 4 42693 NULL
++vb2_read_42703 vb2_read 3 42703 NULL
++__ocfs2_decrease_refcount_42717 __ocfs2_decrease_refcount 0-5-4 42717 NULL
++read_status_42722 read_status 0 42722 NULL
++dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
++set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
++ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
++xen_bind_pirq_gsi_to_irq_42750 xen_bind_pirq_gsi_to_irq 1 42750 NULL
++dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
++qla2x00_get_ctx_bsg_sp_42768 qla2x00_get_ctx_bsg_sp 3 42768 NULL
++x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
++snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
++cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL
++koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
++ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0-2 42796 NULL
++fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
++drm_ioctl_42813 drm_ioctl 2 42813 NULL
++iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
++set_arg_42824 set_arg 3 42824 NULL
++ocfs2_desc_bitmap_to_cluster_off_42831 ocfs2_desc_bitmap_to_cluster_off 2 42831 NULL
++prandom_u32_42853 prandom_u32 0 42853 NULL
++ntfs_mapping_pairs_build_42859 ntfs_mapping_pairs_build 0 42859 NULL
++ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
++pskb_expand_head_42881 pskb_expand_head 3-2 42881 NULL
++tipc_port_recv_sections_42890 tipc_port_recv_sections 4 42890 NULL
++xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
++SendTxCommandPacket_42901 SendTxCommandPacket 3 42901 NULL
++hd_end_request_42904 hd_end_request 2 42904 NULL
++sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
++get_unmapped_area_42944 get_unmapped_area 0 42944 NULL
++vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
++sys_sethostname_42962 sys_sethostname 2 42962 NULL
++ixj_enhanced_read_42980 ixj_enhanced_read 3 42980 NULL
++compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL nohasharray
++pfkey_xfrm_policy2sec_ctx_size_42981 pfkey_xfrm_policy2sec_ctx_size 0 42981 &compat_udpv6_setsockopt_42981
++nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL
++mlx4_qp_reserve_range_43000 mlx4_qp_reserve_range 2-3 43000 NULL
++isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
++add_bytes_to_bitmap_43026 add_bytes_to_bitmap 3-0-4 43026 NULL
++store_lssw_43035 store_lssw 4 43035 NULL nohasharray
++wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 &store_lssw_43035
++uapsd_queues_write_43040 uapsd_queues_write 3 43040 NULL
++sep_prepare_input_output_dma_table_in_dcb_43064 sep_prepare_input_output_dma_table_in_dcb 4-5-3-2 43064 NULL
++_xfer_secondary_pool_43089 _xfer_secondary_pool 2 43089 NULL
++ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
++e1000_request_msix_43134 e1000_request_msix 0 43134 NULL
++usb_string_sub_43164 usb_string_sub 0 43164 NULL
++ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
++teiup_create_43201 teiup_create 3 43201 NULL
++uio_write_43202 uio_write 3 43202 NULL
++iso_callback_43208 iso_callback 3 43208 NULL
++atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
++vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
++store_wwan_43264 store_wwan 4 43264 NULL
++ide_end_rq_43269 ide_end_rq 4 43269 NULL
++parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
++evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
++filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
++alloc_subdevices_43300 alloc_subdevices 2 43300 NULL
++store_ledd_43312 store_ledd 4 43312 NULL
++get_nr_irqs_gsi_43315 get_nr_irqs_gsi 0 43315 NULL
++__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
++svc_pool_map_get_43386 svc_pool_map_get 0 43386 NULL
++xenfb_write_43412 xenfb_write 3 43412 NULL
++__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL nohasharray
++msi_capability_init_43423 msi_capability_init 0 43423 &__alloc_bootmem_low_43423
++usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
++ocfs2_rotate_tree_left_43442 ocfs2_rotate_tree_left 0 43442 NULL
++usb_string_43443 usb_string 0 43443 NULL nohasharray
++usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
++__data_list_add_eb_43472 __data_list_add_eb 0 43472 NULL
++alloc_new_reservation_43480 alloc_new_reservation 4-0-2 43480 NULL
++nf_nat_ftp_fmt_cmd_43495 nf_nat_ftp_fmt_cmd 0 43495 NULL
++ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
++do_readlink_43518 do_readlink 2 43518 NULL
++dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
++cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
++ufs_alloccg_block_43540 ufs_alloccg_block 3-0 43540 NULL
++request_resource_43548 request_resource 0 43548 NULL
++ath_rx_init_43564 ath_rx_init 2 43564 NULL nohasharray
++_send_control_msg_43564 _send_control_msg 6 43564 &ath_rx_init_43564
++_fc_frame_alloc_43568 _fc_frame_alloc 1 43568 NULL
++rpc_malloc_43573 rpc_malloc 2 43573 NULL
++handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
++lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
++proc_read_43614 proc_read 3 43614 NULL
++prison_create_43623 prison_create 1 43623 NULL
++random_write_43656 random_write 3 43656 NULL
++bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
++ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
++dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
++calgary_map_page_43686 calgary_map_page 4 43686 NULL
++hidp_send_ctrl_message_43702 hidp_send_ctrl_message 4 43702 NULL
++user_confirm_reply_43708 user_confirm_reply 4 43708 NULL
++wait_for_completion_interruptible_43723 wait_for_completion_interruptible 0 43723 NULL
++drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
++reset_card_proc_43731 reset_card_proc 0 43731 NULL
++snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
++ocfs2_replace_clusters_43733 ocfs2_replace_clusters 0-5 43733 NULL
++fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
++osdv1_attr_list_elem_size_43747 osdv1_attr_list_elem_size 0-1 43747 NULL
++gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
++sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
++ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
++byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787 nohasharray
++xfs_highbit64_43787 xfs_highbit64 1 43787 &byte_pos_43787
++btrfs_copy_from_user_43806 btrfs_copy_from_user 3-1-0 43806 NULL
++store_cpufv_disabled_43809 store_cpufv_disabled 4 43809 NULL
++hci_send_cmd_43810 hci_send_cmd 3 43810 NULL
++ext4_read_block_bitmap_43814 ext4_read_block_bitmap 2 43814 NULL
++ext4_split_extent_43818 ext4_split_extent 0 43818 NULL
++i915_gem_execbuffer_relocate_entry_43822 i915_gem_execbuffer_relocate_entry 0 43822 NULL
++ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
++ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
++atomic64_cmpxchg_unchecked_43840 atomic64_cmpxchg_unchecked 0 43840 NULL
++p54_download_eeprom_43842 p54_download_eeprom 4 43842 NULL
++read_flush_43851 read_flush 3 43851 NULL
++ocfs2_block_group_find_clear_bits_43874 ocfs2_block_group_find_clear_bits 4 43874 NULL
++idmap_update_entry_43885 idmap_update_entry 3 43885 NULL
++prism2_sta_send_mgmt_43916 prism2_sta_send_mgmt 5 43916 NULL
++xen_register_gsi_43946 xen_register_gsi 2-1 43946 NULL
++stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
++i915_ring_idle_43969 i915_ring_idle 0 43969 NULL
++__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
++nla_reserve_43984 nla_reserve 3 43984 NULL
++scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
++kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &scsi_command_size_43992 nohasharray
++bcm_recvmsg_43992 bcm_recvmsg 4 43992 &kvm_read_guest_virt_43992
++write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
++btrfs_prev_leaf_44083 btrfs_prev_leaf 0 44083 NULL
++xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
++skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
++tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
++em28xx_read_reg_req_44130 em28xx_read_reg_req 0 44130 NULL
++scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
++ubifs_find_dirty_idx_leb_44169 ubifs_find_dirty_idx_leb 0 44169 NULL
++ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
++readreg_ipac_44186 readreg_ipac 0 44186 NULL
++handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
++IO_APIC_get_PCI_irq_vector_44198 IO_APIC_get_PCI_irq_vector 0 44198 NULL
++claim_ptd_buffers_44213 claim_ptd_buffers 3 44213 NULL
++srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
++ioapic_register_intr_44238 ioapic_register_intr 1 44238 NULL
++scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
++enlarge_skb_44248 enlarge_skb 2 44248 NULL
++apei_resources_sub_44252 apei_resources_sub 0 44252 NULL
++device_create_file_44285 device_create_file 0 44285 NULL
++ufs_clusteracct_44293 ufs_clusteracct 3 44293 NULL
++ocfs2_zero_range_for_truncate_44294 ocfs2_zero_range_for_truncate 3 44294 NULL
++iwl3945_statistics_flag_44310 iwl3945_statistics_flag 3-0 44310 NULL
++bitmap_scnprintf_44318 bitmap_scnprintf 2-0 44318 NULL
++dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
++rs_init_44327 rs_init 1 44327 NULL
++count_ah_combs_44334 count_ah_combs 0 44334 NULL
++blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL
++ipx_recvmsg_44366 ipx_recvmsg 4 44366 NULL
++rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
++aoedev_flush_44398 aoedev_flush 2 44398 NULL
++strlcpy_44400 strlcpy 3 44400 NULL
++drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
++osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
++write_file_debug_44476 write_file_debug 3 44476 NULL
++btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
++sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
++ath6kl_tm_rx_report_44494 ath6kl_tm_rx_report 3 44494 NULL
++ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
++xfrm_sa_len_44502 xfrm_sa_len 0 44502 NULL
++ac_register_board_44504 ac_register_board 3 44504 NULL
++security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
++iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
++spidev_write_44510 spidev_write 3 44510 NULL
++sys_msgsnd_44537 sys_msgsnd 3 44537 NULL nohasharray
++comm_write_44537 comm_write 3 44537 &sys_msgsnd_44537
++snd_pcm_drop_44542 snd_pcm_drop 0 44542 NULL
++dbg_chk_pnode_44555 dbg_chk_pnode 0 44555 NULL
++sysfs_add_one_44629 sysfs_add_one 0 44629 NULL
++cfpkt_add_body_44630 cfpkt_add_body 3 44630 NULL
++ext2_new_block_44645 ext2_new_block 2-0 44645 NULL
++alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
++sysfs_create_link_44685 sysfs_create_link 0 44685 NULL
++ts_read_44687 ts_read 3 44687 NULL
++i915_wait_request_44703 i915_wait_request 0 44703 NULL
++__ocfs2_rotate_tree_left_44705 __ocfs2_rotate_tree_left 0 44705 NULL
++__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 NULL
++mempool_create_node_44715 mempool_create_node 1 44715 NULL
++_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
++clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
++fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
++key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
++tnode_new_44757 tnode_new 3 44757 NULL nohasharray
++pty_write_44757 pty_write 3 44757 &tnode_new_44757
++__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL
++sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
++rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
++x25_pacsize_to_bytes_44812 x25_pacsize_to_bytes 0 44812 NULL
++sisusb_write_44834 sisusb_write 3 44834 NULL
++nl80211_send_unprot_disassoc_44846 nl80211_send_unprot_disassoc 4 44846 NULL
++cubic_root_44848 cubic_root 1 44848 NULL
++qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL
++init_rs_44873 init_rs 1 44873 NULL
++skb_availroom_44883 skb_availroom 0 44883 NULL
++nf_bridge_encap_header_len_44890 nf_bridge_encap_header_len 0 44890 NULL
++ocfs2_wait_for_mask_44893 ocfs2_wait_for_mask 0 44893 NULL
++do_tty_write_44896 do_tty_write 5 44896 NULL
++_snd_pcm_hw_param_last_44947 _snd_pcm_hw_param_last 0 44947 NULL
++tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
++ftdi_process_packet_45005 ftdi_process_packet 5 45005 NULL
++i915_gem_do_execbuffer_45012 i915_gem_do_execbuffer 0 45012 NULL
++read_block_bitmap_45021 read_block_bitmap 2 45021 NULL nohasharray
++ptrace_writedata_45021 ptrace_writedata 4 45021 &read_block_bitmap_45021
++vhci_get_user_45039 vhci_get_user 3 45039 NULL
++sel_write_user_45060 sel_write_user 3 45060 NULL
++snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
++ata_tdev_add_45079 ata_tdev_add 0 45079 NULL
++orig_hash_del_if_45080 orig_hash_del_if 2 45080 NULL
++usbdev_read_45114 usbdev_read 3 45114 NULL
++send_to_tty_45141 send_to_tty 3 45141 NULL
++crypto_aead_blocksize_45148 crypto_aead_blocksize 0 45148 NULL
++gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
++ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 NULL nohasharray
++device_write_45156 device_write 3 45156 &ocfs2_remove_inode_range_45156
++ocfs2_dq_frozen_trigger_45159 ocfs2_dq_frozen_trigger 4 45159 NULL
++tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
++sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
++sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 NULL nohasharray
++snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 &sctp_pack_cookie_45190
++num_clusters_in_group_45194 num_clusters_in_group 2 45194 NULL
++add_child_45201 add_child 4 45201 NULL
++iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
++spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
++ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
++event_enable_write_45238 event_enable_write 3 45238 NULL
++gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL
++snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
++e1000_tx_map_45309 e1000_tx_map 5 45309 NULL
++copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
++lane2_associate_req_45398 lane2_associate_req 4 45398 NULL
++__data_list_add_45403 __data_list_add 0 45403 NULL
++keymap_store_45406 keymap_store 4 45406 NULL
++ath6kl_wmi_send_probe_response_cmd_45422 ath6kl_wmi_send_probe_response_cmd 5 45422 NULL
++tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
++do_mmap_pgoff_45441 do_mmap_pgoff 0 45441 NULL
++__node_remap_45458 __node_remap 4 45458 NULL
++rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
++tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
++snd_pcm_hwsync_45479 snd_pcm_hwsync 0 45479 NULL
++rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
++sys_lgetxattr_45531 sys_lgetxattr 4 45531 NULL
++cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
++copy_macs_45534 copy_macs 4 45534 NULL
++nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
++v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
++cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
++ext3_group_first_block_no_45555 ext3_group_first_block_no 0-2 45555 NULL
++stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
++posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
++venus_rmdir_45564 venus_rmdir 4 45564 NULL
++rdma_set_ib_paths_45592 rdma_set_ib_paths 3 45592 NULL
++hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
++audit_log_n_hex_45617 audit_log_n_hex 3 45617 NULL
++i915_gem_evict_everything_45629 i915_gem_evict_everything 0 45629 NULL
++ebitmap_next_positive_45651 ebitmap_next_positive 3-0 45651 NULL
++ext4_reserve_inode_write_45654 ext4_reserve_inode_write 0 45654 NULL
++dma_map_cont_45668 dma_map_cont 5 45668 NULL
++compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
++dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
++smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
++ip_nat_sip_expect_45693 ip_nat_sip_expect 7 45693 NULL
++unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
++bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699 nohasharray
++__spin_time_accum_45699 __spin_time_accum 1 45699 &bscnl_emit_45699
++sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
++dvb_ca_en50221_init_45718 dvb_ca_en50221_init 4 45718 NULL
++snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL
++v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 NULL
++lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
++i915_gem_object_flush_gpu_write_domain_45755 i915_gem_object_flush_gpu_write_domain 0 45755 NULL
++alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
++dma_alloc_coherent_mask_45787 dma_alloc_coherent_mask 0 45787 NULL
++nfs_idmap_request_key_45791 nfs_idmap_request_key 2 45791 NULL
++raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
++rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL
++lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
++pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
++fm_v4l2_init_video_device_45821 fm_v4l2_init_video_device 2 45821 NULL
++r600_texture_size_45823 r600_texture_size 6-5-4 45823 NULL
++ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
++amthi_read_45831 amthi_read 4 45831 NULL
++audit_make_reply_45835 audit_make_reply 7 45835 NULL
++__ip_select_ident_45851 __ip_select_ident 3 45851 NULL
++smp_build_cmd_45853 smp_build_cmd 3 45853 NULL
++isdn_write_45863 isdn_write 3 45863 NULL
++rbd_get_num_segments_45864 rbd_get_num_segments 0-2-3 45864 NULL
++unpack_orig_pfns_45867 unpack_orig_pfns 0 45867 NULL
++tpm_config_in_45880 tpm_config_in 0 45880 NULL
++get_rdac_req_45882 get_rdac_req 3 45882 NULL
++ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
++__svc_create_45903 __svc_create 3 45903 NULL
++dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
++alloc_mr_45935 alloc_mr 1 45935 NULL
++cma_user_data_offset_45954 cma_user_data_offset 0 45954 NULL
++ndisc_opt_addr_space_45959 ndisc_opt_addr_space 0 45959 NULL
++rb_simple_read_45972 rb_simple_read 3 45972 NULL
++ezusb_writememory_45976 ezusb_writememory 4 45976 NULL
++ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
++sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
++get_free_entries_46030 get_free_entries 1 46030 NULL
++__access_remote_vm_46031 __access_remote_vm 0 46031 NULL
++snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
++acpi_register_gsi_xen_hvm_46052 acpi_register_gsi_xen_hvm 2 46052 NULL
++run_card_proc_46057 run_card_proc 0 46057 NULL
++line6_midibuf_bytes_used_46059 line6_midibuf_bytes_used 0 46059 NULL
++__ocfs2_move_extent_46060 __ocfs2_move_extent 0-3-4-6-5 46060 NULL nohasharray
++dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
++slhc_toss_46066 slhc_toss 0 46066 NULL
++mgmt_event_46069 mgmt_event 4 46069 NULL
++xfrm_sadinfo_msgsize_46073 xfrm_sadinfo_msgsize 0 46073 NULL
++sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
++ata_host_alloc_46094 ata_host_alloc 2 46094 NULL
++mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
++ddp_clear_map_46152 ddp_clear_map 4 46152 NULL
++__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
++qlcnic_alloc_msix_entries_46160 qlcnic_alloc_msix_entries 2 46160 NULL nohasharray
++alloc_iova_46160 alloc_iova 2 46160 &qlcnic_alloc_msix_entries_46160
++vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
++i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
++tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
++dsp_write_46218 dsp_write 2 46218 NULL
++tx_abort_46232 tx_abort 0 46232 NULL
++xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL
++__le64_to_cpup_46257 __le64_to_cpup 0 46257 NULL
++ReadReg_46277 ReadReg 0 46277 NULL
++pep_alloc_skb_46303 pep_alloc_skb 3 46303 NULL
++pvclock_clocksource_read_46308 pvclock_clocksource_read 0 46308 NULL
++sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
++__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL
++iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
++smk_write_direct_46363 smk_write_direct 3 46363 NULL
++__iommu_calculate_agaw_46366 __iommu_calculate_agaw 2 46366 NULL
++fib_nlmsg_size_46383 fib_nlmsg_size 0 46383 NULL
++fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
++crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
++ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL
++cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
++parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL
++filldir64_46469 filldir64 3 46469 NULL
++ocfs2_reserve_clusters_with_limit_46479 ocfs2_reserve_clusters_with_limit 0 46479 NULL
++p9pdu_vreadf_46500 p9pdu_vreadf 0 46500 NULL
++mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
++uio_get_minor_46522 uio_get_minor 0 46522 NULL
++nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
++mv_get_hc_count_46554 mv_get_hc_count 0 46554 NULL
++link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
++ubi_wl_put_peb_46565 ubi_wl_put_peb 0 46565 NULL
++dn_current_mss_46574 dn_current_mss 0 46574 NULL
++serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
++vscnprintf_46617 vscnprintf 0-2 46617 NULL
++__kfifo_out_r_46623 __kfifo_out_r 3 46623 NULL
++request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
++aircable_process_packet_46639 aircable_process_packet 5 46639 NULL
++pci_enable_device_46642 pci_enable_device 0 46642 NULL
++cx18_v4l2_ioctl_46647 cx18_v4l2_ioctl 2 46647 NULL
++e1000_tx_map_46672 e1000_tx_map 4 46672 NULL
++iwl4965_ucode_rx_stats_read_46676 iwl4965_ucode_rx_stats_read 3 46676 NULL
++l2cap_parse_conf_rsp_46683 l2cap_parse_conf_rsp 0 46683 NULL
++alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
++__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
++erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
++ide_read_status_46719 ide_read_status 0 46719 NULL
++ctnetlink_nlmsg_size_46736 ctnetlink_nlmsg_size 0 46736 NULL
++snd_ymfpci_readl_46738 snd_ymfpci_readl 0 46738 NULL
++hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
++int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
++_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
++xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
++ieee80211_rx_radiotap_len_46846 ieee80211_rx_radiotap_len 0 46846 NULL
++spi_async_46857 spi_async 0 46857 NULL
++vsnprintf_46863 vsnprintf 0 46863 NULL
++hpi_read_word_nolock_46881 hpi_read_word_nolock 0 46881 NULL
++sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
++ol_dqblk_off_46904 ol_dqblk_off 2-3 46904 NULL
++tracing_ctrl_read_46922 tracing_ctrl_read 3 46922 NULL
++fb_write_46924 fb_write 3 46924 NULL
++btmrvl_curpsmode_read_46939 btmrvl_curpsmode_read 3 46939 NULL
++kvm_register_read_46948 kvm_register_read 0 46948 NULL
++__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
++calculate_alignment_46958 calculate_alignment 0-2 46958 NULL
++crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
++ath6kl_add_bss_if_needed_46978 ath6kl_add_bss_if_needed 5 46978 NULL
++strlcat_46985 strlcat 3 46985 NULL
++gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 NULL nohasharray
++sel_write_bool_46996 sel_write_bool 3 46996 &gfs2_xattr_system_set_46996
++ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
++blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
++__map_single_47020 __map_single 3-4-7-0 47020 NULL
++cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL
++ufs_new_fragments_47070 ufs_new_fragments 4-3-5 47070 NULL nohasharray
++ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0 47070 &ufs_new_fragments_47070
++xfrm_report_msgsize_47077 xfrm_report_msgsize 0 47077 NULL
++scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
++set_params_47113 set_params 0 47113 NULL
++mousedev_read_47123 mousedev_read 3 47123 NULL
++ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
++acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
++cxio_init_resource_fifo_random_47151 cxio_init_resource_fifo_random 3 47151 NULL
++rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
++svc_pool_map_alloc_arrays_47181 svc_pool_map_alloc_arrays 2 47181 NULL
++can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
++l2headersize_47238 l2headersize 0 47238 NULL
++options_write_47243 options_write 3 47243 NULL
++portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
++p9pdu_readf_47269 p9pdu_readf 0 47269 NULL
++ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
++tty_audit_log_47280 tty_audit_log 8 47280 NULL
++vsnprintf_47291 vsnprintf 0 47291 NULL
++tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
++channel_type_read_47308 channel_type_read 3 47308 NULL
++ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
++avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
++find_first_zero_bit_le_47369 find_first_zero_bit_le 2 47369 NULL
++__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
++trace_options_core_read_47390 trace_options_core_read 3 47390 NULL
++pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
++lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL
++ocfs2_resv_end_47408 ocfs2_resv_end 0 47408 NULL
++crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
++lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL
++posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
++newpart_47485 newpart 6-4 47485 NULL
++core_sys_select_47494 core_sys_select 1 47494 NULL
++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
++unlink_simple_47506 unlink_simple 3 47506 NULL
++ufs_inode_getblock_47512 ufs_inode_getblock 4 47512 NULL
++snd_pcm_resume_47530 snd_pcm_resume 0 47530 NULL
++vscnprintf_47533 vscnprintf 0-2 47533 NULL nohasharray
++process_vm_rw_47533 process_vm_rw 3-5 47533 &vscnprintf_47533
++einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL
++ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
++set_printer_interface_47551 set_printer_interface 0 47551 NULL
++read_ldt_47570 read_ldt 2 47570 NULL
++rpipe_get_idx_47579 rpipe_get_idx 2-0 47579 NULL
++vendorextnReadSection_47583 vendorextnReadSection 0 47583 NULL
++ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
++sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL
++uea_request_47613 uea_request 4 47613 NULL
++cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
++kvm_pv_mmu_write_47630 kvm_pv_mmu_write 2 47630 NULL
++ivtv_serialized_ioctl_47632 ivtv_serialized_ioctl 3 47632 NULL
++irq_set_chip_47638 irq_set_chip 1 47638 NULL
++__build_packet_message_47643 __build_packet_message 3-9 47643 NULL
++snd_pcm_info_47699 snd_pcm_info 0 47699 NULL
++packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray
++ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700
++bits_to_user_47733 bits_to_user 3-2 47733 NULL
++carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
++ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
++mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
++ext3_find_near_47752 ext3_find_near 0 47752 NULL
++alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
++i915_wedged_write_47771 i915_wedged_write 3 47771 NULL
++uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
++sst_prepare_output_buffers_47781 sst_prepare_output_buffers 4 47781 NULL
++tt_len_47789 tt_len 0-1 47789 NULL
++stmmac_set_bfsize_47834 stmmac_set_bfsize 0 47834 NULL
++ath6kl_wmi_set_appie_cmd_47855 ath6kl_wmi_set_appie_cmd 4 47855 NULL
++ubifs_unpack_nnode_47866 ubifs_unpack_nnode 0 47866 NULL
++vhci_read_47878 vhci_read 3 47878 NULL
++keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
++osd_req_read_sg_47905 osd_req_read_sg 5 47905 NULL
++comedi_write_47926 comedi_write 3 47926 NULL
++nf_nat_ftp_47948 nf_nat_ftp 5 47948 NULL
++cfg80211_testmode_alloc_reply_skb_47966 cfg80211_testmode_alloc_reply_skb 2 47966 NULL
++iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
++mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
++pnpacpi_parse_allocated_irqresource_47986 pnpacpi_parse_allocated_irqresource 2 47986 NULL
++mgmt_pending_add_47990 mgmt_pending_add 5 47990 NULL nohasharray
++dbg_port_buf_47990 dbg_port_buf 2 47990 &mgmt_pending_add_47990
++ib_umad_write_47993 ib_umad_write 3 47993 NULL
++ocfs2_find_refcount_split_pos_48001 ocfs2_find_refcount_split_pos 0 48001 NULL
++ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
++bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
++pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
++wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
++ocfs2_change_refcount_rec_48059 ocfs2_change_refcount_rec 0 48059 NULL
++posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
++c4iw_init_resource_fifo_48090 c4iw_init_resource_fifo 3 48090 NULL
++mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
++skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
++vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
++dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
++__tcp_push_pending_frames_48148 __tcp_push_pending_frames 2 48148 NULL
++bitmap_onto_48152 bitmap_onto 4 48152 NULL
++isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
++ocfs2_find_next_zero_bit_unaligned_48170 ocfs2_find_next_zero_bit_unaligned 2-3 48170 NULL
++init_ipath_48187 init_ipath 1 48187 NULL
++snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
++is_block_in_journal_48223 is_block_in_journal 3 48223 NULL
++uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
++read_file_recv_48232 read_file_recv 3 48232 NULL
++blk_rq_pos_48233 blk_rq_pos 0 48233 NULL
++nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
++usb_hcd_submit_urb_48256 usb_hcd_submit_urb 0 48256 NULL
++cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
++trace_options_write_48275 trace_options_write 3 48275 NULL
++pkt_bio_alloc_48284 pkt_bio_alloc 1 48284 NULL
++lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
++timblogiw_read_48305 timblogiw_read 3 48305 NULL
++hash_setkey_48310 hash_setkey 3 48310 NULL
++bcm_download_config_file_48313 bcm_download_config_file 0 48313 NULL
++skb_add_data_48363 skb_add_data 3 48363 NULL
++iscsi_complete_pdu_48372 iscsi_complete_pdu 4 48372 NULL
++lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
++snd_power_wait_48422 snd_power_wait 0 48422 NULL
++pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
++nfs4_alloc_pages_48426 nfs4_alloc_pages 1 48426 NULL
++wm8994_write_48439 wm8994_write 3 48439 NULL
++tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
++ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
++r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
++send_control_msg_48498 send_control_msg 6 48498 NULL
++mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
++diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL nohasharray
++iwl_legacy_dbgfs_status_read_48508 iwl_legacy_dbgfs_status_read 3 48508 &diva_os_copy_to_user_48508
++phantom_get_free_48514 phantom_get_free 0 48514 NULL
++ubi_dbg_check_write_48525 ubi_dbg_check_write 0 48525 NULL
++wiimote_hid_send_48528 wiimote_hid_send 3 48528 NULL
++drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL
++ext3_splice_branch_48531 ext3_splice_branch 6 48531 NULL
++ext_sd_execute_read_data_48589 ext_sd_execute_read_data 9 48589 NULL
++ufs_dtogd_48616 ufs_dtogd 0-2 48616 NULL
++do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
++lc_create_48662 lc_create 3 48662 NULL
++aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
++ore_get_rw_state_48667 ore_get_rw_state 5 48667 NULL
++sm501_create_subdev_48668 sm501_create_subdev 4-3 48668 NULL nohasharray
++sys_setgroups_48668 sys_setgroups 1 48668 &sm501_create_subdev_48668
++ubi_eba_unmap_leb_48671 ubi_eba_unmap_leb 0 48671 NULL
++l2cap_build_cmd_48676 l2cap_build_cmd 4 48676 NULL
++hysdn_log_write_48694 hysdn_log_write 3 48694 NULL
++altera_drscan_48698 altera_drscan 2 48698 NULL
++kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
++recv_msg_48709 recv_msg 4 48709 NULL
++lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL
++RFTrackingFiltersCorrection_48722 RFTrackingFiltersCorrection 0 48722 NULL
++disconnect_48738 disconnect 4 48738 NULL
++ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
++event_buffer_read_48772 event_buffer_read 3 48772 NULL
++icmp_manip_pkt_48801 icmp_manip_pkt 2 48801 NULL
++twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
++register_ftrace_profiler_48816 register_ftrace_profiler 0 48816 NULL nohasharray
++tcp_push_one_48816 tcp_push_one 2 48816 &register_ftrace_profiler_48816
++atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
++azx_get_position_48841 azx_get_position 0 48841 NULL
++vc_do_resize_48842 vc_do_resize 4-3 48842 NULL
++viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL
++__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
++sys_setgroups16_48882 sys_setgroups16 1 48882 NULL
++get_num_ops_48886 get_num_ops 0 48886 NULL
++ext2_alloc_branch_48889 ext2_alloc_branch 4 48889 NULL
++crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
++mac_drv_rx_init_48898 mac_drv_rx_init 2 48898 NULL nohasharray
++joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 &mac_drv_rx_init_48898
++xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
++msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
++snd_pcm_update_hw_ptr_48925 snd_pcm_update_hw_ptr 0 48925 NULL
++lpfc_sli4_get_els_iocb_cnt_48926 lpfc_sli4_get_els_iocb_cnt 0 48926 NULL
++event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
++_alloc_set_attr_list_48991 _alloc_set_attr_list 4 48991 NULL
++rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
++sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
++xd_rw_49020 xd_rw 3-4 49020 NULL
++aic_inb_49023 aic_inb 0 49023 NULL
++transient_status_49027 transient_status 4 49027 NULL
++ubi_read_49061 ubi_read 0 49061 NULL
++l2cap_bredr_sig_cmd_49065 l2cap_bredr_sig_cmd 3 49065 NULL
++mirror_status_49073 mirror_status 4 49073 NULL
++vmx_set_msr_49090 vmx_set_msr 3 49090 NULL
++scsi_register_49094 scsi_register 2 49094 NULL
++compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
++receive_client_update_packet_49104 receive_client_update_packet 3 49104 NULL
++xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
++pt_read_49136 pt_read 3 49136 NULL
++iwl_legacy_dbgfs_fh_reg_read_49144 iwl_legacy_dbgfs_fh_reg_read 3 49144 NULL nohasharray
++tipc_multicast_49144 tipc_multicast 5 49144 &iwl_legacy_dbgfs_fh_reg_read_49144
++ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
++ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
++ext4_free_clusters_after_init_49174 ext4_free_clusters_after_init 2 49174 NULL
++__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL
++dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
++root_nfs_cat_49192 root_nfs_cat 3 49192 NULL
++iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
++do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
++osd_req_add_get_attr_list_49278 osd_req_add_get_attr_list 3 49278 NULL
++__ext4_ext_dirty_49284 __ext4_ext_dirty 0 49284 NULL
++viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL
++uio_read_49300 uio_read 3 49300 NULL
++ocfs2_resmap_find_free_bits_49301 ocfs2_resmap_find_free_bits 3 49301 NULL
++beiscsi_session_create_49304 beiscsi_session_create 2 49304 NULL
++__intel_map_single_49338 __intel_map_single 3-2 49338 NULL
++cfpkt_setlen_49343 cfpkt_setlen 2 49343 NULL
++joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
++ocfs2_remove_btree_range_49370 ocfs2_remove_btree_range 4-3-5 49370 NULL
++px_raw_event_49371 px_raw_event 4 49371 NULL
++iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
++applesmc_create_nodes_49392 applesmc_create_nodes 2 49392 NULL
++rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
++iwl_legacy_dbgfs_nvm_read_49405 iwl_legacy_dbgfs_nvm_read 3 49405 NULL
++tnode_alloc_49407 tnode_alloc 1 49407 NULL
++samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
++md_domain_init_49432 md_domain_init 2 49432 NULL
++i915_gem_object_set_to_gtt_domain_49450 i915_gem_object_set_to_gtt_domain 0 49450 NULL
++ocfs2_merge_rec_left_49455 ocfs2_merge_rec_left 0 49455 NULL
++agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
++xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
++dn_nsp_do_disc_49474 dn_nsp_do_disc 6 49474 NULL
++esp4_get_mtu_49483 esp4_get_mtu 0-2 49483 NULL
++isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
++emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL nohasharray
++__sock_recvmsg_nosec_49520 __sock_recvmsg_nosec 0 49520 &emulator_write_phys_49520
++smk_write_access_49561 smk_write_access 3 49561 NULL
++alloc_chunk_49575 alloc_chunk 1 49575 NULL
++sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
++readfifo_49583 readfifo 1 49583 NULL
++isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
++heap_init_49617 heap_init 2 49617 NULL
++smk_write_doi_49621 smk_write_doi 3 49621 NULL
++port_fops_read_49626 port_fops_read 3 49626 NULL
++svm_set_msr_49643 svm_set_msr 3 49643 NULL
++aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
++__setup_irq_49696 __setup_irq 0 49696 NULL
++sys_gethostname_49698 sys_gethostname 2 49698 NULL
++cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
++get_key_haup_common_49709 get_key_haup_common 4 49709 NULL
++write_pool_49718 write_pool 3 49718 NULL
++sys_fsetxattr_49736 sys_fsetxattr 4 49736 NULL
++check_frame_49741 check_frame 0 49741 NULL
++zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
++btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
++nci_skb_alloc_49757 nci_skb_alloc 2 49757 NULL
++key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
++fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
++w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL
++pohmelfs_send_xattr_req_49783 pohmelfs_send_xattr_req 6 49783 NULL
++ceph_osdc_readpages_49789 ceph_osdc_readpages 0-10-4 49789 NULL
++be_num_rxqs_want_49794 be_num_rxqs_want 0 49794 NULL
++nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
++ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
++b1dma_tolink_49834 b1dma_tolink 0 49834 NULL
++iraw_loop_49842 iraw_loop 0-1 49842 NULL
++vmw_execbuf_process_49845 vmw_execbuf_process 5 49845 NULL
++scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
++timeradd_entry_49850 timeradd_entry 3 49850 NULL
++ubifs_destroy_tnc_subtree_49853 ubifs_destroy_tnc_subtree 0 49853 NULL
++sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
++snd_mask_eq_49889 snd_mask_eq 0 49889 NULL
++ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
++config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL
++hpi_stream_estimate_buffer_size_49965 hpi_stream_estimate_buffer_size 2 49965 NULL
++b43legacy_pio_read_49978 b43legacy_pio_read 0 49978 NULL
++ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
++drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL
++dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
++isdn_read_50021 isdn_read 3 50021 NULL
++rbd_req_write_50041 rbd_req_write 4-5 50041 NULL
++alloc_ebda_hpc_50046 alloc_ebda_hpc 2-1 50046 NULL
++fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL
++arch_setup_ht_irq_50073 arch_setup_ht_irq 1 50073 NULL
++call_usermodehelper_fns_50078 call_usermodehelper_fns 0 50078 NULL
++__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL
++dev_set_alias_50084 dev_set_alias 3 50084 NULL
++pcpu_get_vm_areas_50085 pcpu_get_vm_areas 3 50085 NULL
++sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
++altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
++read_file_slot_50111 read_file_slot 3 50111 NULL
++pn544_fw_read_50112 pn544_fw_read 0 50112 NULL
++rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL
++ocfs2_search_one_group_50125 ocfs2_search_one_group 0 50125 NULL
++copy_items_50140 copy_items 6 50140 NULL
++kmalloc_node_50163 kmalloc_node 1 50163 NULL
++ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL
++odev_update_50169 odev_update 2 50169 NULL
++ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 NULL nohasharray
++ubi_resize_volume_50172 ubi_resize_volume 2 50172 &ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172
++ib_send_cm_drep_50186 ib_send_cm_drep 3 50186 NULL
++mthca_buddy_init_50206 mthca_buddy_init 2 50206 NULL
++l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
++ieee80211_skb_resize_50211 ieee80211_skb_resize 3 50211 NULL
++mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
++sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
++afs_extract_data_50261 afs_extract_data 5 50261 NULL
++qfq_calc_index_50263 qfq_calc_index 2-1 50263 NULL
++rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL nohasharray
++gart_free_coherent_50286 gart_free_coherent 4-2 50286 &rxrpc_setsockopt_50286
++soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL
++iterate_irefs_50313 iterate_irefs 0 50313 NULL
++cifs_readdata_alloc_50318 cifs_readdata_alloc 1 50318 NULL
++do_launder_page_50329 do_launder_page 0 50329 NULL
++lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
++ocfs2_block_to_cluster_group_50337 ocfs2_block_to_cluster_group 2 50337 NULL nohasharray
++snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 &ocfs2_block_to_cluster_group_50337
++tpm_read_50344 tpm_read 3 50344 NULL
++sched_clock_remote_50347 sched_clock_remote 0 50347 NULL
++isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
++unpack_u16_chunk_50357 unpack_u16_chunk 0 50357 NULL
++ocfs2_figure_insert_type_50362 ocfs2_figure_insert_type 0 50362 NULL nohasharray
++iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 &ocfs2_figure_insert_type_50362
++xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
++sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
++inet_nlmsg_size_50399 inet_nlmsg_size 0 50399 NULL
++snd_mask_refine_last_50406 snd_mask_refine_last 0 50406 NULL
++l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
++iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
++pgctrl_write_50453 pgctrl_write 3 50453 NULL
++device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL
++cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
++pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
++fwnet_receive_packet_50537 fwnet_receive_packet 9 50537 NULL
++ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
++usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL
++hme_read_desc32_50574 hme_read_desc32 0 50574 NULL
++pep_reply_50582 pep_reply 5 50582 NULL
++iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
++build_inv_iommu_pages_50589 build_inv_iommu_pages 2-3 50589 NULL
++sge_rx_50594 sge_rx 3 50594 NULL
++ocfs2_split_extent_50618 ocfs2_split_extent 0 50618 NULL
++add_uevent_var_50620 add_uevent_var 0 50620 NULL
++GET_WORD_50624 GET_WORD 0 50624 NULL
++__ffs_50625 __ffs 0 50625 NULL
++macvtap_alloc_skb_50629 macvtap_alloc_skb 2-4-3 50629 NULL
++simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
++ocfs2_readlink_50656 ocfs2_readlink 3 50656 NULL
++ocfs2_do_insert_extent_50658 ocfs2_do_insert_extent 0 50658 NULL
++sys_readv_50664 sys_readv 3 50664 NULL
++ext2_try_to_allocate_with_rsv_50669 ext2_try_to_allocate_with_rsv 2-4-0 50669 NULL
++btmrvl_psstate_read_50683 btmrvl_psstate_read 3 50683 NULL
++prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL
++get_wear_leveling_table_len_50712 get_wear_leveling_table_len 0 50712 NULL
++__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
++skb_padto_50759 skb_padto 2 50759 NULL
++udp_manip_pkt_50770 udp_manip_pkt 2 50770 NULL
++ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
++tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL nohasharray
++pipe_handler_request_50774 pipe_handler_request 5 50774 &tm6000_read_write_usb_50774
++bio_alloc_map_data_50782 bio_alloc_map_data 2-1 50782 NULL
++ixgbe_acquire_msix_vectors_50789 ixgbe_acquire_msix_vectors 2 50789 NULL
++tpm_write_50798 tpm_write 3 50798 NULL
++tun_do_read_50800 tun_do_read 4 50800 NULL
++write_flush_50803 write_flush 3 50803 NULL
++dvb_play_50814 dvb_play 3 50814 NULL
++acpi_ev_install_gpe_block_50829 acpi_ev_install_gpe_block 2 50829 NULL
++pstore_mkfile_50830 pstore_mkfile 5 50830 NULL
++create_mem_extents_50835 create_mem_extents 0 50835 NULL
++videobuf_dma_init_user_50839 videobuf_dma_init_user 4-3 50839 NULL
++ChannelConfiguration_50853 ChannelConfiguration 0 50853 NULL
++carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
++netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
++osd_req_write_sg_50908 osd_req_write_sg 5 50908 NULL
++xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
++blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
++hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
++chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
++sock_bindtodevice_50942 sock_bindtodevice 3 50942 NULL
++mld_newpack_50950 mld_newpack 2 50950 NULL
++ocfs2_add_refcount_flag_50952 ocfs2_add_refcount_flag 6 50952 NULL
++sdio_uart_write_50954 sdio_uart_write 3 50954 NULL
++iwl_statistics_flag_50981 iwl_statistics_flag 3-0 50981 NULL
++timeout_write_50991 timeout_write 3 50991 NULL
++proc_write_51003 proc_write 3 51003 NULL
++jbd2_journal_extend_51012 jbd2_journal_extend 0 51012 NULL
++lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
++ntfs_attr_find_51028 ntfs_attr_find 0 51028 NULL nohasharray
++fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 &ntfs_attr_find_51028
++BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
++dump_midi_51040 dump_midi 3 51040 NULL
++usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL
++do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
++wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
++jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
++__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
++read_file_wiphy_51103 read_file_wiphy 3 51103 NULL
++iscsi_nop_out_rsp_51117 iscsi_nop_out_rsp 4 51117 NULL
++xfs_file_splice_read_51121 xfs_file_splice_read 4 51121 NULL
++nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
++alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
++wl1271_cmd_build_probe_req_51141 wl1271_cmd_build_probe_req 3-5 51141 NULL
++xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
++walk_page_buffers_51170 walk_page_buffers 0 51170 NULL
++snd_pcm_unlink_51210 snd_pcm_unlink 0 51210 NULL
++snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
++tipc_send_51238 tipc_send 4 51238 NULL
++drm_property_create_51239 drm_property_create 4 51239 NULL
++st_read_51251 st_read 3 51251 NULL
++compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
++dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
++ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
++zone_reclaimable_pages_51283 zone_reclaimable_pages 0 51283 NULL
++pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
++xfrm_count_enc_supported_51290 xfrm_count_enc_supported 0 51290 NULL
++buffDnldVerify_51297 buffDnldVerify 0 51297 NULL
++ocfs2_read_inode_block_51319 ocfs2_read_inode_block 0 51319 NULL
++alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
++ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
++alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
++ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
++ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
++radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
++ieee80211_wx_set_gen_ie_51399 ieee80211_wx_set_gen_ie 3 51399 NULL
++ceph_sync_read_51410 ceph_sync_read 3-0 51410 NULL
++blk_register_region_51424 blk_register_region 1-2 51424 NULL
++mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
++econet_sendmsg_51430 econet_sendmsg 4 51430 NULL
++ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
++print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
++____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
++xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
++btrfs_find_space_cluster_51482 btrfs_find_space_cluster 5 51482 NULL
++kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL
++__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
++ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
++snd_interval_refine_first_51589 snd_interval_refine_first 0 51589 NULL
++rfcomm_tty_write_51603 rfcomm_tty_write 3 51603 NULL
++table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
++dns_resolve_server_name_to_ip_51632 dns_resolve_server_name_to_ip 0 51632 NULL
++sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
++iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
++get_new_cssid_51665 get_new_cssid 2 51665 NULL
++ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
++sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
++sel_write_access_51704 sel_write_access 3 51704 NULL
++gem_alloc_skb_51715 gem_alloc_skb 2 51715 NULL
++drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
++sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
++msg_set_51725 msg_set 3 51725 NULL
++dbg_check_lpt_nodes_51727 dbg_check_lpt_nodes 0 51727 NULL
++cm4040_read_51732 cm4040_read 3 51732 NULL
++pwc_video_read_51735 pwc_video_read 3 51735 NULL
++hid_parse_report_51737 hid_parse_report 3 51737 NULL
++get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL
++ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
++if_write_51756 if_write 3 51756 NULL
++iio_buffer_add_channel_sysfs_51766 iio_buffer_add_channel_sysfs 0 51766 NULL
++spin_time_accum_blocked_51769 spin_time_accum_blocked 1 51769 NULL
++swiotlb_init_with_tbl_51770 swiotlb_init_with_tbl 2 51770 NULL
++__fswab32_51781 __fswab32 0 51781 NULL
++l2cap_create_iframe_pdu_51801 l2cap_create_iframe_pdu 3 51801 NULL
++qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
++buffer_from_user_51826 buffer_from_user 3 51826 NULL
++ioread32_51847 ioread32 0 51847 NULL nohasharray
++read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
++do_readv_writev_51849 do_readv_writev 4 51849 NULL
++pointer_size_read_51863 pointer_size_read 3 51863 NULL
++mlx4_alloc_db_from_pgdir_51865 mlx4_alloc_db_from_pgdir 3 51865 NULL
++get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
++user_read_51881 user_read 3 51881 NULL
++dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
++virt_to_phys_51896 virt_to_phys 0 51896 NULL
++iio_read_first_n_sw_rb_51911 iio_read_first_n_sw_rb 2 51911 NULL
++tipc_createport_51914 tipc_createport 2 51914 NULL
++dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
++xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
++scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
++ab8500_bank_write_51960 ab8500_bank_write 3 51960 NULL
++snd_mask_min_51969 snd_mask_min 0 51969 NULL
++__blkdev_get_51972 __blkdev_get 0 51972 NULL
++twl6030_init_irq_51979 twl6030_init_irq 2 51979 NULL
++ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
++scsi_sysfs_add_host_52010 scsi_sysfs_add_host 0 52010 NULL
++skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
++rdmalt_52022 rdmalt 0 52022 NULL
++vxge_rx_alloc_52024 vxge_rx_alloc 3 52024 NULL
++override_release_52032 override_release 2 52032 NULL
++end_port_52042 end_port 0 52042 NULL
++dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
++msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
++dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
++nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
++o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
++bcm_compare_buff_contents_52124 bcm_compare_buff_contents 0 52124 NULL
++retry_count_read_52129 retry_count_read 3 52129 NULL
++snd_pcm_channel_info_user_52135 snd_pcm_channel_info_user 0 52135 NULL
++hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL nohasharray
++ext2_alloc_blocks_52145 ext2_alloc_blocks 2 52145 &hysdn_conf_write_52145
++wait_gpio_52146 wait_gpio 0 52146 NULL
++__le16_to_cpup_52155 __le16_to_cpup 0 52155 NULL
++ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
++mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
++proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
++iscsi_if_send_reply_52219 iscsi_if_send_reply 7 52219 NULL nohasharray
++iwl_dbgfs_wd_timeout_write_52219 iwl_dbgfs_wd_timeout_write 3 52219 &iscsi_if_send_reply_52219
++_alloc_mISDN_skb_52232 _alloc_mISDN_skb 3 52232 NULL
++ocfs2_try_to_merge_extent_52244 ocfs2_try_to_merge_extent 0 52244 NULL
++shrink_slab_52261 shrink_slab 2-3 52261 NULL
++sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
++smk_write_load_list_52280 smk_write_load_list 3 52280 NULL
++handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
++ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
++jbd2_free_52306 jbd2_free 2 52306 NULL
++kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
++hwflags_read_52318 hwflags_read 3 52318 NULL
++snd_pcm_hw_free_52327 snd_pcm_hw_free 0 52327 NULL
++test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
++iwl3945_ucode_rx_stats_read_52340 iwl3945_ucode_rx_stats_read 3 52340 NULL
++bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
++copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
++iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
++ocfs2_journal_access_eb_52377 ocfs2_journal_access_eb 0 52377 NULL
++pfkey_sockaddr_pair_size_52378 pfkey_sockaddr_pair_size 0 52378 NULL
++isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
++jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
++aer_inject_write_52399 aer_inject_write 3 52399 NULL
++cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
++line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
++hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL
++ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
++nl80211_send_mgmt_tx_status_52445 nl80211_send_mgmt_tx_status 5 52445 NULL
++alauda_read_data_52452 alauda_read_data 3 52452 NULL
++ip6_skb_dst_mtu_52457 ip6_skb_dst_mtu 0 52457 NULL
++ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1-2 52477 NULL
++usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL
++ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
++skb_cow_head_52495 skb_cow_head 2 52495 NULL
++int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
++netlbl_unlabel_init_52506 netlbl_unlabel_init 1 52506 NULL
++pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
++bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
++dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
++raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
++x86_setup_msi_irqs_52535 x86_setup_msi_irqs 0 52535 NULL
++dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
++ocfs2_make_right_split_rec_52562 ocfs2_make_right_split_rec 3 52562 NULL
++debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL
++snd_pcm_sw_params_52594 snd_pcm_sw_params 0 52594 NULL
++xfs_file_buffered_aio_write_52609 xfs_file_buffered_aio_write 4 52609 NULL
++iwl_legacy_dbgfs_channels_read_52619 iwl_legacy_dbgfs_channels_read 3 52619 NULL
++__iter_shared_inline_ref_inodes_52668 __iter_shared_inline_ref_inodes 0 52668 NULL
++dirty_poll_interval_52669 dirty_poll_interval 1-2 52669 NULL
++ntfs_get_nr_significant_bytes_52688 ntfs_get_nr_significant_bytes 0 52688 NULL
++vendorextnWriteSection_52698 vendorextnWriteSection 0 52698 NULL
++cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
++blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
++relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
++alloc_irte_52741 alloc_irte 3 52741 NULL
++carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
++radeon_get_ib_value_52757 radeon_get_ib_value 0 52757 NULL
++debug_lpm_write_52830 debug_lpm_write 3 52830 NULL
++bl_mark_sectors_init_52831 bl_mark_sectors_init 2-3 52831 NULL
++pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
++ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
++mon_bin_get_event_52863 mon_bin_get_event 6-4 52863 NULL
++iwl_legacy_dbgfs_clear_traffic_statistics_write_52866 iwl_legacy_dbgfs_clear_traffic_statistics_write 3 52866 NULL
++qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL
++pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
++cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
++ubi_wl_flush_52900 ubi_wl_flush 0 52900 NULL
++create_vtbl_52909 create_vtbl 0 52909 NULL
++__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
++__iio_device_attr_init_52936 __iio_device_attr_init 0 52936 NULL
++ip_nat_sdp_port_52938 ip_nat_sdp_port 6 52938 NULL
++norm_maxw_52951 norm_maxw 0 52951 NULL nohasharray
++__nodes_remap_52951 __nodes_remap 5 52951 &norm_maxw_52951
++store_disp_52952 store_disp 4 52952 NULL
++send_packet_52960 send_packet 4 52960 NULL
++ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
++num_node_state_52989 num_node_state 0 52989 NULL
++xfs_rtfree_extent_53024 xfs_rtfree_extent 2-3 53024 NULL
++ocfs2_new_leaf_refcount_block_53036 ocfs2_new_leaf_refcount_block 0 53036 NULL
++bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
++ValidateHWParmStructure_53048 ValidateHWParmStructure 0 53048 NULL
++kobject_uevent_53065 kobject_uevent 0 53065 NULL
++cfi_read_query_53066 cfi_read_query 0 53066 NULL
++iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL
++mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
++pcbit_readw_53084 pcbit_readw 0 53084 NULL
++insert_new_root_53097 insert_new_root 0 53097 NULL
++line6_dumpreq_initbuf_53123 line6_dumpreq_initbuf 3 53123 NULL
++clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
++__pci_enable_device_flags_53213 __pci_enable_device_flags 0 53213 NULL
++sctp_make_fwdtsn_53265 sctp_make_fwdtsn 3 53265 NULL
++btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
++pn544_i2c_read_53270 pn544_i2c_read 0 53270 NULL
++lirc_buffer_init_53282 lirc_buffer_init 3-2 53282 NULL
++determine_dirtyable_memory_53290 determine_dirtyable_memory 0 53290 NULL
++ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL
++gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
++vendorextnIoctl_53350 vendorextnIoctl 0 53350 NULL
++mmc_resume_host_53353 mmc_resume_host 0 53353 NULL nohasharray
++bnx2i_send_nl_mesg_53353 bnx2i_send_nl_mesg 4 53353 &mmc_resume_host_53353
++get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
++tsi721_open_outb_mbox_53397 tsi721_open_outb_mbox 4 53397 NULL
++roccat_common_receive_53407 roccat_common_receive 4 53407 NULL
++i915_gem_execbuffer_relocate_object_53435 i915_gem_execbuffer_relocate_object 0 53435 NULL
++isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
++mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL nohasharray
++snd_dma_alloc_pages_53447 snd_dma_alloc_pages 3 53447 &mwifiex_info_read_53447
++apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
++rds_tcp_data_recv_53476 rds_tcp_data_recv 3-4 53476 NULL
++iowarrior_read_53483 iowarrior_read 3 53483 NULL
++osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
++do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
++ext4_ext_grow_indepth_53503 ext4_ext_grow_indepth 0 53503 NULL
++snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
++dbAllocNext_53506 dbAllocNext 0 53506 NULL
++ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
++check_acl_53512 check_acl 0 53512 NULL
++set_registers_53582 set_registers 3 53582 NULL
++__readw_53594 __readw 0 53594 NULL
++pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
++___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
++xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
++ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
++uapsd_max_sp_len_read_53651 uapsd_max_sp_len_read 3 53651 NULL
++nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
++orig_hash_add_if_53676 orig_hash_add_if 2 53676 NULL nohasharray
++_preload_range_53676 _preload_range 2-3 53676 &orig_hash_add_if_53676
++fuse_fill_write_pages_53682 fuse_fill_write_pages 4 53682 NULL
++bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL
++i830_write_fence_reg_53695 i830_write_fence_reg 0 53695 NULL
++phy_read_1bit_53708 phy_read_1bit 0 53708 NULL
++find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
++bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
++__ocfs2_resv_find_window_53721 __ocfs2_resv_find_window 3 53721 NULL
++subscr_named_msg_event_53723 subscr_named_msg_event 6 53723 NULL
++wdm_write_53735 wdm_write 3 53735 NULL
++ext3_try_to_allocate_with_rsv_53737 ext3_try_to_allocate_with_rsv 3-5-0 53737 NULL
++lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 NULL nohasharray
++amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 &lpfc_idiag_queacc_read_qe_53755
++ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
++__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
++regmap_raw_write_53803 regmap_raw_write 4 53803 NULL
++lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
++nls_nullsize_53815 nls_nullsize 0 53815 NULL
++setup_data_read_53822 setup_data_read 3 53822 NULL
++multipath_status_53836 multipath_status 4 53836 NULL
++i915_gem_flush_ring_53843 i915_gem_flush_ring 0 53843 NULL
++pms_read_53873 pms_read 3 53873 NULL
++ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
++ocfs2_rm_xattr_cluster_53900 ocfs2_rm_xattr_cluster 4-5-3 53900 NULL
++proc_file_read_53905 proc_file_read 3 53905 NULL
++azx_via_get_position_53916 azx_via_get_position 0 53916 NULL
++tcp_mss_split_point_53925 tcp_mss_split_point 0-4-3 53925 NULL
++usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL
++ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 0-5-4 53938 NULL
++xfs_mod_sb_53960 xfs_mod_sb 2 53960 NULL
++mlx4_num_eq_uar_53965 mlx4_num_eq_uar 0 53965 NULL
++idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
++__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
++snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL
++cmpk_message_handle_tx_54024 cmpk_message_handle_tx 4 54024 NULL
++ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
++nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
++wl12xx_rx_get_buf_size_54070 wl12xx_rx_get_buf_size 0 54070 NULL
++_malloc_54077 _malloc 1 54077 NULL
++bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
++altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL
++create_xattr_54106 create_xattr 5 54106 NULL
++strn_len_54122 strn_len 0 54122 NULL
++store_sys_acpi_54129 store_sys_acpi 4 54129 NULL
++isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
++c4iw_pblpool_alloc_54148 c4iw_pblpool_alloc 2 54148 NULL
++i2400m_zrealloc_2x_54166 i2400m_zrealloc_2x 3 54166 NULL nohasharray
++memcpy_toiovec_54166 memcpy_toiovec 3 54166 &i2400m_zrealloc_2x_54166
++p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL
++devm_request_threaded_irq_54215 devm_request_threaded_irq 0 54215 NULL
++do_sys_poll_54221 do_sys_poll 2 54221 NULL
++__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
++_format_mac_addr_54229 _format_mac_addr 2-0 54229 NULL
++pi_read_regr_54231 pi_read_regr 0 54231 NULL
++jbd2__journal_restart_54249 jbd2__journal_restart 0 54249 NULL
++xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
++ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
++wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
++audio_write_54261 audio_write 4 54261 &wusb_prf_54261
++mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
++kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL
++altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
++dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
++ttm_mem_global_alloc_54299 ttm_mem_global_alloc 0 54299 NULL
++sprintf_54306 sprintf 0 54306 NULL
++pn_raw_send_54330 pn_raw_send 2 54330 NULL
++br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
++__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
++_osd_realloc_seg_54352 _osd_realloc_seg 3 54352 NULL nohasharray
++__get_free_pages_54352 __get_free_pages 0 54352 &_osd_realloc_seg_54352
++tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
++read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
++vfs_readlink_54368 vfs_readlink 3 54368 NULL
++do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL
++ah_alloc_tmp_54378 ah_alloc_tmp 2-3 54378 NULL
++gart_unmap_page_54379 gart_unmap_page 3-2 54379 NULL
++sysfs_dir_llseek_54385 sysfs_dir_llseek 2 54385 NULL
++snd_pcm_oss_read2_54387 snd_pcm_oss_read2 3-0 54387 NULL
++iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
++add_packet_54433 add_packet 3 54433 NULL
++__btrfs_alloc_chunk_54445 __btrfs_alloc_chunk 0 54445 NULL
++do_chunk_alloc_54457 do_chunk_alloc 0 54457 NULL
++simple_strtoull_54493 simple_strtoull 0 54493 NULL
++cifs_idmap_key_instantiate_54503 cifs_idmap_key_instantiate 3 54503 NULL
++l2cap_create_basic_pdu_54508 l2cap_create_basic_pdu 3 54508 NULL
++btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
++cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
++rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
++viacam_read_54526 viacam_read 3 54526 NULL
++unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
++setsockopt_54539 setsockopt 5 54539 NULL
++lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL
++nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
++fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
++xfrm_polexpire_msgsize_54589 xfrm_polexpire_msgsize 0 54589 NULL
++fwSendNullPacket_54618 fwSendNullPacket 2 54618 NULL
++port_fops_write_54627 port_fops_write 3 54627 NULL
++setup_cluster_bitmap_54649 setup_cluster_bitmap 4 54649 NULL
++_regulator_enable_54655 _regulator_enable 0 54655 NULL
++dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
++bus_add_device_54665 bus_add_device 0 54665 NULL
++bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
++evm_read_key_54674 evm_read_key 3 54674 NULL
++addtgt_54703 addtgt 3 54703 NULL
++rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
++_add_sg_continuation_descriptor_54721 _add_sg_continuation_descriptor 3 54721 NULL
++ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
++kzalloc_54740 kzalloc 1 54740 NULL
++drm_mode_crtc_set_gamma_size_54742 drm_mode_crtc_set_gamma_size 2 54742 NULL
++wep_iv_read_54744 wep_iv_read 3 54744 NULL
++lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL
++xfs_rtallocate_extent_block_54791 xfs_rtallocate_extent_block 5 54791 NULL
++flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
++domain_init_54797 domain_init 2 54797 NULL
++ext3_find_goal_54801 ext3_find_goal 0 54801 NULL
++get_dev_size_54807 get_dev_size 0 54807 NULL
++nfsd_write_54809 nfsd_write 6 54809 NULL
++aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL nohasharray
++crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 &aes_decrypt_fail_read_54815
++generic_perform_write_54832 generic_perform_write 3 54832 NULL
++write_rio_54837 write_rio 3 54837 NULL
++ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 NULL
++edac_mc_alloc_54846 edac_mc_alloc 1 54846 NULL
++scsi_add_host_54847 scsi_add_host 0 54847 NULL
++ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
++printer_read_54851 printer_read 3 54851 NULL
++assign_irq_vector_54852 assign_irq_vector 0 54852 NULL
++em28xx_isoc_dvb_max_packetsize_54854 em28xx_isoc_dvb_max_packetsize 0 54854 NULL
++alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
++broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
++prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
++tcf_csum_ipv6_tcp_54877 tcf_csum_ipv6_tcp 4 54877 NULL
++iscsi_pool_init_54913 iscsi_pool_init 4-2 54913 NULL nohasharray
++kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913
++btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
++add_port_54941 add_port 2 54941 NULL
++alauda_write_data_54967 alauda_write_data 3 54967 NULL
++c4_add_card_54968 c4_add_card 3 54968 NULL
++__proc_file_read_54978 __proc_file_read 3 54978 NULL
++ubi_change_vtbl_record_54979 ubi_change_vtbl_record 0 54979 NULL
++brcmf_sdcard_send_buf_54980 brcmf_sdcard_send_buf 6 54980 NULL
++_queue_data_54983 _queue_data 4 54983 NULL
++ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
++rds_ib_inc_copy_to_user_55007 rds_ib_inc_copy_to_user 3 55007 NULL
++cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
++ext4_ext_handle_uninitialized_extents_55059 ext4_ext_handle_uninitialized_extents 0-6 55059 NULL
++__netdev_alloc_skb_ip_align_55067 __netdev_alloc_skb_ip_align 2 55067 NULL
++apei_exec_run_55075 apei_exec_run 0 55075 NULL
++set_interface_55085 set_interface 0 55085 NULL
++snd_pcm_capture_hw_avail_55086 snd_pcm_capture_hw_avail 0 55086 NULL
++PropagateCalParamsFromFlashToMemory_55099 PropagateCalParamsFromFlashToMemory 0 55099 NULL
++rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
++kmalloc_large_55111 kmalloc_large 1 55111 NULL
++crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
++ocfs2_prepare_refcount_change_for_del_55137 ocfs2_prepare_refcount_change_for_del 0-3 55137 NULL nohasharray
++filldir_55137 filldir 3 55137 &ocfs2_prepare_refcount_change_for_del_55137
++validate_vid_hdr_55145 validate_vid_hdr 0 55145 NULL
++ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
++sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
++sched_feat_write_55202 sched_feat_write 3 55202 NULL
++ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL nohasharray
++isdn_net_ciscohdlck_alloc_skb_55209 isdn_net_ciscohdlck_alloc_skb 2 55209 &ht40allow_map_read_55209
++__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
++do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
++sctp_abort_pkt_new_55218 sctp_abort_pkt_new 5 55218 NULL
++dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
++tipc_port_reject_sections_55229 tipc_port_reject_sections 5 55229 NULL
++ext4_ext_convert_to_initialized_55235 ext4_ext_convert_to_initialized 0 55235 NULL
++memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
++lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL
++rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
++gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
++wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
++vme_user_read_55338 vme_user_read 3 55338 NULL
++sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL nohasharray
++__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 &sctp_datamsg_from_user_55342
++acpi_system_read_event_55362 acpi_system_read_event 3 55362 NULL
++__domain_mapping_55393 __domain_mapping 5 55393 NULL
++mm_to_dma_pfn_55394 mm_to_dma_pfn 0-1 55394 NULL
++iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
++alloc_skb_55439 alloc_skb 1 55439 NULL
++__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
++isdnhdlc_decode_55466 isdnhdlc_decode 0 55466 NULL
++cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
++snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
++i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
++ocfs2_rec_clusters_55501 ocfs2_rec_clusters 0 55501 NULL
++ext4_flex_bg_size_55502 ext4_flex_bg_size 0 55502 NULL
++cfpkt_pad_trail_55511 cfpkt_pad_trail 2 55511 NULL
++ea_get_55522 ea_get 3-0 55522 NULL
++set_msr_interception_55538 set_msr_interception 2 55538 NULL
++_regulator_is_enabled_55550 _regulator_is_enabled 0 55550 NULL
++add_partition_55588 add_partition 2 55588 NULL
++kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
++macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
++selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
++snd_pcm_hw_param_last_55624 snd_pcm_hw_param_last 0 55624 NULL
++reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 NULL nohasharray
++pktgen_if_write_55628 pktgen_if_write 3 55628 &reiserfs_xattr_get_55628
++mlx4_buddy_alloc_55647 mlx4_buddy_alloc 2 55647 NULL
++xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
++read_oldmem_55658 read_oldmem 3 55658 NULL
++lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
++get_info_55681 get_info 3 55681 NULL
++iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL
++pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
++ocfs2_lock_refcount_tree_55719 ocfs2_lock_refcount_tree 0 55719 NULL
++prepare_reply_55734 prepare_reply 4 55734 NULL
++__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2-3 55738 NULL
++strlen_55778 strlen 0 55778 NULL nohasharray
++is_idx_node_in_tnc_55778 is_idx_node_in_tnc 0 55778 &strlen_55778
++req_bio_endio_55786 req_bio_endio 3 55786 NULL
++rtnl_vfinfo_size_55794 rtnl_vfinfo_size 0 55794 NULL
++uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
++iwl_legacy_dbgfs_sensitivity_read_55816 iwl_legacy_dbgfs_sensitivity_read 3 55816 NULL
++sb16_copy_from_user_55836 sb16_copy_from_user 7-6-10 55836 NULL
++xfs_da_buf_make_55845 xfs_da_buf_make 1 55845 NULL
++ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
++hcd_alloc_coherent_55862 hcd_alloc_coherent 5-0 55862 NULL
++shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
++__check_block_validity_55869 __check_block_validity 0 55869 NULL
++pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
++snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
++kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL
++sel_read_policy_55947 sel_read_policy 3 55947 NULL
++handle_response_55951 handle_response 5 55951 NULL
++simple_read_from_buffer_55957 simple_read_from_buffer 5-2 55957 NULL
++acct_stack_growth_56023 acct_stack_growth 0 56023 NULL
++dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
++pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
++em28xx_write_reg_bits_56107 em28xx_write_reg_bits 0 56107 NULL
++sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
++write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL
++ath6kl_wmi_bssinfo_event_rx_56146 ath6kl_wmi_bssinfo_event_rx 3 56146 NULL
++rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
++create_irq_nr_56180 create_irq_nr 1-0 56180 NULL
++skb_headroom_56200 skb_headroom 0 56200 NULL
++ocfs2_journal_access_gd_56209 ocfs2_journal_access_gd 0 56209 NULL
++ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
++cp210x_get_config_56229 cp210x_get_config 4 56229 NULL
++do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
++fd_copyin_56247 fd_copyin 3 56247 NULL
++p9pdu_vreadf_56271 p9pdu_vreadf 0 56271 NULL
++dvb_aplay_56296 dvb_aplay 3 56296 NULL
++btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
++speakup_file_write_56310 speakup_file_write 3 56310 NULL
++journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
++snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
++ipv6_recv_error_56347 ipv6_recv_error 3 56347 NULL
++vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
++iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4-0 56368 NULL
++dev_read_56369 dev_read 3 56369 NULL
++snd_pcm_common_ioctl1_56382 snd_pcm_common_ioctl1 0 56382 NULL
++ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
++do_get_write_access_56410 do_get_write_access 0 56410 NULL
++store_msg_56417 store_msg 3 56417 NULL
++pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
++fl_create_56435 fl_create 5 56435 NULL
++gnttab_map_56439 gnttab_map 2 56439 NULL
++ata_scsi_add_hosts_56448 ata_scsi_add_hosts 0 56448 NULL
++cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL
++osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
++lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL
++calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
++crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
++cfg80211_connect_result_56515 cfg80211_connect_result 6-4 56515 NULL
++iwl_legacy_dbgfs_rx_queue_read_56533 iwl_legacy_dbgfs_rx_queue_read 3 56533 NULL
++l1oip_socket_recv_56537 l1oip_socket_recv 6 56537 NULL
++ip_options_get_56538 ip_options_get 4 56538 NULL
++tcp_cwnd_test_56547 tcp_cwnd_test 0 56547 NULL
++ocfs2_change_extent_flag_56549 ocfs2_change_extent_flag 5 56549 NULL
++alloc_apertures_56561 alloc_apertures 1 56561 NULL
++rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
++portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
++event_filter_write_56609 event_filter_write 3 56609 NULL
++gather_array_56641 gather_array 3 56641 NULL
++dlm_dir_lookup_56662 dlm_dir_lookup 4 56662 NULL
++tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL
++btrfs_cow_block_56678 btrfs_cow_block 0 56678 NULL
++snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
++dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL
++sta_flags_read_56710 sta_flags_read 3 56710 NULL
++ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
++__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
++__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL
++pcpu_populate_chunk_56741 pcpu_populate_chunk 3-2 56741 NULL
++drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
++mfd_add_devices_56753 mfd_add_devices 4 56753 NULL
++alloc_iommu_56778 alloc_iommu 2-3-0 56778 NULL
++__carl9170_rx_56784 __carl9170_rx 3 56784 NULL
++ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 NULL
++ext4_ext_rm_idx_56827 ext4_ext_rm_idx 0 56827 NULL
++snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL
++ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
++pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
++__bitmap_clear_bits_56912 __bitmap_clear_bits 3 56912 NULL
++__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
++journal_init_revoke_56933 journal_init_revoke 2 56933 NULL nohasharray
++CopyBufferToControlPacket_56933 CopyBufferToControlPacket 0 56933 &journal_init_revoke_56933
++diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
++vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
++btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
++snd_dma_alloc_pages_fallback_57029 snd_dma_alloc_pages_fallback 3 57029 NULL
++skb_network_offset_57043 skb_network_offset 0 57043 NULL nohasharray
++ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 &skb_network_offset_57043
++bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
++cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
++sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
++pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
++sis190_try_rx_copy_57069 sis190_try_rx_copy 3 57069 NULL
++thin_status_57084 thin_status 4 57084 NULL
++tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
++altera_get_note_57099 altera_get_note 6 57099 NULL
++snd_pcm_hw_params_old_user_57108 snd_pcm_hw_params_old_user 0 57108 NULL
++crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
++sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
++cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
++nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 NULL nohasharray
++rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 &nl80211_send_deauth_57136 nohasharray
++ima_show_htable_value_57136 ima_show_htable_value 2 57136 &rds_ib_sub_signaled_57136
++snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
++stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
++extent_from_logical_57179 extent_from_logical 0 57179 NULL nohasharray
++rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 &extent_from_logical_57179
++sys_poll_57190 sys_poll 2 57190 NULL
++ocfs2_claim_metadata_57192 ocfs2_claim_metadata 0 57192 NULL
++ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
++oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
++lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL
++pstore_file_read_57288 pstore_file_read 3 57288 NULL
++snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
++ath6kl_buf_alloc_57304 ath6kl_buf_alloc 1 57304 NULL
++ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
++write_file_regval_57313 write_file_regval 3 57313 NULL
++ocfs2_xattr_shrink_size_57328 ocfs2_xattr_shrink_size 3 57328 NULL
++usblp_read_57342 usblp_read 3 57342 NULL
++print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
++read_file_blob_57406 read_file_blob 3 57406 NULL
++enclosure_register_57412 enclosure_register 3 57412 NULL
++compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL nohasharray
++alloc_ftrace_hash_57431 alloc_ftrace_hash 1 57431 &compat_keyctl_instantiate_key_iov_57431
++copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
++sys_pselect6_57449 sys_pselect6 1 57449 NULL
++ReadReg_57453 ReadReg 0 57453 NULL
++__roundup_pow_of_two_57461 __roundup_pow_of_two 0-1 57461 NULL
++crypto_tfm_alg_blocksize_57463 crypto_tfm_alg_blocksize 0 57463 NULL nohasharray
++send_midi_async_57463 send_midi_async 3 57463 &crypto_tfm_alg_blocksize_57463
++iwl4965_statistics_flag_57466 iwl4965_statistics_flag 3-0 57466 NULL nohasharray
++sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 &iwl4965_statistics_flag_57466
++ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL
++ocfs2_write_cluster_57483 ocfs2_write_cluster 8-9-2 57483 NULL
++nl80211_send_mgmt_57497 nl80211_send_mgmt 6 57497 NULL
++skb_headlen_57501 skb_headlen 0 57501 NULL
++copy_in_user_57502 copy_in_user 3 57502 NULL
++ks8842_read32_57505 ks8842_read32 0 57505 NULL nohasharray
++ckhdid_printf_57505 ckhdid_printf 2 57505 &ks8842_read32_57505
++init_tag_map_57515 init_tag_map 3 57515 NULL
++cmm_read_57520 cmm_read 3 57520 NULL
++inode_permission_57531 inode_permission 0 57531 NULL
++ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL nohasharray
++ext4_group_first_block_no_57559 ext4_group_first_block_no 0-2 57559 &ReadHDLCPnP_57559
++snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
++get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL
++iwl4965_rs_sta_dbgfs_scale_table_write_57595 iwl4965_rs_sta_dbgfs_scale_table_write 3 57595 NULL
++sk_stream_alloc_skb_57622 sk_stream_alloc_skb 2 57622 NULL
++osdmap_set_max_osd_57630 osdmap_set_max_osd 2 57630 NULL nohasharray
++sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 &osdmap_set_max_osd_57630
++mem_read_57631 mem_read 3 57631 NULL
++pci_enable_msi_block_57632 pci_enable_msi_block 0 57632 NULL
++msi_compose_msg_57637 msi_compose_msg 0 57637 NULL
++sys_mq_timedsend_57661 sys_mq_timedsend 3 57661 NULL
++r3964_write_57662 r3964_write 4 57662 NULL
++__lgwrite_57669 __lgwrite 4 57669 NULL
++ieee80211_MFIE_rate_len_57692 ieee80211_MFIE_rate_len 0 57692 NULL
++i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
++snd_interval_value_57713 snd_interval_value 0 57713 NULL
++calgary_alloc_coherent_57714 calgary_alloc_coherent 2 57714 NULL
++aa_matching_read_57720 aa_matching_read 3 57720 NULL
++vm_find_vqs_57729 vm_find_vqs 0 57729 NULL
++compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL nohasharray
++pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 &compat_sys_set_mempolicy_57742
++ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
++read_block_for_search_57781 read_block_for_search 0 57781 NULL
++apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL
++ld2_57794 ld2 0 57794 NULL
++ivtv_read_57796 ivtv_read 3 57796 NULL
++bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
++copy_to_user_57835 copy_to_user 3-0 57835 NULL
++flash_read_57843 flash_read 3 57843 NULL
++tt_response_fill_table_57902 tt_response_fill_table 1 57902 NULL
++xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
++emi26_writememory_57908 emi26_writememory 4 57908 NULL
++atomic_add_return_unchecked_57910 atomic_add_return_unchecked 0-1 57910 NULL nohasharray
++iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 &atomic_add_return_unchecked_57910
++__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
++sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
++xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL
++rx_57944 rx 4 57944 NULL
++key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
++ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
++ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
++i915_cache_sharing_write_57961 i915_cache_sharing_write 3 57961 NULL
++hfc_empty_fifo_57972 hfc_empty_fifo 2 57972 NULL
++stripe_status_57985 stripe_status 4 57985 NULL
++rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
++regcache_rbtree_insert_to_block_58009 regcache_rbtree_insert_to_block 5 58009 NULL
++iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
++io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
++mce_async_out_58056 mce_async_out 3 58056 NULL
++ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
++cm4040_write_58079 cm4040_write 3 58079 NULL nohasharray
++usb_stor_acquire_resources_58079 usb_stor_acquire_resources 0 58079 &cm4040_write_58079
++rfcomm_wmalloc_58090 rfcomm_wmalloc 2 58090 NULL
++i915_add_request_58096 i915_add_request 0 58096 NULL
++savemem_58129 savemem 3 58129 NULL
++ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
++slhc_init_58135 slhc_init 2-1 58135 &ipv6_flowlabel_opt_58135
++ocfs2_reserve_clusters_58164 ocfs2_reserve_clusters 0 58164 NULL
++garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
++asix_write_cmd_58192 asix_write_cmd 5 58192 NULL
++ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
++nci_send_cmd_58206 nci_send_cmd 3 58206 NULL
++sysfs_add_file_mode_58222 sysfs_add_file_mode 0 58222 NULL
++read_file_debug_58256 read_file_debug 3 58256 NULL
++cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
++profile_load_58267 profile_load 3 58267 NULL
++kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL
++acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
++iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
++my_skb_head_push_58297 my_skb_head_push 2 58297 NULL
++ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
++next_pidmap_58347 next_pidmap 2-0 58347 NULL
++vmalloc_to_sg_58354 vmalloc_to_sg 2 58354 NULL
++sctp_make_init_58401 sctp_make_init 4 58401 NULL
++idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 NULL
++i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
++iscsi_offload_mesg_58425 iscsi_offload_mesg 5 58425 NULL
++__iio_add_chan_devattr_58451 __iio_add_chan_devattr 0 58451 NULL
++capabilities_read_58457 capabilities_read 3 58457 NULL
++lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
++compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
++snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
++snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
++rndis_add_response_58544 rndis_add_response 2 58544 NULL
++efx_tsoh_heap_alloc_58545 efx_tsoh_heap_alloc 2 58545 NULL
++wrap_max_58548 wrap_max 0-1-2 58548 NULL
++gen_pool_alloc_58558 gen_pool_alloc 2 58558 NULL
++wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
++scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
++get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL
++ea_read_inline_58589 ea_read_inline 0 58589 NULL
++xip_file_read_58592 xip_file_read 3 58592 NULL
++ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL
++ixj_read_58615 ixj_read 3 58615 NULL
++skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
++filemap_fdatawrite_range_58630 filemap_fdatawrite_range 0 58630 NULL
++vb2_qbuf_58631 vb2_qbuf 0 58631 NULL
++module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL
++ocfs2_block_to_cluster_start_58653 ocfs2_block_to_cluster_start 2 58653 NULL
++iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL
++uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
++scsi_setup_command_freelist_58703 scsi_setup_command_freelist 0 58703 NULL
++vx_send_msg_58711 vx_send_msg 0 58711 NULL
++csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL
++frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
++ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
++agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
++__do_config_autodelink_58763 __do_config_autodelink 3 58763 NULL
++regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
++raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
++ep_read_58813 ep_read 3 58813 NULL
++command_write_58841 command_write 3 58841 NULL
++ocfs2_truncate_log_append_58850 ocfs2_truncate_log_append 0-3 58850 NULL
++iwl_dbgfs_traffic_log_read_58870 iwl_dbgfs_traffic_log_read 3 58870 NULL
++gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
++print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
++st5481_isoc_flatten_58952 st5481_isoc_flatten 0 58952 NULL
++netpoll_send_udp_58955 netpoll_send_udp 3 58955 NULL
++wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL
++crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
++max3107_handlerx_58978 max3107_handlerx 2 58978 NULL
++handle_rx_packet_58993 handle_rx_packet 3 58993 NULL
++ep_write_59008 ep_write 3 59008 NULL
++lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL
++receive_server_sync_packet_59021 receive_server_sync_packet 3 59021 NULL
++selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
++crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
++i8042_enable_kbd_port_59049 i8042_enable_kbd_port 0 59049 NULL
++mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
++scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
++__iio_add_event_config_attrs_59136 __iio_add_event_config_attrs 0 59136 NULL
++print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
++framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
++ocfs2_claim_local_alloc_bits_59147 ocfs2_claim_local_alloc_bits 0 59147 NULL
++radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
++pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
++mthca_create_eq_59157 mthca_create_eq 2 59157 NULL
++setup_window_59178 setup_window 4-2-5-7 59178 NULL
++ocfs2_move_extent_59187 ocfs2_move_extent 2-5-3 59187 NULL
++InitLedSettings_59192 InitLedSettings 0 59192 NULL
++validate_exec_list_59204 validate_exec_list 0 59204 NULL
++xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
++fast_rx_path_59214 fast_rx_path 3 59214 NULL
++check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 NULL nohasharray
++inftl_partscan_59216 inftl_partscan 0 59216 &check_mapped_selector_name_59216
++dt3155_read_59226 dt3155_read 3 59226 NULL
++tcp_try_rmem_schedule_59231 tcp_try_rmem_schedule 2 59231 NULL
++tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
++solo_v4l2_read_59247 solo_v4l2_read 3 59247 NULL
++nla_len_59258 nla_len 0 59258 NULL
++__push_leaf_right_59302 __push_leaf_right 0 59302 NULL
++btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
++fd_copyout_59323 fd_copyout 3 59323 NULL
++read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
++xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
++__map_request_59350 __map_request 0 59350 NULL
++xfs_dir2_sf_entsize_59366 xfs_dir2_sf_entsize 0-2 59366 NULL
++pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
++journal_init_dev_59384 journal_init_dev 5 59384 NULL
++fc_frame_alloc_fill_59394 fc_frame_alloc_fill 2 59394 NULL
++pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL
++vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
++snd_pcm_tstamp_59431 snd_pcm_tstamp 0 59431 NULL
++squashfs_read_data_59440 squashfs_read_data 6 59440 NULL
++descriptor_loc_59446 descriptor_loc 3 59446 NULL
++shrink_tnc_trees_59481 shrink_tnc_trees 0 59481 NULL
++ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
++rds_pin_pages_59507 rds_pin_pages 0 59507 NULL
++tunables_write_59563 tunables_write 3 59563 NULL
++__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL
++write_pbl_59583 write_pbl 4 59583 NULL
++memdup_user_59590 memdup_user 2 59590 NULL
++fcoe_ctlr_vn_send_59607 fcoe_ctlr_vn_send 4 59607 NULL
++mtrr_write_59622 mtrr_write 3 59622 NULL
++ocfs2_adjust_rightmost_branch_59623 ocfs2_adjust_rightmost_branch 0 59623 NULL
++ip_vs_icmp_xmit_59624 ip_vs_icmp_xmit 4 59624 NULL
++find_first_zero_bit_59636 find_first_zero_bit 0-2 59636 NULL
++dn_fib_nlmsg_size_59643 dn_fib_nlmsg_size 0 59643 NULL
++ubifs_setxattr_59650 ubifs_setxattr 4 59650 NULL nohasharray
++hidraw_read_59650 hidraw_read 3 59650 &ubifs_setxattr_59650
++v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
++paravirt_sched_clock_59660 paravirt_sched_clock 0 59660 NULL
++tcp_skb_pcount_59664 tcp_skb_pcount 0 59664 NULL
++alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
++ieee80211_mgmt_tx_59699 ieee80211_mgmt_tx 9 59699 NULL
++mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
++ioperm_get_59701 ioperm_get 4-3 59701 NULL
++snd_pcm_info_user_59711 snd_pcm_info_user 0 59711 NULL
++prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
++nfs_file_splice_read_59735 nfs_file_splice_read 4 59735 NULL
++sock_rmalloc_59740 sock_rmalloc 2 59740 NULL nohasharray
++ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 &sock_rmalloc_59740
++qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
++strnlen_59746 strnlen 0 59746 NULL nohasharray
++fuse_file_llseek_59746 fuse_file_llseek 2 59746 &strnlen_59746
++ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
++long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
++venus_remove_59781 venus_remove 4 59781 NULL
++xlog_do_recover_59789 xlog_do_recover 3 59789 NULL
++ipw_write_59807 ipw_write 3 59807 NULL
++rtllib_wx_set_gen_ie_59808 rtllib_wx_set_gen_ie 3 59808 NULL
++ubi_dbg_check_all_ff_59810 ubi_dbg_check_all_ff 0 59810 NULL
++scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
++ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
++gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
++fs64_to_cpu_59845 fs64_to_cpu 0 59845 NULL
++tun_put_user_59849 tun_put_user 4 59849 NULL
++format_array_59854 format_array 0 59854 NULL
++pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
++shmem_zero_setup_59885 shmem_zero_setup 0 59885 NULL
++l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
++ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
++ocfs2_extend_rotate_transaction_59894 ocfs2_extend_rotate_transaction 0 59894 NULL
++swiotlb_map_page_59909 swiotlb_map_page 3 59909 NULL
++ocfs2_expand_inline_ref_root_59945 ocfs2_expand_inline_ref_root 0 59945 NULL
++dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 NULL
++__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
++osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
++ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
++rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
++mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
++osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
++xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
++sys_sched_getaffinity_60033 sys_sched_getaffinity 2 60033 NULL
++bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
++do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
++vcs_size_60050 vcs_size 0 60050 NULL nohasharray
++pin_2_irq_60050 pin_2_irq 0-3 60050 &vcs_size_60050
++load_module_60056 load_module 2 60056 NULL nohasharray
++gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 &load_module_60056
++compat_writev_60063 compat_writev 3 60063 NULL
++c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
++mp_register_gsi_60079 mp_register_gsi 2 60079 NULL
++rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
++ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
++ld_usb_read_60156 ld_usb_read 3 60156 NULL
++jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
++init_state_60165 init_state 2 60165 NULL
++sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
++jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
++ib_send_cm_mra_60202 ib_send_cm_mra 4 60202 NULL nohasharray
++qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 &ib_send_cm_mra_60202
++store_iwmct_log_level_60209 store_iwmct_log_level 4 60209 NULL
++pvclock_scale_delta_60231 pvclock_scale_delta 0 60231 NULL
++compat_sys_fcntl64_60256 compat_sys_fcntl64 3 60256 NULL
++printer_write_60276 printer_write 3 60276 NULL
++__pskb_pull_tail_60287 __pskb_pull_tail 2 60287 NULL
++do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
++ext3_dir_llseek_60298 ext3_dir_llseek 2 60298 NULL
++getDataLength_60301 getDataLength 0 60301 NULL
++usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL
++__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL
++brcmf_alloc_wdev_60347 brcmf_alloc_wdev 1 60347 NULL
++rh_call_control_60349 rh_call_control 0 60349 NULL
++dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
++mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
++ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
++tveeprom_read_60397 tveeprom_read 3 60397 NULL
++driver_names_read_60399 driver_names_read 3 60399 NULL
++simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
++excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
++tstats_write_60432 tstats_write 3 60432 NULL nohasharray
++kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
++tipc_buf_acquire_60437 tipc_buf_acquire 1 60437 NULL
++rx_data_60442 rx_data 4 60442 NULL
++tcf_csum_ipv4_igmp_60446 tcf_csum_ipv4_igmp 3 60446 NULL
++iwm_ntf_rx_packet_60452 iwm_ntf_rx_packet 3 60452 NULL
++crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
++ath_tx_init_60515 ath_tx_init 2 60515 NULL
++ubi_wl_get_peb_60525 ubi_wl_get_peb 0 60525 NULL
++hysdn_sched_rx_60533 hysdn_sched_rx 3 60533 NULL
++v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
++tracing_entries_write_60563 tracing_entries_write 3 60563 NULL
++skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
++wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
++usb_control_msg_60624 usb_control_msg 0 60624 NULL
++acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
++free_dind_blocks_60635 free_dind_blocks 0 60635 NULL
++if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
++ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
++init_data_container_60709 init_data_container 1 60709 NULL
++vga_rcrt_60731 vga_rcrt 0 60731 NULL
++add_to_list_60744 add_to_list 0 60744 NULL
++snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
++sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
++opticon_write_60775 opticon_write 4 60775 NULL
++acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
++snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
++pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
++pool_status_60861 pool_status 4 60861 NULL
++ieee80211_send_auth_60865 ieee80211_send_auth 5 60865 NULL
++alloc_irq_from_60868 alloc_irq_from 1-0 60868 NULL
++generic_writepages_60871 generic_writepages 0 60871 NULL
++ubifs_read_one_lp_60882 ubifs_read_one_lp 0 60882 NULL
++mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
++xen_clocksource_read_60918 xen_clocksource_read 0 60918 NULL
++set_powered_60938 set_powered 4 60938 NULL
++xfs_rtallocate_extent_size_60939 xfs_rtallocate_extent_size 4 60939 NULL
++pti_char_write_60960 pti_char_write 3 60960 NULL
++mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
++blkio_get_key_name_61014 blkio_get_key_name 4 61014 NULL
++ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
++lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL
++symtab_init_61050 symtab_init 2 61050 NULL
++fuse_send_write_61053 fuse_send_write 0 61053 NULL
++snd_pcm_pause_61054 snd_pcm_pause 0 61054 NULL
++bitmap_scnlistprintf_61062 bitmap_scnlistprintf 2-4-0 61062 NULL
++ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
++snd_pcm_update_hw_ptr0_61084 snd_pcm_update_hw_ptr0 0 61084 NULL
++get_derived_key_61100 get_derived_key 4 61100 NULL
++alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
++p80211_headerlen_61119 p80211_headerlen 0 61119 NULL nohasharray
++__probe_kernel_read_61119 __probe_kernel_read 3 61119 &p80211_headerlen_61119
++proto_ports_offset_61125 proto_ports_offset 0 61125 NULL
++vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
++afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
++event_oom_late_read_61175 event_oom_late_read 3 61175 NULL
++sys_lsetxattr_61177 sys_lsetxattr 4 61177 NULL
++cfpkt_append_61206 cfpkt_append 3 61206 NULL
++arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
++pn544_write_61215 pn544_write 3 61215 NULL
++smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
++__verify_planes_array_61249 __verify_planes_array 0 61249 NULL
++find_get_pages_tag_61270 find_get_pages_tag 0 61270 NULL
++kick_a_thread_61273 kick_a_thread 0 61273 NULL
++vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL
++sys_add_key_61288 sys_add_key 4 61288 NULL
++ext4_issue_discard_61305 ext4_issue_discard 2 61305 NULL
++xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
++__fls_61340 __fls 0-1 61340 NULL nohasharray
++st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 &__fls_61340
++set_params_61373 set_params 0 61373 NULL
++change_xattr_61390 change_xattr 5 61390 NULL
++system_enable_write_61396 system_enable_write 3 61396 NULL
++pm860x_bulk_read_61415 pm860x_bulk_read 3 61415 NULL
++i915_emit_box_61436 i915_emit_box 0 61436 NULL
++dma_ops_area_alloc_61440 dma_ops_area_alloc 3-4-5-0 61440 NULL
++unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
++snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
++btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
++ocfs2_get_refcount_rec_61514 ocfs2_get_refcount_rec 0 61514 NULL
++clone_bio_61526 clone_bio 5 61526 NULL nohasharray
++erst_errno_61526 erst_errno 0 61526 &clone_bio_61526
++ntfs_attr_lookup_61539 ntfs_attr_lookup 0 61539 NULL
++trace_options_core_write_61551 trace_options_core_write 3 61551 NULL
++o2hb_pop_count_61553 o2hb_pop_count 2 61553 NULL
++dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
++rbd_do_request_61561 rbd_do_request 6-7 61561 NULL
++parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL
++fan_proc_write_61569 fan_proc_write 3 61569 NULL
++ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
++mip_minify_61584 mip_minify 2-1-0 61584 NULL
++seq_open_private_61589 seq_open_private 3 61589 NULL
++netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
++cx2341x_handler_init_61601 cx2341x_handler_init 2 61601 NULL
++configfs_write_file_61621 configfs_write_file 3 61621 NULL
++ieee80211_rx_bss_info_61630 ieee80211_rx_bss_info 3 61630 NULL
++i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
++snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
++resize_stripes_61650 resize_stripes 2 61650 NULL
++ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL
++insert_one_name_61668 insert_one_name 7 61668 NULL
++snd_pcm_playback_avail_61671 snd_pcm_playback_avail 0 61671 NULL
++qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
++lock_loop_61681 lock_loop 1 61681 NULL
++filter_read_61692 filter_read 3 61692 NULL
++iov_length_61716 iov_length 0 61716 NULL
++fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
++read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
++read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
++gfs2_meta_wait_61773 gfs2_meta_wait 0 61773 NULL
++num_counter_active_61789 num_counter_active 0 61789 NULL
++mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL
++btrfs_file_llseek_61838 btrfs_file_llseek 2 61838 NULL
++bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
++btrfs_bitmap_cluster_61854 btrfs_bitmap_cluster 4 61854 NULL
++evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
++get_fw_name_61874 get_fw_name 3 61874 NULL
++ieee80211_rtl_auth_challenge_61897 ieee80211_rtl_auth_challenge 3 61897 NULL
++ax25_addr_size_61899 ax25_addr_size 0 61899 NULL nohasharray
++cxgb4_pktgl_to_skb_61899 cxgb4_pktgl_to_skb 2 61899 &ax25_addr_size_61899
++roundup_ring_size_61901 roundup_ring_size 1 61901 NULL
++clear_refs_write_61904 clear_refs_write 3 61904 NULL
++au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
++sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
++ocfs2_reserve_new_metadata_blocks_61926 ocfs2_reserve_new_metadata_blocks 0 61926 NULL
++send_bulk_static_data_61932 send_bulk_static_data 3 61932 NULL
++cluster_pages_for_defrag_61956 cluster_pages_for_defrag 0 61956 NULL
++squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
++mlx4_alloc_mtt_range_61966 mlx4_alloc_mtt_range 2 61966 NULL
++ocfs2_quota_write_61972 ocfs2_quota_write 5-4 61972 NULL
++fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
++cow_file_range_61979 cow_file_range 3 61979 NULL
++ext4_da_get_block_prep_61987 ext4_da_get_block_prep 2 61987 NULL
++module_alloc_exec_61991 module_alloc_exec 1 61991 NULL
++virtnet_send_command_61993 virtnet_send_command 5-6 61993 NULL
++dequeue_event_62000 dequeue_event 3 62000 NULL
++xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
++jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
++pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
++proc_fdinfo_read_62043 proc_fdinfo_read 3 62043 NULL
++ppp_tx_cp_62044 ppp_tx_cp 5 62044 NULL
++sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
++do_pselect_62061 do_pselect 1 62061 NULL
++pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
++get_domain_for_dev_62099 get_domain_for_dev 2 62099 NULL
++jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
++ip_recv_error_62117 ip_recv_error 3 62117 NULL
++generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
++llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
++qib_diag_write_62133 qib_diag_write 3 62133 NULL nohasharray
++kobject_add_varg_62133 kobject_add_varg 0 62133 &qib_diag_write_62133
++ql_status_62135 ql_status 5 62135 NULL nohasharray
++device_add_attrs_62135 device_add_attrs 0 62135 &ql_status_62135
++video_usercopy_62151 video_usercopy 2 62151 NULL
++wrmWithLock_62164 wrmWithLock 0 62164 NULL
++prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
++alloc_upcall_62186 alloc_upcall 2 62186 NULL
++global_page_state_62202 global_page_state 0 62202 NULL
++btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
++sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
++check_unicast_packet_62217 check_unicast_packet 2 62217 NULL
++hash_new_62224 hash_new 1 62224 NULL
++nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
++send_control_msg_62261 send_control_msg 5 62261 NULL
++ocfs2_find_victim_alloc_group_62306 ocfs2_find_victim_alloc_group 0 62306 NULL
++subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
++udf_sb_alloc_partition_maps_62313 udf_sb_alloc_partition_maps 2 62313 NULL
++hfcpci_empty_bfifo_62323 hfcpci_empty_bfifo 4 62323 NULL
++Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
++flash_write_62354 flash_write 3 62354 NULL
++xfpregs_set_62363 xfpregs_set 4 62363 NULL
++altera_irscan_62396 altera_irscan 2 62396 NULL
++get_fn_size_62431 get_fn_size 0 62431 NULL
++udplite_manip_pkt_62433 udplite_manip_pkt 2 62433 NULL
++netdev_alloc_skb_62437 netdev_alloc_skb 2 62437 NULL
++e1000_check_copybreak_62448 e1000_check_copybreak 3 62448 NULL
++ocfs2_path_bh_journal_access_62504 ocfs2_path_bh_journal_access 0 62504 NULL
++count_open_files_62524 count_open_files 0 62524 NULL nohasharray
++pep_sendmsg_62524 pep_sendmsg 4 62524 &count_open_files_62524
++store_pwm1_62529 store_pwm1 4 62529 NULL
++test_iso_queue_62534 test_iso_queue 5 62534 NULL
++debugfs_read_62535 debugfs_read 3 62535 NULL
++sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
++qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
++xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
++packet_alloc_skb_62602 packet_alloc_skb 2-5-4 62602 NULL
++prism2_send_mgmt_62605 prism2_send_mgmt 4 62605 NULL nohasharray
++nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 &prism2_send_mgmt_62605
++iommu_area_alloc_62619 iommu_area_alloc 2-3-4-7-0 62619 NULL
++iwl_dbgfs_force_reset_read_62628 iwl_dbgfs_force_reset_read 3 62628 NULL
++lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
++tt_changes_fill_buffer_62649 tt_changes_fill_buffer 3 62649 NULL
++ima_file_mmap_62663 ima_file_mmap 0 62663 NULL
++write_62671 write 3 62671 NULL
++printer_req_alloc_62687 printer_req_alloc 2 62687 NULL nohasharray
++iwl_dbgfs_rx_statistics_read_62687 iwl_dbgfs_rx_statistics_read 3 62687 &printer_req_alloc_62687
++ext4_ind_map_blocks_62690 ext4_ind_map_blocks 0 62690 NULL
++adxl34x_i2c_read_block_62691 adxl34x_i2c_read_block 3 62691 NULL
++bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
++rdm_62719 rdm 0 62719 NULL
++key_replays_read_62746 key_replays_read 3 62746 NULL
++mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL
++ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
++scrub_chunk_62771 scrub_chunk 4 62771 NULL nohasharray
++page_key_alloc_62771 page_key_alloc 0 62771 &scrub_chunk_62771
++tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
++__generic_file_splice_read_62834 __generic_file_splice_read 4 62834 NULL
++BeceemEEPROMBulkRead_62835 BeceemEEPROMBulkRead 0 62835 NULL
++__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0-1 62836 NULL
++xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
++rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
++genlmsg_msg_size_62845 genlmsg_msg_size 0-1 62845 NULL
++read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL
++hpi_read_word_62862 hpi_read_word 0 62862 NULL
++nfs_writedata_alloc_62868 nfs_writedata_alloc 1 62868 NULL
++aoechr_write_62883 aoechr_write 3 62883 NULL
++resize_info_buffer_62889 resize_info_buffer 2 62889 NULL
++if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
++ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL
++mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
++getdqbuf_62908 getdqbuf 1 62908 NULL
++agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
++get_skb_63008 get_skb 2 63008 NULL
++kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL
++PTR_ERR_63033 PTR_ERR 0 63033 NULL nohasharray
++__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 &PTR_ERR_63033
++scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
++unlink1_63059 unlink1 3 63059 NULL
++ocfs2_decrease_refcount_63078 ocfs2_decrease_refcount 0-4-3 63078 NULL
++compare_lebs_63098 compare_lebs 0 63098 NULL
++brcmf_alloc_pkt_and_read_63116 brcmf_alloc_pkt_and_read 2 63116 NULL nohasharray
++iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 &brcmf_alloc_pkt_and_read_63116
++ib_send_cm_rtu_63138 ib_send_cm_rtu 3 63138 NULL
++snd_pcm_status_user_63140 snd_pcm_status_user 0 63140 NULL
++ubifs_change_one_lp_63157 ubifs_change_one_lp 0 63157 NULL
++dma_set_mask_63172 dma_set_mask 0 63172 NULL
++snd_pcm_lib_malloc_pages_63182 snd_pcm_lib_malloc_pages 2 63182 NULL
++vme_master_read_63221 vme_master_read 0 63221 NULL
++module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
++sched_domain_node_span_63234 sched_domain_node_span 1 63234 NULL
++ptp_read_63251 ptp_read 4 63251 NULL
++ntfs_attr_can_be_non_resident_63267 ntfs_attr_can_be_non_resident 0 63267 NULL
++readword_63288 readword 0 63288 NULL
++tcp_collapse_63294 tcp_collapse 5-6 63294 NULL
++isdn_ppp_ccp_xmit_reset_63297 isdn_ppp_ccp_xmit_reset 6 63297 NULL
++dns_resolver_instantiate_63314 dns_resolver_instantiate 3 63314 NULL
++proc_info_read_63344 proc_info_read 3 63344 NULL
++ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
++idmouse_read_63374 idmouse_read 3 63374 NULL
++edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL
++rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
++noack_read_63419 noack_read 3 63419 NULL
++l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
++iwl_dbgfs_debug_level_read_63430 iwl_dbgfs_debug_level_read 3 63430 NULL
++brcmu_pkttotlen_63431 brcmu_pkttotlen 0 63431 NULL
++kone_send_63435 kone_send 4 63435 NULL
++nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
++snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
++do_work_63483 do_work 0 63483 NULL
++read_kcore_63488 read_kcore 3 63488 NULL nohasharray
++get_gpio_63488 get_gpio 0 63488 &read_kcore_63488
++snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
++ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4-0 63534 NULL
++snapshot_status_63538 snapshot_status 4 63538 NULL
++if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
++append_to_buffer_63550 append_to_buffer 3 63550 NULL
++kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 NULL
++ubifs_lpt_scan_nolock_63572 ubifs_lpt_scan_nolock 0 63572 NULL
++ocfs2_calc_trunc_pos_63576 ocfs2_calc_trunc_pos 4 63576 NULL
++ext3_clear_blocks_63597 ext3_clear_blocks 4-5 63597 NULL
++mlx4_ib_alloc_cq_buf_63610 mlx4_ib_alloc_cq_buf 3 63610 NULL
++module_alloc_63630 module_alloc 1 63630 NULL
++symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
++_ubh_find_next_zero_bit__63640 _ubh_find_next_zero_bit_ 3-5-4 63640 NULL
++ext4_ext_get_access_63642 ext4_ext_get_access 0 63642 NULL
++proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
++nand_ecc_test_63654 nand_ecc_test 1 63654 NULL nohasharray
++ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3-0 63654 &nand_ecc_test_63654
++hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
++iwl4965_rs_sta_dbgfs_scale_table_read_63672 iwl4965_rs_sta_dbgfs_scale_table_read 3 63672 NULL
++vbi_read_63673 vbi_read 3 63673 NULL nohasharray
++xen_register_pirq_63673 xen_register_pirq 1-2 63673 &vbi_read_63673
++bin_search_63697 bin_search 0 63697 NULL
++ocfs2_et_root_journal_access_63713 ocfs2_et_root_journal_access 0 63713 NULL
++btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
++nfs4_reset_slot_table_63721 nfs4_reset_slot_table 2 63721 NULL
++i915_gem_execbuffer_relocate_63728 i915_gem_execbuffer_relocate 0 63728 NULL
++selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
++i915_gem_execbuffer_flush_63749 i915_gem_execbuffer_flush 0 63749 NULL
++snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
++snd_pcm_link_63772 snd_pcm_link 0 63772 NULL
++snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
++spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
++mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
++copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
++sel_write_load_63830 sel_write_load 3 63830 NULL
++kvm_init_63834 kvm_init 3 63834 NULL
++IsSectionWritable_63842 IsSectionWritable 0 63842 NULL
++proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
++ieee80211_if_fmt_channel_type_63855 ieee80211_if_fmt_channel_type 3 63855 NULL
++init_map_ipmac_63896 init_map_ipmac 4-3 63896 NULL
++IsOffsetWritable_63902 IsOffsetWritable 0 63902 NULL nohasharray
++xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 &IsOffsetWritable_63902
++pohmelfs_readpages_trans_complete_63912 pohmelfs_readpages_trans_complete 2 63912 NULL
++uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL
++acpi_ev_get_gpe_xrupt_block_63924 acpi_ev_get_gpe_xrupt_block 1 63924 NULL
++ledd_proc_write_63928 ledd_proc_write 3 63928 NULL
++tipc_send2port_63935 tipc_send2port 5 63935 NULL
++afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
++macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
++domain_pfn_mapping_63957 domain_pfn_mapping 4 63957 NULL
++ieee80211_authentication_req_63973 ieee80211_authentication_req 3 63973 NULL
++iwl_legacy_dbgfs_tx_statistics_read_63987 iwl_legacy_dbgfs_tx_statistics_read 3 63987 NULL
++read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
++kmemdup_64015 kmemdup 2 64015 NULL
++tcf_csum_skb_nextlayer_64025 tcf_csum_skb_nextlayer 3 64025 NULL
++dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
++frequency_read_64031 frequency_read 3 64031 NULL
++get_u8_64076 get_u8 0 64076 NULL
++sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
++clear_update_marker_64088 clear_update_marker 0 64088 NULL
++lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
++do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
++ol_quota_entries_per_block_64122 ol_quota_entries_per_block 0 64122 NULL
++i915_gem_execbuffer_reserve_64127 i915_gem_execbuffer_reserve 0 64127 NULL
++init_bch_64130 init_bch 2-1 64130 NULL
++uea_idma_write_64139 uea_idma_write 3 64139 NULL
++ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
++dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
++WriteReg_64163 WriteReg 0 64163 NULL
++cpumask_scnprintf_64170 cpumask_scnprintf 2 64170 NULL
++alloc_session_64171 alloc_session 2-1 64171 NULL
++ea_len_64229 ea_len 0 64229 NULL
++header_len_64232 header_len 0 64232 NULL
++xfrm_acquire_msgsize_64239 xfrm_acquire_msgsize 0 64239 NULL
++redrat3_transmit_ir_64244 redrat3_transmit_ir 3 64244 NULL
++fuse_do_getattr_64245 fuse_do_getattr 0 64245 NULL
++io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
++btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
++event_id_read_64288 event_id_read 3 64288 NULL nohasharray
++xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
++btrfs_reserve_extent_64293 btrfs_reserve_extent 6 64293 NULL
++ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
++snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL
++ffz_64324 ffz 0 64324 NULL
++sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
++ts_write_64336 ts_write 3 64336 NULL
++usbtmc_write_64340 usbtmc_write 3 64340 NULL
++ft1000_read_reg_64352 ft1000_read_reg 0 64352 NULL
++user_regset_copyin_64360 user_regset_copyin 7 64360 NULL
++llc_alloc_frame_64366 llc_alloc_frame 4 64366 NULL
++bnx2_enable_msix_64372 bnx2_enable_msix 2 64372 NULL
++ilo_write_64378 ilo_write 3 64378 NULL
++ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
++pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
++rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
++snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
++keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
++ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
++pfkey_sockaddr_len_64453 pfkey_sockaddr_len 0 64453 NULL
++ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
++alloc_large_system_hash_64490 alloc_large_system_hash 2-8-4-3 64490 NULL
++p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
++emulator_cmpxchg_emulated_64501 emulator_cmpxchg_emulated 5 64501 NULL
++msg_data_sz_64503 msg_data_sz 0 64503 NULL
++crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
++opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
++iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL
++ses_send_diag_64527 ses_send_diag 4 64527 NULL
++lm8323_read_64547 lm8323_read 4 64547 NULL
++__spi_sync_64561 __spi_sync 0 64561 NULL
++__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
++diva_os_alloc_message_buffer_64568 diva_os_alloc_message_buffer 1 64568 NULL
++kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL
++use_pool_64607 use_pool 2 64607 NULL
++fanotify_write_64623 fanotify_write 3 64623 NULL
++ocfs2_remove_refcount_extent_64631 ocfs2_remove_refcount_extent 0 64631 NULL
++ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL
++nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
++ip_select_ident_more_64707 ip_select_ident_more 4 64707 NULL
++__feat_register_sp_64712 __feat_register_sp 6 64712 NULL
++snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
++dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
++atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
++i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
++AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL
++squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
++bio_map_kern_64751 bio_map_kern 3 64751 NULL
++rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
++isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
++nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
++rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
++megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
++ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
++do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
++em28xx_read_reg_64839 em28xx_read_reg 0 64839 NULL
++altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
++ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
++mk_pid_64894 mk_pid 0-3 64894 NULL
++ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
++usb_reset_and_verify_device_64933 usb_reset_and_verify_device 0 64933 NULL
++ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
++acpi_os_install_interrupt_handler_64968 acpi_os_install_interrupt_handler 1 64968 NULL
++ext2_group_first_block_no_64972 ext2_group_first_block_no 0-2 64972 NULL
++pskb_pull_65005 pskb_pull 2 65005 NULL
++crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
++c4iw_ocqp_pool_alloc_65023 c4iw_ocqp_pool_alloc 2 65023 NULL
++insert_dent_65034 insert_dent 7 65034 NULL
++brcmf_sdcard_rwdata_65041 brcmf_sdcard_rwdata 5 65041 NULL
++ath9k_multi_regread_65056 ath9k_multi_regread 4 65056 NULL
++pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL
++make_idx_node_65068 make_idx_node 0 65068 NULL
++count_run_65072 count_run 0-4-5-2 65072 NULL nohasharray
++bnx2fc_process_l2_frame_compl_65072 bnx2fc_process_l2_frame_compl 3 65072 &count_run_65072
++__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
++ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
++scsi_add_host_with_dma_65093 scsi_add_host_with_dma 0 65093 NULL
++nf_bridge_mtu_reduction_65192 nf_bridge_mtu_reduction 0 65192 NULL
++nfulnl_alloc_skb_65207 nfulnl_alloc_skb 2-1 65207 NULL
++whci_n_caps_65247 whci_n_caps 0 65247 NULL
++kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
++compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
++get_unaligned_le16_65293 get_unaligned_le16 0 65293 NULL
++redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
++get_var_len_65304 get_var_len 0 65304 NULL
++unpack_array_65318 unpack_array 0 65318 NULL
++dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
++dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
++alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
++__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
++trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
++usb_ep_enable_65405 usb_ep_enable 0 65405 NULL
++iio_device_add_channel_sysfs_65406 iio_device_add_channel_sysfs 0 65406 NULL
++ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL
++drm_calloc_large_65421 drm_calloc_large 2-1 65421 NULL
++device_add_groups_65423 device_add_groups 0 65423 NULL
++cap_capable_65430 cap_capable 0 65430 NULL
++xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
++usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
++clear_user_65470 clear_user 2 65470 NULL
++ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
++alloc_dr_65495 alloc_dr 2 65495 NULL
++selnl_msglen_65499 selnl_msglen 0 65499 NULL
+diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
+new file mode 100644
+index 0000000..5515dcb
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin.c
+@@ -0,0 +1,3927 @@
++/*
++ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX; on overflow an event is logged and the triggering process is killed.
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -o size_overflow_plugin.so size_overflow_plugin.c
++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
++ */
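++
++/*
++ * For example (a sketch: the function and its types here are hypothetical,
++ * only the attribute itself is provided by this plugin), marking the third
++ * parameter of an allocator makes the plugin recompute and check the
++ * expression that produces that argument:
++ *
++ *   void *alloc_buf(void *ctx, int flags, unsigned long len)
++ *           __attribute__((size_overflow(3)));
++ */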
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#define TODO_dump_cgraph 0
++#endif
++
++#define __unused __attribute__((__unused__))
++#define ASM_NAME(node) IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node))
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define BEFORE_STMT true
++#define AFTER_STMT false
++#define CREATE_NEW_VAR NULL_TREE
++#define CODES_LIMIT 32
++#define MAX_PARAM 31
++#define VEC_LEN 128
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
++#define RET_CHECK NULL_TREE
++#define CANNOT_FIND_ARG 32
++#define WRONG_NODE 32
++#define NOT_INTENTIONAL_ASM NULL
++#define MIN_CHECK true
++#define MAX_CHECK false
++
++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF\n\t"
++#define YES_ASM_STR "# size_overflow MARK_YES\n\t"
++#define OK_ASM_STR "# size_overflow\n\t"
++
++#if BUILDING_GCC_VERSION == 4005
++#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
++#endif
++
++struct size_overflow_hash {
++ const struct size_overflow_hash * const next;
++ const char * const name;
++ const unsigned int param;
++};
++
++#include "size_overflow_hash.h"
++
++enum mark {
++ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
++};
++
++static unsigned int call_count;
++
++struct visited {
++ struct visited *next;
++ const_tree fndecl;
++ unsigned int num;
++ const_gimple first_stmt;
++};
++
++struct next_cgraph_node {
++ struct next_cgraph_node *next;
++ struct cgraph_node *current_function;
++ tree callee_fndecl;
++ unsigned int num;
++};
++
++struct interesting_node {
++ struct interesting_node *next;
++ gimple first_stmt;
++ const_tree fndecl;
++ tree node;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *last_nodes;
++#else
++ vec<tree, va_gc> *last_nodes;
++#endif
++ unsigned int num;
++ enum mark intentional_attr_decl;
++ enum mark intentional_attr_cur_fndecl;
++ gimple intentional_mark_from_gimple;
++};
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static tree report_size_overflow_decl;
++static const_tree const_char_ptr_type_node;
++
++static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
++
++static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
++static tree get_size_overflow_type(gimple stmt, const_tree node);
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++
++static struct plugin_info size_overflow_plugin_info = {
++ .version = "20131214beta",
++ .help = "no-size-overflow\tturn off size overflow checking\n",
++};
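++
++/*
++ * The "no-size-overflow" option above is passed with the standard GCC
++ * plugin-argument syntax, e.g.:
++ *   gcc -fplugin=size_overflow_plugin.so \
++ *       -fplugin-arg-size_overflow_plugin-no-size-overflow test.c
++ */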
++
++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++ unsigned int arg_count;
++ enum tree_code code = TREE_CODE(*node);
++
++ switch (code) {
++ case FUNCTION_DECL:
++ arg_count = type_num_arguments(TREE_TYPE(*node));
++ break;
++ case FUNCTION_TYPE:
++ case METHOD_TYPE:
++ arg_count = type_num_arguments(*node);
++ break;
++ default:
++ *no_add_attrs = true;
++ error("%s: %qE attribute only applies to functions", __func__, name);
++ return NULL_TREE;
++ }
++
++ for (; args; args = TREE_CHAIN(args)) {
++ tree position = TREE_VALUE(args);
++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count) {
++ error("%s: parameter %u is out of range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
++ *no_add_attrs = true;
++ }
++ }
++ return NULL_TREE;
++}
++
++static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++ unsigned int arg_count;
++ enum tree_code code = TREE_CODE(*node);
++
++ switch (code) {
++ case FUNCTION_DECL:
++ arg_count = type_num_arguments(TREE_TYPE(*node));
++ break;
++ case FUNCTION_TYPE:
++ case METHOD_TYPE:
++ arg_count = type_num_arguments(*node);
++ break;
++ case FIELD_DECL:
++ return NULL_TREE;
++ default:
++ *no_add_attrs = true;
++ error("%qE attribute only applies to functions", name);
++ return NULL_TREE;
++ }
++
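++ // a value with nonzero high bits (e.g. -1, MARK_TURN_OFF) skips the range check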
++ if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
++ return NULL_TREE;
++
++ for (; args; args = TREE_CHAIN(args)) {
++ tree position = TREE_VALUE(args);
++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count) {
++ error("%s: parameter %u is out of range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
++ *no_add_attrs = true;
++ }
++ }
++ return NULL_TREE;
++}
++
++static struct attribute_spec size_overflow_attr = {
++ .name = "size_overflow",
++ .min_length = 1,
++ .max_length = -1,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_size_overflow_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
++#endif
++};
++
++static struct attribute_spec intentional_overflow_attr = {
++ .name = "intentional_overflow",
++ .min_length = 1,
++ .max_length = -1,
++ .decl_required = true,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_intentional_overflow_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = false
++#endif
++};
++
++static void register_attributes(void __unused *event_data, void __unused *data)
++{
++ register_attribute(&size_overflow_attr);
++ register_attribute(&intentional_overflow_attr);
++}
++
++static bool is_bool(const_tree node)
++{
++ const_tree type;
++
++ if (node == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(node);
++ if (!INTEGRAL_TYPE_P(type))
++ return false;
++ if (TREE_CODE(type) == BOOLEAN_TYPE)
++ return true;
++ if (TYPE_PRECISION(type) == 1)
++ return true;
++ return false;
++}
++
++static bool skip_types(const_tree var)
++{
++ tree type;
++ enum tree_code code;
++
++ if (is_gimple_constant(var))
++ return true;
++
++ switch (TREE_CODE(var)) {
++ case ADDR_EXPR:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case INDIRECT_REF:
++ case TARGET_MEM_REF:
++ case COMPONENT_REF:
++ case VAR_DECL:
++ case VIEW_CONVERT_EXPR:
++ return true;
++ default:
++ break;
++ }
++
++ code = TREE_CODE(var);
++ gcc_assert(code == SSA_NAME || code == PARM_DECL);
++
++ type = TREE_TYPE(var);
++ switch (TREE_CODE(type)) {
++ case INTEGER_TYPE:
++ case ENUMERAL_TYPE:
++ return false;
++ case BOOLEAN_TYPE:
++ return is_bool(var);
++ default:
++ return true;
++ }
++}
++
++static inline gimple get_def_stmt(const_tree node)
++{
++ gcc_assert(node != NULL_TREE);
++
++ if (skip_types(node))
++ return NULL;
++
++ if (TREE_CODE(node) != SSA_NAME)
++ return NULL;
++ return SSA_NAME_DEF_STMT(node);
++}
++
++static unsigned char get_tree_code(const_tree type)
++{
++ switch (TREE_CODE(type)) {
++ case ARRAY_TYPE:
++ return 0;
++ case BOOLEAN_TYPE:
++ return 1;
++ case ENUMERAL_TYPE:
++ return 2;
++ case FUNCTION_TYPE:
++ return 3;
++ case INTEGER_TYPE:
++ return 4;
++ case POINTER_TYPE:
++ return 5;
++ case RECORD_TYPE:
++ return 6;
++ case UNION_TYPE:
++ return 7;
++ case VOID_TYPE:
++ return 8;
++ case REAL_TYPE:
++ return 9;
++ case VECTOR_TYPE:
++ return 10;
++ case REFERENCE_TYPE:
++ return 11;
++ case OFFSET_TYPE:
++ return 12;
++ case COMPLEX_TYPE:
++ return 13;
++ default:
++ debug_tree((tree)type);
++ gcc_unreachable();
++ }
++}
++
++struct function_hash {
++ size_t tree_codes_len;
++ unsigned char tree_codes[CODES_LIMIT];
++ tree fndecl;
++ unsigned int hash;
++};
++
++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
++{
++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
++#define cwmixa( in ) { cwfold( in, m, k, h ); }
++#define cwmixb( in ) { cwfold( in, n, h, k ); }
++
++ unsigned int m = 0x57559429;
++ unsigned int n = 0x5052acdb;
++ const unsigned int *key4 = (const unsigned int *)key;
++ unsigned int h = len;
++ unsigned int k = len + seed + n;
++ unsigned long long p;
++
++ while (len >= 8) {
++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
++ len -= 8;
++ }
++ if (len >= 4) {
++ cwmixb(key4[0]) key4 += 1;
++ len -= 4;
++ }
++ if (len)
++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
++ cwmixb(h ^ (k + n));
++ return k ^ h;
++
++#undef cwfold
++#undef cwmixa
++#undef cwmixb
++}
++
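++// Hash the function name and its tree code signature separately, then combine
++// the two 16-bit values into the table index.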
++static void set_hash(const char *fn_name, struct function_hash *fn_hash_data)
++{
++ unsigned int fn, codes, seed = 0;
++
++ fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff;
++ codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff;
++
++ fn_hash_data->hash = fn ^ codes;
++}
++
++static void set_node_codes(const_tree type, struct function_hash *fn_hash_data)
++{
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type);
++
++ while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) {
++ fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type);
++ fn_hash_data->tree_codes_len++;
++ type = TREE_TYPE(type);
++ }
++}
++
++static void set_result_codes(const_tree node, struct function_hash *fn_hash_data)
++{
++ const_tree result;
++
++ gcc_assert(node != NULL_TREE);
++
++ if (DECL_P(node)) {
++ result = DECL_RESULT(node);
++ if (result != NULL_TREE)
++ return set_node_codes(TREE_TYPE(result), fn_hash_data);
++ return set_result_codes(TREE_TYPE(node), fn_hash_data);
++ }
++
++ gcc_assert(TYPE_P(node));
++
++ if (TREE_CODE(node) == FUNCTION_TYPE)
++ return set_result_codes(TREE_TYPE(node), fn_hash_data);
++
++ return set_node_codes(node, fn_hash_data);
++}
++
++static void set_function_codes(struct function_hash *fn_hash_data)
++{
++ const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl);
++ enum tree_code code = TREE_CODE(type);
++
++ gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
++
++ set_result_codes(fn_hash_data->fndecl, fn_hash_data);
++
++ for (arg = TYPE_ARG_TYPES(type); arg != NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg))
++ set_node_codes(TREE_VALUE(arg), fn_hash_data);
++}
++
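++// Look up a function in the generated hash table; builtins are skipped and
++// collisions are resolved by comparing asm names along the chain.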
++static const struct size_overflow_hash *get_function_hash(tree fndecl)
++{
++ const struct size_overflow_hash *entry;
++ struct function_hash fn_hash_data;
++ const char *func_name;
++
++ // skip builtins such as __builtin_constant_p
++ if (DECL_BUILT_IN(fndecl))
++ return NULL;
++
++ fn_hash_data.fndecl = fndecl;
++ fn_hash_data.tree_codes_len = 0;
++
++ set_function_codes(&fn_hash_data);
++ gcc_assert(fn_hash_data.tree_codes_len != 0);
++
++ func_name = ASM_NAME(fn_hash_data.fndecl);
++ set_hash(func_name, &fn_hash_data);
++
++ entry = size_overflow_hash[fn_hash_data.hash];
++
++ while (entry) {
++ if (!strcmp(entry->name, func_name))
++ return entry;
++ entry = entry->next;
++ }
++ return NULL;
++}
++
++static void print_missing_msg(tree func, unsigned int argnum)
++{
++ location_t loc;
++ const char *curfunc;
++ struct function_hash fn_hash_data;
++
++ fn_hash_data.fndecl = DECL_ORIGIN(func);
++ fn_hash_data.tree_codes_len = 0;
++
++ loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl);
++ curfunc = ASM_NAME(fn_hash_data.fndecl);
++
++ set_function_codes(&fn_hash_data);
++ set_hash(curfunc, &fn_hash_data);
++
++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
++}
++
++static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
++{
++ tree var;
++ unsigned int argnum = 1;
++
++ if (TREE_CODE(arg) == SSA_NAME)
++ arg = SSA_NAME_VAR(arg);
++
++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
++ if (!operand_equal_p(arg, var, 0) && strcmp(NAME(var), NAME(arg)))
++ continue;
++ if (!skip_types(var))
++ return argnum;
++ }
++
++ return CANNOT_FIND_ARG;
++}
++
++static tree create_new_var(tree type)
++{
++ tree new_var = create_tmp_var(type, "cicus");
++
++#if BUILDING_GCC_VERSION <= 4007
++ add_referenced_var(new_var);
++#endif
++ return new_var;
++}
++
++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ gimple_set_plf(assign, MY_STMT, true);
++ return assign;
++}
++
++static tree cast_a_tree(tree type, tree var)
++{
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
++ gcc_assert(fold_convertible_p(type, var));
++
++ return fold_convert(type, var);
++}
++
++static tree get_lhs(const_gimple stmt)
++{
++ switch (gimple_code(stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ return gimple_get_lhs(stmt);
++ case GIMPLE_PHI:
++ return gimple_phi_result(stmt);
++ default:
++ return NULL_TREE;
++ }
++}
++
++static bool skip_cast(tree dst_type, const_tree rhs, bool force)
++{
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ if (force)
++ return false;
++
++ if (is_gimple_constant(rhs))
++ return false;
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return false;
++
++ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
++ return false;
++
++ // on a 32 bit target a DI type can appear (from create_assign) while the overflow type stays DI
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return false;
++
++ return true;
++}
++
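++// Build a cast assignment of rhs to dst_type and insert it before or after
++// gsi; an already instrumented defining cast is reused when possible.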
++static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
++{
++ gimple assign, def_stmt;
++
++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
++ if (gsi_end_p(*gsi) && before == AFTER_STMT)
++ gcc_unreachable();
++
++ def_stmt = get_def_stmt(rhs);
++ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && gimple_plf(def_stmt, MY_STMT))
++ return def_stmt;
++
++ if (lhs == CREATE_NEW_VAR)
++ lhs = create_new_var(dst_type);
++
++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
++
++ if (!gsi_end_p(*gsi)) {
++ location_t loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ return assign;
++}
++
++static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
++{
++ gimple_stmt_iterator gsi;
++ tree lhs;
++ gimple new_stmt;
++
++ if (rhs == NULL_TREE)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(stmt);
++ new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
++ gimple_set_plf(new_stmt, MY_STMT, true);
++
++ lhs = get_lhs(new_stmt);
++ gcc_assert(lhs != NULL_TREE);
++ return lhs;
++}
++
++static tree cast_to_TI_type(gimple stmt, tree node)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt;
++ tree type = TREE_TYPE(node);
++
++ if (types_compatible_p(type, intTI_type_node))
++ return node;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ gimple_set_plf(cast_stmt, MY_STMT, true);
++ return gimple_assign_lhs(cast_stmt);
++}
++
++static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
++{
++ tree lhs, new_lhs;
++ gimple_stmt_iterator gsi;
++
++ if (rhs1 == NULL_TREE) {
++ debug_gimple_stmt(oldstmt);
++ error("%s: rhs1 is NULL_TREE", __func__);
++ gcc_unreachable();
++ }
++
++ switch (gimple_code(oldstmt)) {
++ case GIMPLE_ASM:
++ lhs = rhs1;
++ break;
++ case GIMPLE_CALL:
++ case GIMPLE_ASSIGN:
++ lhs = gimple_get_lhs(oldstmt);
++ break;
++ default:
++ debug_gimple_stmt(oldstmt);
++ gcc_unreachable();
++ }
++
++ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited, oldstmt);
++ if (lookup_stmt_eh_lp(oldstmt) != 0) {
++ basic_block next_bb, cur_bb;
++ const_edge e;
++
++ gcc_assert(before == false);
++ gcc_assert(stmt_can_throw_internal(oldstmt));
++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
++ gcc_assert(!gsi_end_p(gsi));
++
++ cur_bb = gimple_bb(oldstmt);
++ next_bb = cur_bb->next_bb;
++ e = find_edge(cur_bb, next_bb);
++ gcc_assert(e != NULL);
++ gcc_assert(e->flags & EDGE_FALLTHRU);
++
++ gsi = gsi_after_labels(next_bb);
++ gcc_assert(!gsi_end_p(gsi));
++
++ before = true;
++ oldstmt = gsi_stmt(gsi);
++ }
++
++ new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
++ return new_lhs;
++}
++
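++// Duplicate oldstmt with its lhs widened to the size_overflow type,
++// substituting the already expanded operands where available.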
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++{
++ gimple stmt;
++ gimple_stmt_iterator gsi;
++ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
++
++ if (gimple_plf(oldstmt, MY_STMT))
++ return lhs;
++
++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
++ rhs1 = gimple_assign_rhs1(oldstmt);
++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
++ }
++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
++ rhs2 = gimple_assign_rhs2(oldstmt);
++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
++ }
++
++ stmt = gimple_copy(oldstmt);
++ gimple_set_location(stmt, gimple_location(oldstmt));
++ gimple_set_plf(stmt, MY_STMT, true);
++
++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
++ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
++
++ size_overflow_type = get_size_overflow_type(oldstmt, node);
++
++ new_var = create_new_var(size_overflow_type);
++ new_var = make_ssa_name(new_var, stmt);
++ gimple_assign_set_lhs(stmt, new_var);
++
++ if (rhs1 != NULL_TREE)
++ gimple_assign_set_rhs1(stmt, rhs1);
++
++ if (rhs2 != NULL_TREE)
++ gimple_assign_set_rhs2(stmt, rhs2);
++#if BUILDING_GCC_VERSION >= 4007
++ if (rhs3 != NULL_TREE)
++ gimple_assign_set_rhs3(stmt, rhs3);
++#endif
++ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
++ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
++ update_stmt(stmt);
++ pointer_set_insert(visited, oldstmt);
++ return gimple_assign_lhs(stmt);
++}
++
++static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ basic_block first_bb;
++
++ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
++
++ if (bb->index == 0) {
++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
++ bb = first_bb;
++ }
++
++ gsi = gsi_after_labels(bb);
++ assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
++ gimple_set_plf(assign, MY_STMT, true);
++
++ return gimple_assign_lhs(assign);
++}
++
++static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign, def_stmt = get_def_stmt(new_arg);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI) {
++ gsi = gsi_after_labels(gimple_bb(def_stmt));
++ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
++ } else {
++ gsi = gsi_for_stmt(def_stmt);
++ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
++ }
++
++ gimple_set_plf(assign, MY_STMT, true);
++ return gimple_assign_lhs(assign);
++}
++
++static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
++{
++ basic_block bb;
++ gimple_stmt_iterator gsi;
++ const_gimple def_stmt;
++ gimple assign;
++
++ def_stmt = get_def_stmt(arg);
++ bb = gimple_bb(def_stmt);
++ gcc_assert(bb->index != 0);
++ gsi = gsi_after_labels(bb);
++
++ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
++ gimple_set_plf(assign, MY_STMT, true);
++ return gimple_assign_lhs(assign);
++}
++
++static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
++{
++ tree size_overflow_type;
++ tree arg;
++ const_gimple def_stmt;
++
++ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
++ return new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ def_stmt = get_def_stmt(arg);
++ gcc_assert(def_stmt != NULL);
++ size_overflow_type = get_size_overflow_type(oldstmt, arg);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
++ case GIMPLE_NOP: {
++ basic_block bb;
++
++ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
++ }
++ case GIMPLE_ASM: {
++ gimple_stmt_iterator gsi;
++ gimple assign, stmt = get_def_stmt(arg);
++
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
++ gimple_set_plf(assign, MY_STMT, true);
++ return gimple_assign_lhs(assign);
++ }
++ default:
++ gcc_assert(new_arg != NULL_TREE);
++ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
++ return use_phi_ssa_name(ssa_name_var, new_arg);
++ }
++}
++
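++// Create the widened counterpart of a phi node right after the original one.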
++static gimple overflow_create_phi_node(gimple oldstmt, tree result)
++{
++ basic_block bb;
++ gimple phi;
++ gimple_seq seq;
++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
++
++ bb = gsi_bb(gsi);
++
++ if (result == NULL_TREE) {
++ tree old_result = gimple_phi_result(oldstmt);
++ tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
++
++ result = create_new_var(size_overflow_type);
++ }
++
++ phi = create_phi_node(result, bb);
++ gimple_phi_set_result(phi, make_ssa_name(result, phi));
++ seq = phi_nodes(bb);
++ gsi = gsi_last(seq);
++ gsi_remove(&gsi, false);
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
++ gimple_set_bb(phi, bb);
++ gimple_set_plf(phi, MY_STMT, true);
++ return phi;
++}
++
++#if BUILDING_GCC_VERSION <= 4007
++static tree create_new_phi_node(VEC(tree, gc) *args, tree ssa_name_var, gimple oldstmt)
++#else
++static tree create_new_phi_node(vec<tree, va_gc> *args, tree ssa_name_var, gimple oldstmt)
++#endif
++{
++ gimple new_phi;
++ unsigned int i;
++ tree arg, result;
++ location_t loc = gimple_location(oldstmt);
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(!VEC_empty(tree, args));
++#else
++ gcc_assert(!args->is_empty());
++#endif
++
++ new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
++ result = gimple_phi_result(new_phi);
++ ssa_name_var = SSA_NAME_VAR(result);
++
++#if BUILDING_GCC_VERSION == 4005
++ for (i = 0; i < VEC_length(tree, args); i++) {
++ arg = VEC_index(tree, args, i);
++#elif BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, args, i, arg) {
++#else
++ FOR_EACH_VEC_ELT(*args, i, arg) {
++#endif
++ arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
++ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, gc, args);
++#else
++ vec_free(args);
++#endif
++ update_stmt(new_phi);
++ return result;
++}
++
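++// Expand every argument of a phi node and build a new phi over the widened
++// (or constant-cast) arguments.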
++static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
++{
++ tree ssa_name_var = NULL_TREE;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *args;
++#else
++ vec<tree, va_gc> *args;
++#endif
++ gimple oldstmt = get_def_stmt(orig_result);
++ unsigned int i, len = gimple_phi_num_args(oldstmt);
++
++ pointer_set_insert(visited, oldstmt);
++#if BUILDING_GCC_VERSION <= 4007
++ args = VEC_alloc(tree, gc, len);
++#else
++ vec_alloc(args, len);
++#endif
++ for (i = 0; i < len; i++) {
++ tree arg, new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ new_arg = expand(visited, caller_node, arg);
++
++ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
++ ssa_name_var = SSA_NAME_VAR(new_arg);
++
++ if (is_gimple_constant(arg)) {
++ tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
++
++ new_arg = cast_a_tree(size_overflow_type, arg);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, gc, args, new_arg);
++#else
++ vec_safe_push(args, new_arg);
++#endif
++ }
++
++ return create_new_phi_node(args, ssa_name_var, oldstmt);
++}
++
++static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(is_gimple_assign(stmt));
++
++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ gimple_set_plf(assign, MY_STMT, true);
++ return gimple_assign_lhs(assign);
++}
++
++static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++{
++ const_tree rhs1, lhs, rhs1_type, lhs_type;
++ enum machine_mode lhs_mode, rhs_mode;
++ gimple def_stmt = get_def_stmt(no_const_rhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ lhs = gimple_assign_lhs(def_stmt);
++ rhs1_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ rhs_mode = TYPE_MODE(rhs1_type);
++ lhs_mode = TYPE_MODE(lhs_type);
++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++ return false;
++
++ return true;
++}
++
++static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
++{
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++ const_tree rhs1_type = TREE_TYPE(rhs1);
++ const_tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ return create_assign(visited, stmt, rhs1, AFTER_STMT);
++}
++
++static bool no_uses(tree node)
++{
++ imm_use_iterator imm_iter;
++ use_operand_p use_p;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ const_gimple use_stmt = USE_STMT(use_p);
++
++ if (use_stmt == NULL)
++ return true;
++ if (is_gimple_debug(use_stmt))
++ continue;
++ return false;
++ }
++ return true;
++}
++
++// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
++static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++{
++ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
++ return false;
++ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ return false;
++
++ if (is_gimple_constant(rhs2))
++ not_const_rhs = rhs1;
++ else
++ not_const_rhs = rhs2;
++
++ return no_uses(not_const_rhs);
++}
++
++static bool skip_lhs_cast_check(const_gimple stmt)
++{
++ const_tree rhs = gimple_assign_rhs1(stmt);
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
++ if (gimple_code(def_stmt) == GIMPLE_ASM)
++ return true;
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return true;
++
++ return false;
++}
++
++static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
++{
++ bool cast_lhs, cast_rhs;
++ tree lhs = gimple_assign_lhs(stmt);
++ tree rhs = gimple_assign_rhs1(stmt);
++ const_tree lhs_type = TREE_TYPE(lhs);
++ const_tree rhs_type = TREE_TYPE(rhs);
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
++ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
++
++ static bool check_lhs[3][4] = {
++ // ss su us uu
++ { false, true, true, false }, // lhs > rhs
++ { false, false, false, false }, // lhs = rhs
++ { true, true, true, true }, // lhs < rhs
++ };
++
++ static bool check_rhs[3][4] = {
++ // ss su us uu
++ { true, false, true, true }, // lhs > rhs
++ { true, false, true, true }, // lhs = rhs
++ { true, false, true, true }, // lhs < rhs
++ };
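++ // The tables are indexed by the size relation (row) and by the signedness
++ // pair of the cast: TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type).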
++
++ // skip the lhs check on a signed SI -> HI or signed SI -> QI cast
++ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (lhs_size > rhs_size) {
++ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else if (lhs_size == rhs_size) {
++ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else {
++ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ }
++
++ if (!cast_lhs && !cast_rhs)
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ if (cast_lhs && !skip_lhs_cast_check(stmt))
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
++
++ if (cast_rhs)
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++}
++
++static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return lhs;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++
++ if (new_rhs1 == NULL_TREE)
++ return create_cast_assign(visited, stmt);
++
++ if (gimple_plf(stmt, NO_CAST_CHECK))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
++ tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
++
++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++ }
++
++ if (!gimple_assign_cast_p(stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
++}
++
++static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ tree rhs1, lhs = gimple_assign_lhs(stmt);
++ gimple def_stmt = get_def_stmt(lhs);
++
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
++ rhs1 = gimple_assign_rhs1(def_stmt);
++
++ if (is_gimple_constant(rhs1))
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++
++ switch (TREE_CODE(rhs1)) {
++ case SSA_NAME:
++ return handle_unary_rhs(visited, caller_node, def_stmt);
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case ADDR_EXPR:
++ case COMPONENT_REF:
++ case INDIRECT_REF:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case TARGET_MEM_REF:
++ case VIEW_CONVERT_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case PARM_DECL:
++ case VAR_DECL:
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ default:
++ debug_gimple_stmt(def_stmt);
++ debug_tree(rhs1);
++ gcc_unreachable();
++ }
++}
++
++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
++{
++ gimple cond_stmt;
++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
++
++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(cond_stmt);
++}
++
++static tree create_string_param(tree string)
++{
++ tree i_type, a_type;
++ const int length = TREE_STRING_LENGTH(string);
++
++ gcc_assert(length > 0);
++
++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
++ a_type = build_array_type(char_type_node, i_type);
++
++ TREE_TYPE(string) = a_type;
++ TREE_CONSTANT(string) = 1;
++ TREE_READONLY(string) = 1;
++
++ return build1(ADDR_EXPR, ptr_type_node, string);
++}
++
++#if BUILDING_GCC_VERSION <= 4006
++struct cgraph_node *cgraph_get_create_node(tree decl);
++
++struct cgraph_node *cgraph_get_create_node(tree decl)
++{
++ struct cgraph_node *node;
++
++ node = cgraph_get_node(decl);
++ if (node)
++ return node;
++ return cgraph_node(decl);
++}
++#endif
++
++static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
++{
++ gimple func_stmt;
++ const_gimple def_stmt;
++ const_tree loc_line;
++ tree loc_file, ssa_name, current_func;
++ expanded_location xloc;
++ char *ssa_name_buf;
++ int len;
++ struct cgraph_edge *edge;
++ struct cgraph_node *callee_node;
++ int frequency;
++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
++
++ def_stmt = get_def_stmt(arg);
++ xloc = expand_location(gimple_location(def_stmt));
++
++ if (!gimple_has_location(def_stmt)) {
++ xloc = expand_location(gimple_location(stmt));
++ if (!gimple_has_location(stmt))
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ }
++
++ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
++
++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
++ loc_file = create_string_param(loc_file);
++
++ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
++ current_func = create_string_param(current_func);
++
++ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
++ call_count++;
++ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
++ gcc_assert(len > 0);
++ ssa_name = build_string(len + 1, ssa_name_buf);
++ free(ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
++
++ callee_node = cgraph_get_create_node(report_size_overflow_decl);
++ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
++
++#if BUILDING_GCC_VERSION <= 4006
++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
++#else
++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency);
++#endif
++ gcc_assert(edge != NULL);
++}
++
++static void __unused print_the_code_insertions(const_gimple stmt)
++{
++ location_t loc = gimple_location(stmt);
++
++ inform(loc, "Integer size_overflow check applied here.");
++}
++
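++// Split the block at stmt and insert the conditional check: cond_bb branches
++// to bb_true, which calls report_size_overflow(), or falls through to join_bb.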
++static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++{
++ basic_block cond_bb, join_bb, bb_true;
++ edge e;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ cond_bb = gimple_bb(stmt);
++ if (before)
++ gsi_prev(&gsi);
++ if (gsi_end_p(gsi))
++ e = split_block_after_labels(cond_bb);
++ else
++ e = split_block(cond_bb, gsi_stmt(gsi));
++ cond_bb = e->src;
++ join_bb = e->dest;
++ e->flags = EDGE_FALSE_VALUE;
++ e->probability = REG_BR_PROB_BASE;
++
++ bb_true = create_empty_bb(cond_bb);
++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
++ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
++
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
++
++ if (current_loops != NULL) {
++ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
++ add_bb_to_loop(bb_true, cond_bb->loop_father);
++ }
++
++ insert_cond(cond_bb, arg, cond_code, type_value);
++ insert_cond_result(caller_node, bb_true, stmt, arg, min);
++
++// print_the_code_insertions(stmt);
++}
++
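++// Compare the widened value against the TYPE_MAX_VALUE/TYPE_MIN_VALUE bounds
++// of the original type and report an overflow when either is exceeded.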
++static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
++{
++ const_tree rhs_type = TREE_TYPE(rhs);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
++
++ gcc_assert(rhs_type != NULL_TREE);
++ if (TREE_CODE(rhs_type) == POINTER_TYPE)
++ return;
++
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ // typemax (-1) < typemin (0)
++ if (TREE_OVERFLOW(type_max))
++ return;
++
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
++
++ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case (see get_size_overflow_type()): on 32 bit a u64 overflow type with a signed rhs gets no min check
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
++ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
++}
++
++static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++{
++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
++ return false;
++ if (!is_gimple_constant(rhs))
++ return false;
++ return true;
++}
++
++static tree get_def_stmt_rhs(const_tree var)
++{
++ tree rhs1, def_stmt_rhs1;
++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
++
++ def_stmt = get_def_stmt(var);
++ if (!gimple_assign_cast_p(def_stmt))
++ return NULL_TREE;
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ if (!gimple_assign_cast_p(rhs1_def_stmt))
++ return rhs1;
++
++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++ case GIMPLE_CALL:
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_PHI:
++ return def_stmt_rhs1;
++ case GIMPLE_ASSIGN:
++ return rhs1;
++ default:
++ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++ gcc_unreachable();
++ }
++}
++
++static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++{
++ tree new_rhs, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++
++ if (!check_overflow)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (change_rhs == NULL_TREE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (new_rhs2 == NULL_TREE) {
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
++
++ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
++
++ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
++ gimple_assign_set_rhs(stmt, new_rhs);
++ update_stmt(stmt);
++
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++}
++
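++// A subtraction is special when both operands are truncating casts feeding a
++// MINUS_EXPR; the check is then done on the wider pre-cast operands instead.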
++static bool is_subtraction_special(const_gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
++ const_tree rhs1 = gimple_assign_rhs1(stmt);
++ const_tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
++ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
++ return false;
++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
++ return false;
++
++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
++ return true;
++}
++
++static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
++{
++ tree new_rhs1, new_rhs2;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_subtraction_special(stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
++
++ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_assign_lhs(assign);
++ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return false;
++
++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++ return false;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
++ return false;
++
++ return true;
++}
++
++static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ tree rhs1, rhs2, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
++ tree new_rhs1 = NULL_TREE;
++ tree new_rhs2 = NULL_TREE;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ /* no DImode/TImode division in the 32/64 bit kernel */
++ switch (gimple_assign_rhs_code(def_stmt)) {
++ case RDIV_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ case EXACT_DIV_EXPR:
++ case POINTER_PLUS_EXPR:
++ case BIT_AND_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ default:
++ break;
++ }
++
++ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
++ if (TREE_CODE(rhs1) == SSA_NAME)
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ if (TREE_CODE(rhs2) == SSA_NAME)
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ if (is_a_neg_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_neg_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
++
++ if (is_a_constant_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_constant_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++#if BUILDING_GCC_VERSION >= 4007
++static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
++{
++ if (is_gimple_constant(rhs))
++ return cast_a_tree(size_overflow_type, rhs);
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return NULL_TREE;
++ return expand(visited, caller_node, rhs);
++}
++
++static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs3 = gimple_assign_rhs3(def_stmt);
++ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
++}
++#endif
++
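++// Map a type to its wider overflow-checking type: QI -> HI, HI -> SI,
++// SI -> DI and DI -> TI (DI stays DI on a 32 bit target).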
++static tree get_size_overflow_type(gimple stmt, const_tree node)
++{
++ const_tree type;
++ tree new_type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (gimple_plf(stmt, MY_STMT))
++ return TREE_TYPE(node);
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ new_type = intHI_type_node;
++ break;
++ case HImode:
++ new_type = intSI_type_node;
++ break;
++ case SImode:
++ new_type = intDI_type_node;
++ break;
++ case DImode:
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
++ else
++ new_type = intTI_type_node;
++ break;
++ case TImode:
++ gcc_assert(!TYPE_UNSIGNED(type));
++ new_type = intTI_type_node;
++ break;
++ default:
++ debug_tree((tree)node);
++ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
++ gcc_unreachable();
++ }
++
++ if (TYPE_QUALS(type) != 0)
++ return build_qualified_type(new_type, TYPE_QUALS(type));
++ return new_type;
++}
++
++static tree expand_visited(gimple def_stmt)
++{
++ const_gimple next_stmt;
++ gimple_stmt_iterator gsi;
++ enum gimple_code code = gimple_code(def_stmt);
++
++ if (code == GIMPLE_ASM)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(def_stmt);
++ gsi_next(&gsi);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
++ return NULL_TREE;
++ gcc_assert(!gsi_end_p(gsi));
++ next_stmt = gsi_stmt(gsi);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI && !gimple_plf((gimple)next_stmt, MY_STMT))
++ return NULL_TREE;
++ gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
++
++ return get_lhs(next_stmt);
++}
++
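++// The core walker: recursively duplicate the use-def chain of lhs in the
++// wider type, dispatching on the code of the defining statement.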
++static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return NULL_TREE;
++
++ if (gimple_plf(def_stmt, MY_STMT))
++ return lhs;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return expand_visited(def_stmt);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return handle_phi(visited, caller_node, lhs);
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return handle_unary_ops(visited, caller_node, def_stmt);
++ case 3:
++ return handle_binary_ops(visited, caller_node, lhs);
++#if BUILDING_GCC_VERSION >= 4007
++ case 4:
++ return handle_ternary_ops(visited, caller_node, lhs);
++#endif
++ }
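++ // an unexpected operand count falls through to the error path below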
++ default:
++ debug_gimple_stmt(def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
++{
++ const_gimple assign;
++ tree orig_type = TREE_TYPE(orig_node);
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ return gimple_assign_lhs(assign);
++}
++
++static void change_orig_node(struct interesting_node *cur_node, tree new_node)
++{
++ void (*set_rhs)(gimple, tree);
++ gimple stmt = cur_node->first_stmt;
++ const_tree orig_node = cur_node->node;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_RETURN:
++ gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
++ break;
++ case GIMPLE_CALL:
++ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
++ break;
++ case GIMPLE_ASSIGN:
++ switch (cur_node->num) {
++ case 1:
++ set_rhs = &gimple_assign_set_rhs1;
++ break;
++ case 2:
++ set_rhs = &gimple_assign_set_rhs2;
++ break;
++#if BUILDING_GCC_VERSION > 4005
++ case 3:
++ set_rhs = &gimple_assign_set_rhs3;
++ break;
++#endif
++ default:
++ gcc_unreachable();
++ }
++
++ set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ }
++
++ update_stmt(stmt);
++}
++
++static unsigned int get_correct_arg_count(unsigned int argnum, tree fndecl)
++{
++ const struct size_overflow_hash *hash;
++ unsigned int new_argnum;
++ tree arg;
++ const_tree origarg;
++
++ if (argnum == 0)
++ return argnum;
++
++ hash = get_function_hash(fndecl);
++ if (hash && hash->param & (1U << argnum))
++ return argnum;
++
++ if (DECL_EXTERNAL(fndecl))
++ return argnum;
++
++ origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl));
++ argnum--;
++ while (origarg && argnum) {
++ origarg = TREE_CHAIN(origarg);
++ argnum--;
++ }
++ gcc_assert(argnum == 0);
++ gcc_assert(origarg != NULL_TREE);
++
++ for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++)
++ if (operand_equal_p(origarg, arg, 0) || !strcmp(NAME(origarg), NAME(arg)))
++ return new_argnum;
++
++ return CANNOT_FIND_ARG;
++}
++
++// Don't want to duplicate entries in next_cgraph_node
++static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
++{
++ const_tree new_callee_fndecl;
++ struct next_cgraph_node *cur_node;
++
++ if (fndecl == RET_CHECK)
++#if BUILDING_GCC_VERSION <= 4007
++ new_callee_fndecl = node->decl;
++#else
++ new_callee_fndecl = node->symbol.decl;
++#endif
++ else
++ new_callee_fndecl = fndecl;
++
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++#if BUILDING_GCC_VERSION <= 4007
++ if (!operand_equal_p(cur_node->current_function->decl, node->decl, 0))
++#else
++ if (!operand_equal_p(cur_node->current_function->symbol.decl, node->symbol.decl, 0))
++#endif
++ continue;
++ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
++ continue;
++ if (num == cur_node->num)
++ return true;
++ }
++ return false;
++}
++
++/* Add a next_cgraph_node into the list for handle_function().
++ * handle_function() iterates over all the next cgraph nodes and
++ * starts the overflow check insertion process.
++ */
++static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
++{
++ struct next_cgraph_node *new_node;
++
++ if (is_in_next_cgraph_node(head, node, fndecl, num))
++ return head;
++
++ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
++ new_node->current_function = node;
++ new_node->next = NULL;
++ new_node->num = num;
++ if (fndecl == RET_CHECK)
++#if BUILDING_GCC_VERSION <= 4007
++ new_node->callee_fndecl = node->decl;
++#else
++ new_node->callee_fndecl = node->symbol.decl;
++#endif
++ else
++ new_node->callee_fndecl = fndecl;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
++{
++ struct cgraph_edge *e;
++
++ if (num == 0)
++ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
++
++ for (e = node->callers; e; e = e->next_caller) {
++ tree fndecl = gimple_call_fndecl(e->call_stmt);
++
++ gcc_assert(fndecl != NULL_TREE);
++ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
++ }
++
++ return head;
++}
++
++static bool is_a_return_check(const_tree node)
++{
++ if (TREE_CODE(node) == FUNCTION_DECL)
++ return true;
++
++ gcc_assert(TREE_CODE(node) == PARM_DECL);
++ return false;
++}
++
++static bool is_in_hash_table(tree fndecl, unsigned int num)
++{
++ const struct size_overflow_hash *hash;
++
++ hash = get_function_hash(fndecl);
++ if (hash && (hash->param & (1U << num)))
++ return true;
++ return false;
++}
++
++struct missing_functions {
++ struct missing_functions *next;
++ const_tree node;
++ tree fndecl;
++};
++
++static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
++{
++ struct missing_functions *new_function;
++
++ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
++ new_function->node = node;
++ new_function->next = NULL;
++
++ if (TREE_CODE(node) == FUNCTION_DECL)
++ new_function->fndecl = node;
++ else
++ new_function->fndecl = current_function_decl;
++ gcc_assert(new_function->fndecl);
++
++ if (!missing_fn_head)
++ return new_function;
++
++ new_function->next = missing_fn_head;
++ return new_function;
++}
++
++/* Check if the function has a size_overflow attribute or is in the size_overflow hash table.
++ * If it is missing from both, print the missing message to stderr.
++ */
++static bool is_missing_function(tree orig_fndecl, unsigned int num)
++{
++ switch (DECL_FUNCTION_CODE(orig_fndecl)) {
++#if BUILDING_GCC_VERSION >= 4008
++ case BUILT_IN_BSWAP16:
++#endif
++ case BUILT_IN_BSWAP32:
++ case BUILT_IN_BSWAP64:
++ case BUILT_IN_EXPECT:
++ case BUILT_IN_MEMCMP:
++ return false;
++ default:
++ break;
++ }
++
++ // skip the unnecessary-attribute warning inside the test function (test.c)
++ if (strcmp(NAME(current_function_decl), "coolmalloc")) {
++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl)))
++ warning(0, "unnecessary size_overflow attribute on: %s\n", NAME(orig_fndecl));
++ }
++
++ if (is_in_hash_table(orig_fndecl, num))
++ return false;
++
++ print_missing_msg(orig_fndecl, num);
++ return true;
++}
++
++// Get the argnum of a function decl; if node is a return, the argnum is 0
++static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
++{
++ if (is_a_return_check(node))
++ return 0;
++ else
++ return find_arg_number_tree(node, orig_fndecl);
++}
++
++/* If the function is missing from the hash table and is a static function,
++ * then create a next_cgraph_node from it for handle_function()
++ */
++static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
++{
++ unsigned int num;
++ tree orig_fndecl;
++ struct cgraph_node *next_node = NULL;
++
++ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
++
++ num = get_function_num(missing_fn_head->node, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return cnodes;
++
++ if (!is_missing_function(orig_fndecl, num))
++ return cnodes;
++
++ next_node = cgraph_get_node(missing_fn_head->fndecl);
++ if (next_node && next_node->local.local)
++ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
++ return cnodes;
++}
++
++/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
++ * into the next_cgraph_node list. They will be the next interesting returns or callees.
++ */
++static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree node;
++ struct missing_functions *cur, *missing_fn_head = NULL;
++
++#if BUILDING_GCC_VERSION == 4005
++ for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
++ node = VEC_index(tree, cur_node->last_nodes, i);
++#elif BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
++#endif
++ switch (TREE_CODE(node)) {
++ case PARM_DECL:
++ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
++ break;
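++ // fall through for integer parameters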
++ case FUNCTION_DECL:
++ missing_fn_head = create_new_missing_function(missing_fn_head, node);
++ break;
++ default:
++ break;
++ }
++ }
++
++ while (missing_fn_head) {
++ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
++
++ cur = missing_fn_head->next;
++ free(missing_fn_head);
++ missing_fn_head = cur;
++ }
++
++ return cnodes;
++}
++
++static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ const_tree arg = gimple_phi_arg_def(phi, i);
++
++ set_conditions(visited, interesting_conditions, arg);
++ }
++}
++
++enum conditions {
++ FROM_CONST, NOT_UNARY, CAST
++};
++
++// Search for constants, cast assignments and binary/ternary assignments
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (is_gimple_constant(lhs)) {
++ interesting_conditions[FROM_CONST] = true;
++ return;
++ }
++
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_PHI:
++ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
++ case GIMPLE_ASSIGN:
++ if (gimple_num_ops(def_stmt) == 2) {
++ const_tree rhs = gimple_assign_rhs1(def_stmt);
++
++ if (gimple_assign_cast_p(def_stmt))
++ interesting_conditions[CAST] = true;
++
++ return set_conditions(visited, interesting_conditions, rhs);
++ } else {
++ interesting_conditions[NOT_UNARY] = true;
++ return;
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
++ }
++}
++
++// Determine whether duplication will be necessary or not.
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
++{
++ struct pointer_set_t *visited;
++
++ if (gimple_assign_cast_p(cur_node->first_stmt))
++ interesting_conditions[CAST] = true;
++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
++ interesting_conditions[NOT_UNARY] = true;
++
++ visited = pointer_set_create();
++ set_conditions(visited, interesting_conditions, cur_node->node);
++ pointer_set_destroy(visited);
++}
++
++// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
++static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ // already removed
++ if (gimple_bb(asm_stmt) == NULL)
++ return;
++ gsi = gsi_for_stmt(asm_stmt);
++
++ assign = gimple_build_assign(lhs, rhs);
++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
++ SSA_NAME_DEF_STMT(lhs) = assign;
++
++ gsi_remove(&gsi, true);
++}
++
++// Get the field decl of a component ref for intentional_overflow checking
++static const_tree search_field_decl(const_tree comp_ref)
++{
++ const_tree field = NULL_TREE;
++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
++
++ for (i = 0; i < len; i++) {
++ field = TREE_OPERAND(comp_ref, i);
++ if (TREE_CODE(field) == FIELD_DECL)
++ break;
++ }
++ gcc_assert(TREE_CODE(field) == FIELD_DECL);
++ return field;
++}
++
++/* Get the fndecl of an interesting stmt: it is the fndecl of the caller function if the interesting
++ * stmt is a return, otherwise it is the fndecl of the callee function.
++ */
++static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++
++ if (argnum == 0)
++ fndecl = current_function_decl;
++ else
++ fndecl = gimple_call_fndecl(stmt);
++
++ if (fndecl == NULL_TREE)
++ return NULL_TREE;
++
++ return DECL_ORIGIN(fndecl);
++}
++
++/* Get the param of the intentional_overflow attribute.
++ * * 0: MARK_NOT_INTENTIONAL
++ * * 1..MAX_PARAM: MARK_YES
++ * * -1: MARK_TURN_OFF
++ */
++static tree get_attribute_param(const_tree decl)
++{
++ const_tree attr;
++
++ if (decl == NULL_TREE)
++ return NULL_TREE;
++
++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
++ if (!attr || !TREE_VALUE(attr))
++ return NULL_TREE;
++
++ return TREE_VALUE(attr);
++}
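++
++/* Illustration (hypothetical kernel declarations, not part of this plugin):
++ *
++ *   void *alloc_buf(unsigned long len) __attribute__((intentional_overflow(1)));  // arg 1: MARK_YES
++ *   long parse_hdr(void *hdr) __attribute__((intentional_overflow(-1)));          // MARK_TURN_OFF
++ *   u32 checksum(u32 a, u32 b) __attribute__((intentional_overflow(0)));          // MARK_NOT_INTENTIONAL
++ *
++ * get_attribute_param() returns the TREE_LIST of these values; the is_*_attr()
++ * helpers below decode them.
++ */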
++
++// MARK_TURN_OFF
++static bool is_turn_off_intentional_attr(const_tree decl)
++{
++ const_tree param_head;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
++ return true;
++ return false;
++}
++
++// MARK_NOT_INTENTIONAL
++static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ const_tree param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
++ return true;
++ return false;
++}
++
++// MARK_YES
++static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ tree param, param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ for (param = param_head; param; param = TREE_CHAIN(param))
++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
++ return true;
++ return false;
++}
++
++static const char *get_asm_string(const_gimple stmt)
++{
++ if (!stmt)
++ return NULL;
++ if (gimple_code(stmt) != GIMPLE_ASM)
++ return NULL;
++
++ return gimple_asm_string(stmt);
++}
++
++static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
++{
++ const char *str;
++
++ str = get_asm_string(stmt);
++ if (!str)
++ return false;
++ return !strcmp(str, TURN_OFF_ASM_STR);
++}
++
++static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
++{
++ const char *str;
++
++ str = get_asm_string(stmt);
++ if (!str)
++ return false;
++ return !strcmp(str, YES_ASM_STR);
++}
++
++static bool is_size_overflow_asm(const_gimple stmt)
++{
++ const char *str;
++
++ str = get_asm_string(stmt);
++ if (!str)
++ return false;
++ return !strncmp(str, "# size_overflow", 15);
++}
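++
++/* Sketch of the marker asm stmts these predicates match; the exact strings
++ * are assumed from TURN_OFF_ASM_STR/YES_ASM_STR/OK_ASM_STR defined earlier
++ * in this file:
++ *
++ *   __asm__("# size_overflow" : "=rm" out : "0" in);              // plain marker
++ *   __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));       // ..._intentional_asm_yes()
++ *   __asm__("# size_overflow MARK_TURN_OFF" : : "rm" size_1(D));  // ..._intentional_asm_turn_off()
++ */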
++
++static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
++{
++ location_t loc;
++
++ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
++ return;
++
++ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
++ return;
++
++ loc = DECL_SOURCE_LOCATION(decl);
++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", NAME(decl), argnum);
++}
++
++/* Get the type of the intentional_overflow attribute of a node
++ * * MARK_TURN_OFF
++ * * MARK_YES
++ * * MARK_NO
++ * * MARK_NOT_INTENTIONAL
++ */
++static enum mark get_intentional_attr_type(const_tree node)
++{
++ const_tree cur_decl;
++
++ if (node == NULL_TREE)
++ return MARK_NO;
++
++ switch (TREE_CODE(node)) {
++ case COMPONENT_REF:
++ cur_decl = search_field_decl(node);
++ if (is_turn_off_intentional_attr(cur_decl))
++ return MARK_TURN_OFF;
++ if (is_end_intentional_intentional_attr(cur_decl, 1))
++ return MARK_YES;
++ break;
++ case PARM_DECL: {
++ unsigned int argnum;
++
++ cur_decl = DECL_ORIGIN(current_function_decl);
++ argnum = find_arg_number_tree(node, cur_decl);
++ if (argnum == CANNOT_FIND_ARG)
++ return MARK_NO;
++ if (is_yes_intentional_attr(cur_decl, argnum))
++ return MARK_YES;
++ if (is_end_intentional_intentional_attr(cur_decl, argnum))
++ return MARK_NOT_INTENTIONAL;
++ break;
++ }
++ case FUNCTION_DECL:
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
++ return MARK_TURN_OFF;
++ break;
++ default:
++ break;
++ }
++ return MARK_NO;
++}
++
++// Search for the intentional_overflow attribute on the last nodes
++static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree last_node;
++ enum mark mark = MARK_NO;
++
++#if BUILDING_GCC_VERSION == 4005
++ for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
++ last_node = VEC_index(tree, cur_node->last_nodes, i);
++#elif BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
++#endif
++ mark = get_intentional_attr_type(last_node);
++ if (mark != MARK_NO)
++ break;
++ }
++ return mark;
++}
++
++/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
++ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
++ */
++static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
++{
++ if (!cur_node->intentional_mark_from_gimple)
++ return false;
++
++ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
++ cur_node->intentional_attr_cur_fndecl = MARK_YES;
++ else
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++
++ // skip param decls
++ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
++ return true;
++ return true;
++}
++
++/* Search intentional_overflow attribute on caller and on callee too.
++ * 1..MAX_PARAM/MARK_YES: no dup, search size_overflow and intentional_overflow attributes
++ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
++ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
++*/
++static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
++{
++ const_tree fndecl;
++
++ if (is_intentional_attribute_from_gimple(cur_node))
++ return;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
++ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
++ return;
++
++ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
++ if (is_turn_off_intentional_attr(fndecl)) {
++ cur_node->intentional_attr_decl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_YES;
++
++ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
++ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
++}
++
++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
++static bool skip_asm(const_tree arg)
++{
++ gimple def_stmt = get_def_stmt(arg);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
++}
++
++static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ walk_use_def(visited, cur_node, arg);
++ }
++}
++
++static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ walk_use_def(visited, cur_node, rhs1);
++ walk_use_def(visited, cur_node, rhs2);
++}
++
++static void insert_last_node(struct interesting_node *cur_node, tree node)
++{
++ unsigned int i;
++ tree element;
++ enum tree_code code;
++
++ gcc_assert(node != NULL_TREE);
++
++ if (is_gimple_constant(node))
++ return;
++
++ code = TREE_CODE(node);
++ if (code == VAR_DECL) {
++ node = DECL_ORIGIN(node);
++ code = TREE_CODE(node);
++ }
++
++ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
++ return;
++
++#if BUILDING_GCC_VERSION == 4005
++ for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
++ element = VEC_index(tree, cur_node->last_nodes, i);
++#elif BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
++#endif
++ if (operand_equal_p(node, element, 0))
++ return;
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
++ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
++#else
++ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
++ vec_safe_push(cur_node->last_nodes, node);
++#endif
++}
++
++// a size_overflow asm stmt in the control flow doesn't stop the recursion
++static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++{
++ if (!is_size_overflow_asm(stmt))
++ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++}
++
++/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
++ * and component refs (for checking the intentional_overflow attribute).
++ */
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME) {
++ insert_last_node(cur_node, lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ insert_last_node(cur_node, fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return walk_use_def_phi(visited, cur_node, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return walk_use_def_binary(visited, cur_node, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
++static void set_last_nodes(struct interesting_node *cur_node)
++{
++ struct pointer_set_t *visited;
++
++ visited = pointer_set_create();
++ walk_use_def(visited, cur_node, cur_node->node);
++ pointer_set_destroy(visited);
++}
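++
++/* Worked example of the use-def walk (hypothetical caller code):
++ *
++ *   size_t n = hdr->len;            // load through a COMPONENT_REF
++ *   p = kmalloc(n + 4, GFP_KERNEL); // n + 4 is the interesting node
++ *
++ * Walking n + 4 backwards visits the plus assignment and the load from
++ * hdr->len; insert_last_node() records the COMPONENT_REF (and any PARM_DECL
++ * or fndecl the walk ends on) as a last node for the attribute checks.
++ */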
++
++enum precond {
++ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
++ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
++ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
++ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume that it is some kind of error code.
++ */
++static enum precond check_preconditions(struct interesting_node *cur_node)
++{
++ bool interesting_conditions[3] = {false, false, false};
++
++ set_last_nodes(cur_node);
++
++ check_intentional_attribute_ipa(cur_node);
++ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
++ return NO_ATTRIBUTE_SEARCH;
++
++ search_interesting_conditions(cur_node, interesting_conditions);
++
++ // error code
++ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
++ return NO_ATTRIBUTE_SEARCH;
++
++ // unnecessary overflow check
++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ return NO_CHECK_INSERT;
++
++ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++ return NO_CHECK_INSERT;
++
++ return NONE;
++}
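++
++/* Sketch of the "error code" shape that NO_ATTRIBUTE_SEARCH skips
++ * (hypothetical caller code):
++ *
++ *   ret = len > PAGE_SIZE ? -EINVAL : (long)len;
++ *
++ * On the -EINVAL branch the value traces back to a constant through casts
++ * only, so FROM_CONST and CAST are set while NOT_UNARY is not, and the node
++ * is treated as an error code rather than a size.
++ */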
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++ enum precond ret;
++ struct pointer_set_t *visited;
++ tree new_node, orig_node = cur_node->node;
++
++ ret = check_preconditions(cur_node);
++ if (ret == NO_ATTRIBUTE_SEARCH)
++ return cnodes;
++
++ cnodes = search_overflow_attribute(cnodes, cur_node);
++
++ if (ret == NO_CHECK_INSERT)
++ return cnodes;
++
++ visited = pointer_set_create();
++ new_node = expand(visited, caller_node, orig_node);
++ pointer_set_destroy(visited);
++
++ if (new_node == NULL_TREE)
++ return cnodes;
++
++ change_orig_node(cur_node, new_node);
++ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
++
++ return cnodes;
++}
++
++// Check visited interesting nodes.
++static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
++{
++ struct interesting_node *cur;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (!operand_equal_p(node, cur->node, 0))
++ continue;
++ if (num != cur->num)
++ continue;
++ if (first_stmt == cur->first_stmt)
++ return true;
++ }
++ return false;
++}
++
++/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
++ first_stmt: the call, assignment or ret stmt; change_orig_node() will change the original node (retval or function arg) in this stmt
++ last_nodes: they are the last stmts in the recursion (they have no def_stmt). They are useful in the missing size_overflow attribute check and
++ the intentional_overflow attribute check. They are collected by set_last_nodes().
++ num: arg count of a call stmt or 0 when it is a ret
++ node: the recursion starts from here, it is a call arg or a return value
++ fndecl: the fndecl of the interesting node; it is the fndecl of the callee function when the node is a call arg, otherwise it is the fndecl of the caller (current_function_decl)
++ intentional_attr_decl: intentional_overflow attribute of the callee function
++ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
++ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
++ */
++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
++{
++ struct interesting_node *new_node;
++ tree fndecl;
++ enum gimple_code code;
++
++ gcc_assert(node != NULL_TREE);
++ code = gimple_code(first_stmt);
++ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
++
++ if (num == CANNOT_FIND_ARG)
++ return head;
++
++ if (skip_types(node))
++ return head;
++
++ if (skip_asm(node))
++ return head;
++
++ if (is_gimple_call(first_stmt))
++ fndecl = gimple_call_fndecl(first_stmt);
++ else
++ fndecl = current_function_decl;
++
++ if (fndecl == NULL_TREE)
++ return head;
++
++ if (is_in_interesting_node(head, first_stmt, node, num))
++ return head;
++
++ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
++
++ new_node->next = NULL;
++ new_node->first_stmt = first_stmt;
++#if BUILDING_GCC_VERSION <= 4007
++ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
++#else
++ vec_alloc(new_node->last_nodes, VEC_LEN);
++#endif
++ new_node->num = num;
++ new_node->node = node;
++ new_node->fndecl = fndecl;
++ new_node->intentional_attr_decl = MARK_NO;
++ new_node->intentional_attr_cur_fndecl = MARK_NO;
++ new_node->intentional_mark_from_gimple = asm_stmt;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ struct next_cgraph_node *cur_node;
++ tree ret = gimple_return_retval(stmt);
++
++ if (ret == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
++ continue;
++ if (cur_node->num == 0)
++ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the call stmt is in the next cgraph node list then it's an interesting call.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ unsigned int argnum;
++ tree fndecl, arg;
++ struct next_cgraph_node *cur_node;
++
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
++ continue;
++ argnum = get_correct_arg_count(cur_node->num, fndecl);
++ gcc_assert(argnum != CANNOT_FIND_ARG);
++ if (argnum == 0)
++ continue;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
++{
++ if (!operand_equal_p(orig_node, node, 0))
++ return WRONG_NODE;
++ if (skip_types(node))
++ return WRONG_NODE;
++ return ret_count;
++}
++
++// Get the index of the rhs node in an assignment
++static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
++{
++ const_tree rhs1, rhs2;
++ unsigned int ret;
++
++ gcc_assert(stmt);
++ gcc_assert(is_gimple_assign(stmt));
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ gcc_assert(rhs1 != NULL_TREE);
++
++ switch (gimple_num_ops(stmt)) {
++ case 2:
++ return check_ops(node, rhs1, 1);
++ case 3:
++ ret = check_ops(node, rhs1, 1);
++ if (ret != WRONG_NODE)
++ return ret;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ gcc_assert(rhs2 != NULL_TREE);
++ return check_ops(node, rhs2, 2);
++ default:
++ gcc_unreachable();
++ }
++}
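++
++/* For example: given the assignment x = y + z, get_assign_ops_count(stmt, y)
++ * returns 1 and get_assign_ops_count(stmt, z) returns 2 (assuming neither
++ * type is skipped); for the unary x = (u32)y it returns 1 for y.
++ */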
++
++// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
++static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
++{
++ unsigned int i;
++
++ if (gimple_call_fndecl(stmt) == NULL_TREE)
++ return CANNOT_FIND_ARG;
++
++ for (i = 0; i < gimple_call_num_args(stmt); i++) {
++ tree node;
++
++ node = gimple_call_arg(stmt, i);
++ if (!operand_equal_p(arg, node, 0))
++ continue;
++ if (!skip_types(node))
++ return i + 1;
++ }
++
++ return CANNOT_FIND_ARG;
++}
++
++/* Starting from the size_overflow asm stmt, collect the interesting stmts. They can be
++ * any of return, call or assignment stmts (because of inlining).
++ */
++static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
++{
++ use_operand_p use_p;
++ imm_use_iterator imm_iter;
++ unsigned int argnum;
++
++ gcc_assert(TREE_CODE(node) == SSA_NAME);
++
++ if (pointer_set_insert(visited, node))
++ return head;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ gimple stmt = USE_STMT(use_p);
++
++ if (stmt == NULL)
++ return head;
++ if (is_gimple_debug(stmt))
++ continue;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_CALL:
++ argnum = find_arg_number_gimple(node, stmt);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_RETURN:
++ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
++ break;
++ case GIMPLE_ASSIGN:
++ argnum = get_assign_ops_count(stmt, node);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_PHI: {
++ tree result = gimple_phi_result(stmt);
++ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
++ break;
++ }
++ case GIMPLE_ASM:
++ if (gimple_asm_noutputs(stmt) != 0)
++ break;
++ if (!is_size_overflow_asm(stmt))
++ break;
++ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
++ break;
++ case GIMPLE_COND:
++ case GIMPLE_SWITCH:
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ break;
++ }
++ }
++ return head;
++}
++
++static void remove_size_overflow_asm(gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ tree input, output;
++
++ if (!is_size_overflow_asm(stmt))
++ return;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ gsi = gsi_for_stmt(stmt);
++ gsi_remove(&gsi, true);
++ return;
++ }
++
++ input = gimple_asm_input_op(stmt, 0);
++ output = gimple_asm_output_op(stmt, 0);
++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++}
++
++/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
++ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
++ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
++ */
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++{
++ const_tree output;
++ struct pointer_set_t *visited;
++ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++
++ if (!is_size_overflow_asm(stmt))
++ return head;
++
++ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
++ intentional_asm = stmt;
++
++ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ input = gimple_asm_input_op(stmt, 0);
++ remove_size_overflow_asm(stmt);
++ if (is_gimple_constant(TREE_VALUE(input)))
++ return head;
++ visited = pointer_set_create();
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++ }
++
++ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
++ remove_size_overflow_asm(stmt);
++
++ visited = pointer_set_create();
++ output = gimple_asm_output_op(stmt, 0);
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++}
++
++/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
++ * or a call stmt or a return stmt and store them in the interesting_node list
++ */
++static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
++{
++ basic_block bb;
++ struct interesting_node *head = NULL;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ enum gimple_code code;
++ gimple stmt = gsi_stmt(gsi);
++
++ code = gimple_code(stmt);
++
++ if (code == GIMPLE_ASM)
++ head = handle_stmt_by_size_overflow_asm(stmt, head);
++
++ if (!next_node)
++ continue;
++ if (code == GIMPLE_CALL)
++ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ if (code == GIMPLE_RETURN)
++ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ }
++ }
++ return head;
++}
++
++static void set_current_function_decl(tree fndecl)
++{
++ gcc_assert(fndecl != NULL_TREE);
++
++ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
++ calculate_dominance_info(CDI_DOMINATORS);
++ current_function_decl = fndecl;
++}
++
++static void unset_current_function_decl(void)
++{
++ free_dominance_info(CDI_DOMINATORS);
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++}
++
++static void free_interesting_node(struct interesting_node *head)
++{
++ struct interesting_node *cur;
++
++ while (head) {
++ cur = head->next;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, gc, head->last_nodes);
++#else
++ vec_free(head->last_nodes);
++#endif
++ free(head);
++ head = cur;
++ }
++}
++
++static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
++{
++ struct visited *new_visited;
++
++ new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
++ new_visited->fndecl = cur_node->fndecl;
++ new_visited->num = cur_node->num;
++ new_visited->first_stmt = cur_node->first_stmt;
++ new_visited->next = NULL;
++
++ if (!head)
++ return new_visited;
++
++ new_visited->next = head;
++ return new_visited;
++}
++
++/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
++ * it is a visited function.
++ */
++static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
++{
++ struct visited *cur;
++
++ if (!head)
++ return false;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
++ continue;
++ if (cur_node->num != cur->num)
++ continue;
++ if (cur_node->first_stmt == cur->first_stmt)
++ return true;
++ }
++ return false;
++}
++
++static void free_next_cgraph_node(struct next_cgraph_node *head)
++{
++ struct next_cgraph_node *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++static void remove_all_size_overflow_asm(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ remove_size_overflow_asm(gsi_stmt(si));
++ }
++}
++
++/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
++ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
++ * the newly collected interesting functions (they are interesting if there is control flow between
++ * the interesting stmts and them).
++ */
++static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
++{
++ struct interesting_node *head, *cur_node;
++ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
++
++#if BUILDING_GCC_VERSION <= 4007
++ set_current_function_decl(node->decl);
++#else
++ set_current_function_decl(node->symbol.decl);
++#endif
++ call_count = 0;
++
++ head = collect_interesting_stmts(next_node);
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++ if (is_visited_function(visited, cur_node))
++ continue;
++ cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
++ visited = insert_visited_function(visited, cur_node);
++ }
++
++ free_interesting_node(head);
++ remove_all_size_overflow_asm();
++ unset_current_function_decl();
++
++ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
++ visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
++
++ free_next_cgraph_node(cnodes_head);
++ return visited;
++}
++
++static void free_visited(struct visited *head)
++{
++ struct visited *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++// erase the local flag
++static void set_plf_false(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
++ }
++}
++
++#if BUILDING_GCC_VERSION <= 4006
++static bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
++{
++ return node->analyzed && !node->thunk.thunk_p && !node->alias;
++}
++
++static struct cgraph_node *cgraph_first_function_with_gimple_body(void)
++{
++ struct cgraph_node *node;
++
++ for (node = cgraph_nodes; node; node = node->next) {
++ if (cgraph_function_with_gimple_body_p(node))
++ return node;
++ }
++ return NULL;
++}
++
++static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
++{
++ for (node = node->next; node; node = node->next) {
++ if (cgraph_function_with_gimple_body_p(node))
++ return node;
++ }
++ return NULL;
++}
++
++#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
++ for ((node) = cgraph_first_function_with_gimple_body (); (node); \
++ (node) = cgraph_next_function_with_gimple_body (node))
++
++#endif
++
++// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
++static unsigned int search_function(void)
++{
++ struct cgraph_node *node;
++ struct visited *visited = NULL;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++#if BUILDING_GCC_VERSION <= 4007
++ set_current_function_decl(node->decl);
++#else
++ set_current_function_decl(node->symbol.decl);
++#endif
++ set_plf_false();
++ unset_current_function_decl();
++ }
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ gcc_assert(cgraph_function_flags_ready);
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(node->reachable);
++#endif
++
++ visited = handle_function(node, NULL, visited);
++ }
++
++ free_visited(visited);
++ return 0;
++}
++
++static struct ipa_opt_pass_d pass_ipa = {
++ .pass = {
++ .type = SIMPLE_IPA_PASS,
++ .name = "size_overflow",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = search_function,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++};
++
++// data for the size_overflow asm stmt
++struct asm_data {
++ gimple def_stmt;
++ tree input;
++ tree output;
++};
++
++#if BUILDING_GCC_VERSION <= 4007
++static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
++#else
++static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
++#endif
++{
++ tree list;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *vec_list = NULL;
++#else
++ vec<tree, va_gc> *vec_list = NULL;
++#endif
++
++ list = build_tree_list(NULL_TREE, string);
++ list = chainon(NULL_TREE, build_tree_list(list, io));
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, gc, vec_list, list);
++#else
++ vec_safe_push(vec_list, list);
++#endif
++ return vec_list;
++}
++
++static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
++{
++ gimple asm_stmt;
++ gimple_stmt_iterator gsi;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *input, *output = NULL;
++#else
++ vec<tree, va_gc> *input, *output = NULL;
++#endif
++
++ input = create_asm_io_list(str_input, asm_data->input);
++
++ if (asm_data->output)
++ output = create_asm_io_list(str_output, asm_data->output);
++
++ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
++ gsi = gsi_for_stmt(asm_data->def_stmt);
++ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
++
++ if (asm_data->output)
++ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
++}
++
++static void replace_call_lhs(const struct asm_data *asm_data)
++{
++ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
++ update_stmt(asm_data->def_stmt);
++ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
++}
++
++static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
++{
++ enum mark cur_fndecl_attr;
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ cur_fndecl_attr = search_intentional(visited, arg);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ }
++ return MARK_NO;
++}
++
++static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ enum mark cur_fndecl_attr;
++ const_tree rhs1, rhs2;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ cur_fndecl_attr = search_intentional(visited, rhs1);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ return search_intentional(visited, rhs2);
++}
++
++// Look up the intentional_overflow attribute on the caller and the callee functions.
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME)
++ return get_intentional_attr_type(lhs);
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return MARK_NO;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return MARK_NO;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_intentional(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ return MARK_TURN_OFF;
++ return MARK_NO;
++ case GIMPLE_CALL:
++ return MARK_NO;
++ case GIMPLE_PHI:
++ return search_intentional_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_intentional_binary(visited, lhs);
++ }
++ case GIMPLE_RETURN:
++ return MARK_NO;
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++ struct pointer_set_t *visited;
++ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
++
++ fndecl = get_interesting_orig_fndecl(stmt, argnum);
++ if (is_end_intentional_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_YES;
++ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ return MARK_TURN_OFF;
++ }
++
++ visited = pointer_set_create();
++ cur_fndecl_attr = search_intentional(visited, arg);
++ pointer_set_destroy(visited);
++
++ switch (cur_fndecl_attr) {
++ case MARK_NO:
++ return MARK_NO;
++ case MARK_TURN_OFF:
++ return MARK_TURN_OFF;
++ default:
++ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
++ return MARK_YES;
++ }
++}
++
++static void check_missing_size_overflow_attribute(tree var)
++{
++ tree orig_fndecl;
++ unsigned int num;
++
++ if (is_a_return_check(var))
++ orig_fndecl = DECL_ORIGIN(var);
++ else
++ orig_fndecl = DECL_ORIGIN(current_function_decl);
++
++ num = get_function_num(var, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return;
++
++ is_missing_function(orig_fndecl, num);
++}
++
++static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ search_size_overflow_attribute(visited, arg);
++ }
++}
++
++static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ search_size_overflow_attribute(visited, rhs1);
++ search_size_overflow_attribute(visited, rhs2);
++}
++
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) == PARM_DECL) {
++ check_missing_size_overflow_attribute(lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ check_missing_size_overflow_attribute(fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return search_size_overflow_attribute_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_size_overflow_attribute_binary(visited, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Search missing entries in the hash table (invoked from the gimple pass)
++static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
++{
++ tree fndecl = NULL_TREE;
++ tree lhs;
++ struct pointer_set_t *visited;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
++ return;
++
++ if (num == 0) {
++ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
++ lhs = gimple_return_retval(stmt);
++ } else {
++ gcc_assert(is_gimple_call(stmt));
++ lhs = gimple_call_arg(stmt, num - 1);
++ fndecl = gimple_call_fndecl(stmt);
++ }
++
++ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
++ return;
++
++ visited = pointer_set_create();
++ search_size_overflow_attribute(visited, lhs);
++ pointer_set_destroy(visited);
++}
++
++static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++
++ assign = gimple_build_assign(asm_data->input, asm_data->output);
++ gsi = gsi_for_stmt(stmt);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ asm_data->def_stmt = assign;
++
++ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->output = make_ssa_name(asm_data->output, stmt);
++ if (gimple_code(stmt) == GIMPLE_RETURN)
++ gimple_return_set_retval(stmt, asm_data->output);
++ else
++ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
++ update_stmt(stmt);
++}
++
++static const char *convert_mark_to_str(enum mark mark)
++{
++ switch (mark) {
++ case MARK_NO:
++ return OK_ASM_STR;
++ case MARK_YES:
++ case MARK_NOT_INTENTIONAL:
++ return YES_ASM_STR;
++ case MARK_TURN_OFF:
++ return TURN_OFF_ASM_STR;
++ }
++
++ gcc_unreachable();
++}
++
++/* Create the input of the size_overflow asm stmt.
++ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
++ * Otherwise it creates the input (for a phi stmt the output too) of the asm stmt.
++ */
++static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ if (!asm_data->def_stmt) {
++ asm_data->input = NULL_TREE;
++ return;
++ }
++
++ gcc_assert(!is_size_overflow_intentional_asm_turn_off(asm_data->def_stmt));
++
++ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
++
++ switch (gimple_code(asm_data->def_stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ replace_call_lhs(asm_data);
++ break;
++ case GIMPLE_PHI:
++ create_output_from_phi(stmt, argnum, asm_data);
++ break;
++ case GIMPLE_NOP: {
++ enum mark mark;
++ const char *str;
++
++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++ str = convert_mark_to_str(mark);
++
++ asm_data->input = asm_data->output;
++ asm_data->output = NULL;
++ asm_data->def_stmt = stmt;
++
++ create_asm_stmt(str, build_string(2, "rm"), NULL, asm_data);
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ case GIMPLE_ASM:
++ if (is_size_overflow_asm(asm_data->def_stmt)) {
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ default:
++ debug_gimple_stmt(asm_data->def_stmt);
++ gcc_unreachable();
++ }
++}
++
++/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
++ * is of the right kind create the appropriate size_overflow asm stmts:
++ * __asm__("# size_overflow" : "=rm" D.3344_8 : "0" cicus.4_16);
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ */
++static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
++{
++ struct asm_data asm_data;
++ const char *str;
++ enum mark mark;
++
++ if (is_gimple_constant(output_node))
++ return;
++
++ asm_data.output = output_node;
++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ if (mark == MARK_TURN_OFF)
++ return;
++
++ search_missing_size_overflow_attribute_gimple(stmt, argnum);
++
++ asm_data.def_stmt = get_def_stmt(asm_data.output);
++ create_asm_input(stmt, argnum, &asm_data);
++ if (asm_data.input == NULL_TREE)
++ return;
++
++ str = convert_mark_to_str(mark);
++ create_asm_stmt(str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
++}
++
++// Determine the return value and insert the asm stmt to mark the return stmt.
++static void insert_asm_ret(gimple stmt)
++{
++ tree ret;
++
++ ret = gimple_return_retval(stmt);
++ create_size_overflow_asm(stmt, ret, 0);
++}
++
++// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
++static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
++{
++ tree arg;
++ unsigned int argnum;
++
++ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
++ gcc_assert(argnum != 0);
++ if (argnum == CANNOT_FIND_ARG)
++ return;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ gcc_assert(arg != NULL_TREE);
++ create_size_overflow_asm(stmt, arg, argnum);
++}
++
++// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
++static void set_argnum_attribute(const_tree attr, bool *argnums)
++{
++ unsigned int argnum;
++ tree attr_value;
++
++ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
++ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
++ argnums[argnum] = true;
++ }
++}
++
++// If a function arg or the return value is in the hash table then set its index in the array.
++static void set_argnum_hash(tree fndecl, bool *argnums)
++{
++ unsigned int num;
++ const struct size_overflow_hash *hash;
++
++ hash = get_function_hash(DECL_ORIGIN(fndecl));
++ if (!hash)
++ return;
++
++ for (num = 0; num <= MAX_PARAM; num++) {
++ if (!(hash->param & (1U << num)))
++ continue;
++
++ argnums[num] = true;
++ }
++}
++
++static bool is_all_the_argnums_empty(bool *argnums)
++{
++ unsigned int i;
++
++ for (i = 0; i <= MAX_PARAM; i++)
++ if (argnums[i])
++ return false;
++ return true;
++}
++
++// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
++static void search_interesting_args(tree fndecl, bool *argnums)
++{
++ const_tree attr;
++
++ set_argnum_hash(fndecl, argnums);
++ if (!is_all_the_argnums_empty(argnums))
++ return;
++
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++ if (attr && TREE_VALUE(attr))
++ set_argnum_attribute(attr, argnums);
++}
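++
++/* Illustration (hypothetical kernel declaration, not part of this plugin):
++ *
++ *   void *kmalloc(size_t size, gfp_t flags) __attribute__((size_overflow(1)));
++ *
++ * For such a decl search_interesting_args() sets argnums[1]; an entry in the
++ * size_overflow hash table has the same effect via set_argnum_hash().
++ */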
++
++/*
++ * Look up the intentional_overflow attribute that turns off ipa based duplication
++ * on the callee function; if found, insert an asm stmt with "MARK_TURN_OFF".
++ */
++static bool create_mark_turn_off_asm(gimple stmt)
++{
++ enum mark mark;
++ struct asm_data asm_data;
++ const_tree fndecl = gimple_call_fndecl(stmt);
++
++ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
++ if (mark != MARK_TURN_OFF)
++ return false;
++
++ asm_data.def_stmt = stmt;
++ asm_data.output = gimple_call_lhs(stmt);
++
++ if (asm_data.output == NULL_TREE) {
++ asm_data.input = gimple_call_arg(stmt, 0);
++ if (is_gimple_constant(asm_data.input))
++ return false;
++ asm_data.output = NULL;
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(2, "rm"), NULL, &asm_data);
++ return true;
++ }
++
++ create_asm_input(stmt, 0, &asm_data);
++ gcc_assert(asm_data.input != NULL_TREE);
++
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
++ return true;
++}
++
++// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
++static void handle_interesting_function(gimple stmt)
++{
++ unsigned int argnum;
++ tree fndecl;
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ if (gimple_call_num_args(stmt) == 0)
++ return;
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return;
++ fndecl = DECL_ORIGIN(fndecl);
++
++ if (create_mark_turn_off_asm(stmt))
++ return;
++
++ search_interesting_args(fndecl, orig_argnums);
++
++ for (argnum = 1; argnum < MAX_PARAM; argnum++)
++ if (orig_argnums[argnum])
++ insert_asm_arg(stmt, argnum);
++}
++
++// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
++static void handle_interesting_ret(gimple stmt)
++{
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ search_interesting_args(current_function_decl, orig_argnums);
++
++ if (orig_argnums[0])
++ insert_asm_ret(stmt);
++}
++
++// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
++static unsigned int search_interesting_functions(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt = gsi_stmt(gsi);
++
++ if (is_size_overflow_asm(stmt))
++ continue;
++
++ if (is_gimple_call(stmt))
++ handle_interesting_function(stmt);
++ else if (gimple_code(stmt) == GIMPLE_RETURN)
++ handle_interesting_ret(stmt);
++ }
++ }
++ return 0;
++}
++
++/*
++ * A lot of functions get inlined before the ipa passes, so after the build_ssa gimple pass
++ * this pass inserts asm stmts to mark the interesting args
++ * that the ipa pass will detect and insert the size overflow checks for.
++ */
++static struct gimple_opt_pass insert_size_overflow_asm_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "insert_size_overflow_asm",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = search_interesting_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++ }
++};
++
++// Create the noreturn report_size_overflow() function decl.
++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++{
++ tree fntype;
++
++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
++ fntype = build_function_type_list(void_type_node,
++ const_char_ptr_type_node,
++ unsigned_type_node,
++ const_char_ptr_type_node,
++ const_char_ptr_type_node,
++ NULL_TREE);
++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++ TREE_PUBLIC(report_size_overflow_decl) = 1;
++ DECL_EXTERNAL(report_size_overflow_decl) = 1;
++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
++}
++
++static unsigned int dump_functions(void)
++{
++ struct cgraph_node *node;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ basic_block bb;
++
++#if BUILDING_GCC_VERSION <= 4007
++ push_cfun(DECL_STRUCT_FUNCTION(node->decl));
++ current_function_decl = node->decl;
++#else
++ push_cfun(DECL_STRUCT_FUNCTION(node->symbol.decl));
++ current_function_decl = node->symbol.decl;
++#endif
++
++ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", NAME(current_function_decl));
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ fprintf(stderr, "<bb %u>:\n", bb->index);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ fprintf(stderr, "\n");
++ }
++
++ fprintf(stderr, "-------------------------------------------------------------------------\n");
++
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++ }
++
++ fprintf(stderr, "###############################################################################\n");
++
++ return 0;
++}
++
++static struct ipa_opt_pass_d pass_dump = {
++ .pass = {
++ .type = SIMPLE_IPA_PASS,
++ .name = "dump",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = dump_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0,
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++};
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++
++ struct register_pass_info insert_size_overflow_asm_pass_info = {
++ .pass = &insert_size_overflow_asm_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ struct register_pass_info __unused dump_before_pass_info = {
++ .pass = &pass_dump.pass,
++ .reference_pass_name = "increase_alignment",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ struct register_pass_info ipa_pass_info = {
++ .pass = &pass_ipa.pass,
++ .reference_pass_name = "increase_alignment",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ struct register_pass_info __unused dump_after_pass_info = {
++ .pass = &pass_dump.pass,
++ .reference_pass_name = "increase_alignment",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "no-size-overflow")) {
++ enable = false;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++ if (enable) {
++ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
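++
++/* Invocation sketch (hypothetical paths; the kernel build passes the plugin
++ * through its gcc-plugin make machinery rather than by hand):
++ *
++ *   gcc -fplugin=./size_overflow_plugin.so -c file.c
++ *   gcc -fplugin=./size_overflow_plugin.so \
++ *       -fplugin-arg-size_overflow_plugin-no-size-overflow -c file.c
++ *
++ * The second form keeps the plugin loaded but registers no passes.
++ */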
+diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
+new file mode 100644
+index 0000000..ac2901e
+--- /dev/null
++++ b/tools/gcc/stackleak_plugin.c
+@@ -0,0 +1,327 @@
++/*
++ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to help implement various PaX features
++ *
++ * - track lowest stack pointer
++ *
++ * TODO:
++ * - initialize all local variables
++ *
++ * BUGS:
++ * - none known
++ */
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "flags.h"
++#include "intl.h"
++#include "toplev.h"
++#include "plugin.h"
++//#include "expr.h" where are you...
++#include "diagnostic.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "rtl.h"
++#include "emit-rtl.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++extern void print_gimple_stmt(FILE *, gimple, int, int);
++
++int plugin_is_GPL_compatible;
++
++static int track_frame_size = -1;
++static const char track_function[] = "pax_track_stack";
++static const char check_function[] = "pax_check_alloca";
++static bool init_locals;
++
++static struct plugin_info stackleak_plugin_info = {
++ .version = "201302112000",
++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
++// "initialize-locals\t\tforcibly initialize all stack frames\n"
++};
++
++static bool gate_stackleak_track_stack(void);
++static unsigned int execute_stackleak_tree_instrument(void);
++static unsigned int execute_stackleak_final(void);
++
++static struct gimple_opt_pass stackleak_tree_instrument_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "stackleak_tree_instrument",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = gate_stackleak_track_stack,
++ .execute = execute_stackleak_tree_instrument,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_gimple_leh | PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
++ }
++};
++
++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
++ .pass = {
++ .type = RTL_PASS,
++ .name = "stackleak_final",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = gate_stackleak_track_stack,
++ .execute = execute_stackleak_final,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func
++ }
++};
++
++static bool gate_stackleak_track_stack(void)
++{
++ return track_frame_size >= 0;
++}
++
++static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
++{
++ gimple check_alloca;
++ tree fntype, fndecl, alloca_size;
++
++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++ fndecl = build_fn_decl(check_function, fntype);
++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++ // insert call to void pax_check_alloca(unsigned long size)
++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
++ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
++}
++
++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
++{
++ gimple track_stack;
++ tree fntype, fndecl;
++
++ fntype = build_function_type_list(void_type_node, NULL_TREE);
++ fndecl = build_fn_decl(track_function, fntype);
++ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++
++ // insert call to void pax_track_stack(void)
++ track_stack = gimple_build_call(fndecl, 0);
++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
++}
++
++#if BUILDING_GCC_VERSION == 4005
++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
++{
++ tree fndecl;
++
++ if (!is_gimple_call(stmt))
++ return false;
++ fndecl = gimple_call_fndecl(stmt);
++ if (!fndecl)
++ return false;
++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
++ return false;
++// print_node(stderr, "pax", fndecl, 4);
++ return DECL_FUNCTION_CODE(fndecl) == code;
++}
++#endif
++
++static bool is_alloca(gimple stmt)
++{
++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
++ return true;
++
++#if BUILDING_GCC_VERSION >= 4007
++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
++ return true;
++#endif
++
++ return false;
++}
++
++static unsigned int execute_stackleak_tree_instrument(void)
++{
++ basic_block bb, entry_bb;
++ bool prologue_instrumented = false, is_leaf = true;
++
++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
++
++ // 1. loop through BBs and GIMPLE statements
++ FOR_EACH_BB(bb) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt;
++
++ stmt = gsi_stmt(gsi);
++
++ if (is_gimple_call(stmt))
++ is_leaf = false;
++
++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
++ if (!is_alloca(stmt))
++ continue;
++
++ // 2. insert stack overflow check before each __builtin_alloca call
++ stackleak_check_alloca(&gsi);
++
++ // 3. insert track call after each __builtin_alloca call
++ stackleak_add_instrumentation(&gsi);
++ if (bb == entry_bb)
++ prologue_instrumented = true;
++ }
++ }
++
++ // special cases for some bad linux code: taking the address of static inline functions will materialize them
++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
++ return 0;
++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
++ return 0;
++
++ // 4. insert track call at the beginning
++ if (!prologue_instrumented) {
++ gimple_stmt_iterator gsi;
++
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++ gsi = gsi_start_bb(bb);
++ stackleak_add_instrumentation(&gsi);
++ }
++
++ return 0;
++}
++
++static unsigned int execute_stackleak_final(void)
++{
++ rtx insn, next;
++
++ if (cfun->calls_alloca)
++ return 0;
++
++ // keep calls only if function frame is big enough
++ if (get_frame_size() >= track_frame_size)
++ return 0;
++
++ // 1. find pax_track_stack calls
++ for (insn = get_insns(); insn; insn = next) {
++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
++ rtx body;
++
++ next = NEXT_INSN(insn);
++ if (!CALL_P(insn))
++ continue;
++ body = PATTERN(insn);
++ if (GET_CODE(body) != CALL)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != MEM)
++ continue;
++ body = XEXP(body, 0);
++ if (GET_CODE(body) != SYMBOL_REF)
++ continue;
++ if (strcmp(XSTR(body, 0), track_function))
++ continue;
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++ // 2. delete call
++ delete_insn_and_edges(insn);
++#if BUILDING_GCC_VERSION >= 4007
++ if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
++ insn = next;
++ next = NEXT_INSN(insn);
++ delete_insn_and_edges(insn);
++ }
++#endif
++ }
++
++// print_simple_rtl(stderr, get_insns());
++// print_rtl(stderr, get_insns());
++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
++
++ return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ int i;
++ struct register_pass_info stackleak_tree_instrument_pass_info = {
++ .pass = &stackleak_tree_instrument_pass.pass,
++// .reference_pass_name = "tree_profile",
++ .reference_pass_name = "optimized",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++ struct register_pass_info stackleak_final_pass_info = {
++ .pass = &stackleak_final_rtl_opt_pass.pass,
++ .reference_pass_name = "final",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_BEFORE
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "track-lowest-sp")) {
++ if (!argv[i].value) {
++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ continue;
++ }
++ track_frame_size = atoi(argv[i].value);
++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ if (!strcmp(argv[i].key, "initialize-locals")) {
++ if (argv[i].value) {
++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++ continue;
++ }
++ init_locals = true;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
++
++ return 0;
++}
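The two symbols this pass emits calls to, pax_track_stack() and pax_check_alloca(), live on the kernel side of the patch rather than in this file. A hedged sketch of what they amount to; the bodies below are illustrative assumptions (field placement and the exact bounds checks differ in the real patch), not the actual implementation:

    #include <linux/sched.h>
    #include <linux/bug.h>

    void pax_track_stack(void)
    {
            unsigned long sp = (unsigned long)&sp;

            /* remember the lowest stack pointer observed, so that only
               the used part of the kernel stack needs erasing later */
            if (sp < current_thread_info()->lowest_stack)
                    current_thread_info()->lowest_stack = sp;
    }

    void pax_check_alloca(unsigned long size)
    {
            unsigned long sp = (unsigned long)&sp;

            /* refuse an alloca that would run off the current stack */
            BUG_ON(sp - size < (unsigned long)current->stack);
    }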
+diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
+new file mode 100644
+index 0000000..4fae911
+--- /dev/null
++++ b/tools/gcc/structleak_plugin.c
+@@ -0,0 +1,277 @@
++/*
++ * Copyright 2013 by PaX Team <pageexec@freemail.hu>
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the kernel it doesn't matter since it doesn't link against
++ * any of the gcc libraries
++ *
++ * gcc plugin to forcibly initialize certain local variables that could
++ * otherwise leak kernel stack to userland if they aren't properly initialized
++ * by later code
++ *
++ * Homepage: http://pax.grsecurity.net/
++ *
++ * Usage:
++ * $ # for 4.5/4.6/C based 4.7
++ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
++ * $ # for C++ based 4.7/4.8+
++ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
++ * $ gcc -fplugin=./structleak_plugin.so test.c -O2
++ *
++ * TODO: eliminate redundant initializers
++ * increase type coverage
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++#include "langhooks.h"
++
++#if BUILDING_GCC_VERSION >= 4008
++#define TODO_dump_func 0
++#endif
++
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++
++// unused type flag in all versions 4.5-4.8
++#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static struct plugin_info structleak_plugin_info = {
++ .version = "201304082245",
++ .help = "disable\tdo not activate plugin\n",
++};
++
++static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
++{
++ *no_add_attrs = true;
++
++ // check for types? for now accept everything linux has to offer
++ if (TREE_CODE(*node) != FIELD_DECL)
++ return NULL_TREE;
++
++ *no_add_attrs = false;
++ return NULL_TREE;
++}
++
++static struct attribute_spec user_attr = {
++ .name = "user",
++ .min_length = 0,
++ .max_length = 0,
++ .decl_required = false,
++ .type_required = false,
++ .function_type_required = false,
++ .handler = handle_user_attribute,
++#if BUILDING_GCC_VERSION >= 4007
++ .affects_type_identity = true
++#endif
++};
++
++static void register_attributes(void *event_data, void *data)
++{
++ register_attribute(&user_attr);
++// register_attribute(&force_attr);
++}
++
++static tree get_field_type(tree field)
++{
++ return strip_array_types(TREE_TYPE(field));
++}
++
++static bool is_userspace_type(tree type)
++{
++ tree field;
++
++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
++ tree fieldtype = get_field_type(field);
++ enum tree_code code = TREE_CODE(fieldtype);
++
++ if (code == RECORD_TYPE || code == UNION_TYPE)
++ if (is_userspace_type(fieldtype))
++ return true;
++
++ if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
++ return true;
++ }
++ return false;
++}
++
++static void finish_type(void *event_data, void *data)
++{
++ tree type = (tree)event_data;
++
++ if (TYPE_USERSPACE(type))
++ return;
++
++ if (is_userspace_type(type))
++ TYPE_USERSPACE(type) = 1;
++}
++
++static void initialize(tree var)
++{
++ basic_block bb;
++ gimple_stmt_iterator gsi;
++ tree initializer;
++ gimple init_stmt;
++
++ // this is the original entry bb before the forced split
++ // TODO: check further BBs in case more splits occurred before us
++ bb = ENTRY_BLOCK_PTR->next_bb->next_bb;
++
++ // first check if the variable is already initialized, warn otherwise
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt = gsi_stmt(gsi);
++ tree rhs1;
++
++ // we're looking for an assignment of a single rhs...
++ if (!gimple_assign_single_p(stmt))
++ continue;
++ rhs1 = gimple_assign_rhs1(stmt);
++#if BUILDING_GCC_VERSION >= 4007
++ // ... of a non-clobbering expression...
++ if (TREE_CLOBBER_P(rhs1))
++ continue;
++#endif
++ // ... to our variable...
++ if (gimple_get_lhs(stmt) != var)
++ continue;
++ // if it's an initializer then we're good
++ if (TREE_CODE(rhs1) == CONSTRUCTOR)
++ return;
++ }
++
++ // these aren't the 0days you're looking for
++// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
++
++ // build the initializer expression
++ initializer = build_constructor(TREE_TYPE(var), NULL);
++
++ // build the initializer stmt
++ init_stmt = gimple_build_assign(var, initializer);
++ gsi = gsi_start_bb(ENTRY_BLOCK_PTR->next_bb);
++ gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
++ update_stmt(init_stmt);
++}
++
++static unsigned int handle_function(void)
++{
++ basic_block bb;
++ unsigned int ret = 0;
++ tree var;
++
++#if BUILDING_GCC_VERSION == 4005
++ tree vars;
++#else
++ unsigned int i;
++#endif
++
++ // split the first bb where we can put the forced initializers
++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
++ if (dom_info_available_p(CDI_DOMINATORS))
++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
++
++ // enumerate all local variables and forcibly initialize our targets
++#if BUILDING_GCC_VERSION == 4005
++ for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
++ var = TREE_VALUE(vars);
++#else
++ FOR_EACH_LOCAL_DECL(cfun, i, var) {
++#endif
++ tree type = TREE_TYPE(var);
++
++ gcc_assert(DECL_P(var));
++ if (!auto_var_in_fn_p(var, current_function_decl))
++ continue;
++
++ // only care about structure types
++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++ continue;
++
++ // if the type is of interest, examine the variable
++ if (TYPE_USERSPACE(type))
++ initialize(var);
++ }
++
++ return ret;
++}
++
++static struct gimple_opt_pass structleak_pass = {
++ .pass = {
++ .type = GIMPLE_PASS,
++ .name = "structleak",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++ .gate = NULL,
++ .execute = handle_function,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++ .tv_id = TV_NONE,
++ .properties_required = PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
++ }
++};
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++
++ struct register_pass_info structleak_pass_info = {
++ .pass = &structleak_pass.pass,
++ .reference_pass_name = "ssa",
++ .ref_pass_instance_number = 1,
++ .pos_op = PASS_POS_INSERT_AFTER
++ };
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
++ }
++
++ if (strcmp(lang_hooks.name, "GNU C")) {
++ inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
++ enable = false;
++ }
++
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "disable")) {
++ enable = false;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
++
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
++ if (enable) {
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
++ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
++}
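Concretely, the pass fires on any local variable whose struct (or union) type contains, directly or nested, a field carrying the user attribute, and assigns it an empty constructor at function entry. A small hedged example with hypothetical names; in a kernel build the attribute arrives via the __user annotation:

    #define __user __attribute__((user))

    struct ubuf {
            void __user *ptr;       /* this field marks the whole type */
            unsigned long len;
    };

    int example(void)
    {
            struct ubuf b;          /* plugin inserts "b = {};" here */

            /* a path that copies &b out to userland can no longer leak
               whatever the stack happened to contain */
            return 0;
    }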
+diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
+index 6789d78..4afd019e 100644
+--- a/tools/perf/util/include/asm/alternative-asm.h
++++ b/tools/perf/util/include/asm/alternative-asm.h
+@@ -5,4 +5,7 @@
+
+ #define altinstruction_entry #
+
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++
+ #endif
+diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
+index 547628e..74de9f2 100644
+--- a/tools/perf/util/include/linux/compiler.h
++++ b/tools/perf/util/include/linux/compiler.h
+@@ -11,4 +11,12 @@
+
+ #define __used __attribute__((__unused__))
+
++#ifndef __size_overflow
++# define __size_overflow(...)
++#endif
++
++#ifndef __intentional_overflow
++# define __intentional_overflow(...)
++#endif
++
+ #endif
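perf needs these empty fallbacks because it builds against kernel headers in which the size_overflow plugin's markers may appear. A hedged sketch of how such markers read in annotated kernel code (the prototypes are illustrative, not copied from the patch):

    /* parameter 1 feeds size arithmetic that the plugin must instrument */
    void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);

    /* overflows inside this function are deliberate and must not trap */
    u32 mix_hash(u32 a, u32 b) __intentional_overflow(-1);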
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index d83aa5e..e097f17 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
+
+ static cpumask_var_t cpus_hardware_enabled;
+ static int kvm_usage_count = 0;
+-static atomic_t hardware_enable_failed;
++static atomic_unchecked_t hardware_enable_failed;
+
+ struct kmem_cache *kvm_vcpu_cache;
+ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+
+-static __read_mostly struct preempt_ops kvm_preempt_ops;
++static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
++static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
++static struct preempt_ops kvm_preempt_ops = {
++ .sched_in = kvm_sched_in,
++ .sched_out = kvm_sched_out,
++};
+
+ struct dentry *kvm_debugfs_dir;
+
+@@ -659,7 +664,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ /* We can read the guest memory with __xxx_user() later on. */
+ if (user_alloc &&
+ ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+- !access_ok(VERIFY_WRITE,
++ !__access_ok(VERIFY_WRITE,
+ (void __user *)(unsigned long)mem->userspace_addr,
+ mem->memory_size)))
+ goto out;
+@@ -1660,7 +1665,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+ return 0;
+ }
+
+-static struct file_operations kvm_vcpu_fops = {
++static file_operations_no_const kvm_vcpu_fops __read_only = {
+ .release = kvm_vcpu_release,
+ .unlocked_ioctl = kvm_vcpu_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -2183,7 +2188,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+ return 0;
+ }
+
+-static struct file_operations kvm_vm_fops = {
++static file_operations_no_const kvm_vm_fops __read_only = {
+ .release = kvm_vm_release,
+ .unlocked_ioctl = kvm_vm_ioctl,
+ #ifdef CONFIG_COMPAT
+@@ -2281,7 +2286,7 @@ out:
+ return r;
+ }
+
+-static struct file_operations kvm_chardev_ops = {
++static file_operations_no_const kvm_chardev_ops __read_only = {
+ .unlocked_ioctl = kvm_dev_ioctl,
+ .compat_ioctl = kvm_dev_ioctl,
+ .llseek = noop_llseek,
+@@ -2307,7 +2312,7 @@ static void hardware_enable_nolock(void *junk)
+
+ if (r) {
+ cpumask_clear_cpu(cpu, cpus_hardware_enabled);
+- atomic_inc(&hardware_enable_failed);
++ atomic_inc_unchecked(&hardware_enable_failed);
+ printk(KERN_INFO "kvm: enabling virtualization on "
+ "CPU%d failed\n", cpu);
+ }
+@@ -2361,10 +2366,10 @@ static int hardware_enable_all(void)
+
+ kvm_usage_count++;
+ if (kvm_usage_count == 1) {
+- atomic_set(&hardware_enable_failed, 0);
++ atomic_set_unchecked(&hardware_enable_failed, 0);
+ on_each_cpu(hardware_enable_nolock, NULL, 1);
+
+- if (atomic_read(&hardware_enable_failed)) {
++ if (atomic_read_unchecked(&hardware_enable_failed)) {
+ hardware_disable_all_nolock();
+ r = -EBUSY;
+ }
+@@ -2715,7 +2720,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+ kvm_arch_vcpu_put(vcpu);
+ }
+
+-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module)
+ {
+ int r;
+@@ -2778,7 +2783,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ if (!vcpu_align)
+ vcpu_align = __alignof__(struct kvm_vcpu);
+ kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
+- 0, NULL);
++ SLAB_USERCOPY, NULL);
+ if (!kvm_vcpu_cache) {
+ r = -ENOMEM;
+ goto out_free_3;
+@@ -2788,9 +2793,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ if (r)
+ goto out_free;
+
++ pax_open_kernel();
+ kvm_chardev_ops.owner = module;
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
++ pax_close_kernel();
+
+ r = misc_register(&kvm_dev);
+ if (r) {
+@@ -2800,9 +2807,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+
+ register_syscore_ops(&kvm_syscore_ops);
+
+- kvm_preempt_ops.sched_in = kvm_sched_in;
+- kvm_preempt_ops.sched_out = kvm_sched_out;
+-
+ kvm_init_debug();
+
+ return 0;
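Two PaX idioms recur in the kvm_main.c hunks above. atomic_unchecked_t opts a counter out of PAX_REFCOUNT's overflow trapping, which fits hardware_enable_failed since it is a transient error tally rather than an object refcount; and pax_open_kernel()/pax_close_kernel() bracket the only legitimate writes to structures made __read_only, as with the three fops .owner assignments. A hedged sketch of the bracketing pattern (the helpers' internals, which essentially toggle kernel write protection, are assumed here):

    static file_operations_no_const example_fops __read_only = {
            .llseek = noop_llseek,
    };

    static void example_bind(struct module *mod)
    {
            pax_open_kernel();      /* temporarily allow writing __read_only data */
            example_fops.owner = mod;
            pax_close_kernel();     /* restore the write protection */
    }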
diff --git a/3.2.54/4425_grsec_remove_EI_PAX.patch b/3.2.54/4425_grsec_remove_EI_PAX.patch
new file mode 100644
index 0000000..415fda5
--- /dev/null
+++ b/3.2.54/4425_grsec_remove_EI_PAX.patch
@@ -0,0 +1,19 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+Deprecate EI_PAX.
+
+X-Gentoo-Bug: 445600
+X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
+
+diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
+--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
++++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
+@@ -266,7 +266,7 @@
+
+ config PAX_EI_PAX
+ bool 'Use legacy ELF header marking'
+- default y if GRKERNSEC_CONFIG_AUTO
++ depends on BROKEN
+ help
+ Enabling this option will allow you to control PaX features on
+ a per executable basis via the 'chpax' utility available at
diff --git a/3.2.54/4427_force_XATTR_PAX_tmpfs.patch b/3.2.54/4427_force_XATTR_PAX_tmpfs.patch
new file mode 100644
index 0000000..8c7a533
--- /dev/null
+++ b/3.2.54/4427_force_XATTR_PAX_tmpfs.patch
@@ -0,0 +1,35 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+For users who emerge packages without XATTR_PAX support enabled, we still want the
+user.pax.flags namespace supported on tmpfs so that PaX markings survive the emerge
+(a usage sketch follows this patch).
+
+diff -Naur a/mm/shmem.c b/mm/shmem.c
+--- a/mm/shmem.c 2013-06-11 21:00:18.000000000 -0400
++++ b/mm/shmem.c 2013-06-11 21:08:18.000000000 -0400
+@@ -1809,11 +1809,7 @@
+ static int shmem_xattr_validate(const char *name)
+ {
+ struct { const char *prefix; size_t len; } arr[] = {
+-
+-#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+-#endif
+-
+ { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ };
+@@ -1867,14 +1863,12 @@
+ if (err)
+ return err;
+
+-#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
+ return -EOPNOTSUPP;
+ if (size > 8)
+ return -EINVAL;
+ }
+-#endif
+
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
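With the #ifdef removed, the user.pax.flags name works on tmpfs regardless of kernel configuration. A hedged userland sketch of the marking the description mentions (the path and flag letters are examples only; lowercase letters disable the named PaX feature, and the patched shmem code caps the value at 8 bytes):

    #include <sys/xattr.h>
    #include <stdio.h>

    int main(void)
    {
            /* "pm" would disable PAGEEXEC and MPROTECT for this binary */
            if (setxattr("/var/tmp/portage/image/usr/bin/foo",
                         "user.pax.flags", "pm", 2, 0) != 0)
                    perror("setxattr");
            return 0;
    }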
diff --git a/3.2.54/4430_grsec-remove-localversion-grsec.patch b/3.2.54/4430_grsec-remove-localversion-grsec.patch
new file mode 100644
index 0000000..31cf878
--- /dev/null
+++ b/3.2.54/4430_grsec-remove-localversion-grsec.patch
@@ -0,0 +1,9 @@
+From: Kerin Millar <kerframil@gmail.com>
+
+Remove grsecurity's localversion-grsec file as it is inconsistent with
+Gentoo's kernel practices and naming scheme.
+
+--- a/localversion-grsec 2008-02-24 14:26:59.000000000 +0000
++++ b/localversion-grsec 1970-01-01 01:00:00.000000000 +0100
+@@ -1 +0,0 @@
+--grsec
diff --git a/3.2.54/4435_grsec-mute-warnings.patch b/3.2.54/4435_grsec-mute-warnings.patch
new file mode 100644
index 0000000..f099757
--- /dev/null
+++ b/3.2.54/4435_grsec-mute-warnings.patch
@@ -0,0 +1,43 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+Updated patch for 2.6.38.6
+
+The credits/description from the original version of this patch remain accurate
+and are included below.
+
+---
+From: Jory A. Pratt <anarchy@gentoo.org>
+Updated patch for kernel 2.6.32
+
+The credits/description from the original version of this patch remain accurate
+and are included below.
+
+---
+From: Gordon Malm <gengor@gentoo.org>
+
+Updated patch for kernel series 2.6.24.
+
+The credits/description from the original version of this patch remain accurate
+and are included below.
+
+---
+From: Alexander Gabert <gaberta@fh-trier.de>
+
+This patch removes the warnings introduced by grsec patch 2.1.9 and later.
+It removes the -W options added by the patch and restores the original
+warning flags of vanilla kernel versions.
+
+Acked-by: Christian Heim <phreak@gentoo.org>
+---
+
+--- a/Makefile 2011-11-18 17:50:11.000000000 -0500
++++ b/Makefile 2011-11-18 17:50:48.000000000 -0500
+@@ -245,7 +245,7 @@
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+ HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+ HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+
+
diff --git a/3.2.54/4440_grsec-remove-protected-paths.patch b/3.2.54/4440_grsec-remove-protected-paths.patch
new file mode 100644
index 0000000..05710b1
--- /dev/null
+++ b/3.2.54/4440_grsec-remove-protected-paths.patch
@@ -0,0 +1,19 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+We don't want grsecurity's Makefile to change permissions on paths in
+the filesystem.
+
+diff -Naur a/grsecurity/Makefile b/grsecurity/Makefile
+--- a/grsecurity/Makefile 2011-10-19 20:42:50.000000000 -0400
++++ b/grsecurity/Makefile 2011-10-19 20:45:08.000000000 -0400
+@@ -34,10 +34,4 @@
+ ifdef CONFIG_GRKERNSEC_HIDESYM
+ extra-y := grsec_hidesym.o
+ $(obj)/grsec_hidesym.o:
+- @-chmod -f 500 /boot
+- @-chmod -f 500 /lib/modules
+- @-chmod -f 500 /lib64/modules
+- @-chmod -f 500 /lib32/modules
+- @-chmod -f 700 .
+- @echo ' grsec: protected kernel image paths'
+ endif
diff --git a/3.2.54/4450_grsec-kconfig-default-gids.patch b/3.2.54/4450_grsec-kconfig-default-gids.patch
new file mode 100644
index 0000000..55a02aa
--- /dev/null
+++ b/3.2.54/4450_grsec-kconfig-default-gids.patch
@@ -0,0 +1,111 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+Updated patch for the new Kconfig system in grsec 2.9.1
+
+---
+From: Kerin Millar <kerframil@gmail.com>
+
+grsecurity contains a number of options which allow certain protections
+to be applied to or exempted from members of a given group. However, the
+default GIDs specified in the upstream patch are entirely arbitrary and
+there is no telling which (if any) groups the GIDs will correlate with
+on an end-user's system. Because some users don't pay a great deal of
+attention to the finer points of kernel configuration, it is probably
+wise to specify some reasonable defaults so as to stop careless users
+from shooting themselves in the foot.
+
+diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
+--- a/grsecurity/Kconfig 2012-10-13 09:51:35.000000000 -0400
++++ b/grsecurity/Kconfig 2012-10-13 09:52:32.000000000 -0400
+@@ -617,7 +617,7 @@
+ config GRKERNSEC_AUDIT_GID
+ int "GID for auditing"
+ depends on GRKERNSEC_AUDIT_GROUP
+- default 1007
++ default 100
+
+ config GRKERNSEC_EXECLOG
+ bool "Exec logging"
+@@ -826,7 +826,7 @@
+ config GRKERNSEC_TPE_UNTRUSTED_GID
+ int "GID for TPE-untrusted users"
+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+- default 1005
++ default 100
+ help
+ Setting this GID determines what group TPE restrictions will be
+ *enabled* for. If the sysctl option is enabled, a sysctl option
+@@ -835,7 +835,7 @@
+ config GRKERNSEC_TPE_TRUSTED_GID
+ int "GID for TPE-trusted users"
+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+- default 1005
++ default 10
+ help
+ Setting this GID determines what group TPE restrictions will be
+ *disabled* for. If the sysctl option is enabled, a sysctl option
+@@ -928,7 +928,7 @@
+ config GRKERNSEC_SOCKET_ALL_GID
+ int "GID to deny all sockets for"
+ depends on GRKERNSEC_SOCKET_ALL
+- default 1004
++ default 65534
+ help
+ Here you can choose the GID to disable socket access for. Remember to
+ add the users you want socket access disabled for to the GID
+@@ -949,7 +949,7 @@
+ config GRKERNSEC_SOCKET_CLIENT_GID
+ int "GID to deny client sockets for"
+ depends on GRKERNSEC_SOCKET_CLIENT
+- default 1003
++ default 65534
+ help
+ Here you can choose the GID to disable client socket access for.
+ Remember to add the users you want client socket access disabled for to
+@@ -967,7 +967,7 @@
+ config GRKERNSEC_SOCKET_SERVER_GID
+ int "GID to deny server sockets for"
+ depends on GRKERNSEC_SOCKET_SERVER
+- default 1002
++ default 65534
+ help
+ Here you can choose the GID to disable server socket access for.
+ Remember to add the users you want server socket access disabled for to
+diff -Nuar a/security/Kconfig b/security/Kconfig
+--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
++++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
+@@ -194,7 +194,7 @@
+
+ config GRKERNSEC_PROC_GID
+ int "GID exempted from /proc restrictions"
+- default 1001
++ default 10
+ help
+ Setting this GID determines which group will be exempted from
+ grsecurity's /proc restrictions, allowing users of the specified
+@@ -205,7 +205,7 @@
+ config GRKERNSEC_TPE_UNTRUSTED_GID
+ int "GID for TPE-untrusted users"
+ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+- default 1005
++ default 100
+ help
+ Setting this GID determines which group untrusted users should
+ be added to. These users will be placed under grsecurity's Trusted Path
+@@ -217,7 +217,7 @@
+ config GRKERNSEC_TPE_TRUSTED_GID
+ int "GID for TPE-trusted users"
+ depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+- default 1005
++ default 10
+ help
+ Setting this GID determines what group TPE restrictions will be
+ *disabled* for. If the sysctl option is enabled, a sysctl option
+@@ -226,7 +226,7 @@
+ config GRKERNSEC_SYMLINKOWN_GID
+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
+ depends on GRKERNSEC_CONFIG_SERVER
+- default 1006
++ default 100
+ help
+ Setting this GID determines what group kernel-enforced
+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
diff --git a/3.2.54/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.54/4465_selinux-avc_audit-log-curr_ip.patch
new file mode 100644
index 0000000..a946b66
--- /dev/null
+++ b/3.2.54/4465_selinux-avc_audit-log-curr_ip.patch
@@ -0,0 +1,73 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+Removed deprecated NIPQUAD macro in favor of %pI4.
+See bug #346333.
+
+---
+From: Gordon Malm <gengor@gentoo.org>
+
+This is a reworked version of the original
+*_selinux-avc_audit-log-curr_ip.patch carried in earlier releases of
+hardened-sources.
+
+Dropping the patch, or simply fixing the #ifdef in the original patch,
+could break automated logging setups, so this route was necessary.
+
+Suggestions for improving the help text are welcome.
+
+The original patch's description is still accurate and included below.
+
+---
+Provides support for a new field, ipaddr, within the SELinux
+AVC audit log, relying on task_struct->curr_ip (IPv4 only) as
+provided by the grsecurity patch, which must be applied first.
+
+Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
+---
+
+diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
+--- a/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400
++++ b/grsecurity/Kconfig 2011-04-17 19:32:53.000000000 -0400
+@@ -1062,6 +1062,27 @@
+ menu "Logging Options"
+ depends on GRKERNSEC
+
++config GRKERNSEC_SELINUX_AVC_LOG_IPADDR
++ def_bool n
++ prompt "Add source IP address to SELinux AVC log messages"
++ depends on GRKERNSEC && SECURITY_SELINUX
++ help
++ If you say Y here, a new field "ipaddr=" will be added to many SELinux
++ AVC log messages. The value of this field in any given message
++ represents the source IP address of the remote machine/user that created
++ the offending process.
++
++ This information is sourced from task_struct->curr_ip provided by
++ grsecurity's GRKERNSEC top-level configuration option. One limitation
++ is that only IPv4 is supported.
++
++ In many instances SELinux AVC log messages already log a superior level
++ of information that also includes source port and destination ip/port.
++ Additionally, SELinux's AVC log code supports IPv6.
++
++ However, grsecurity's task_struct->curr_ip will sometimes (often?)
++ provide the offender's IP address where stock SELinux logging fails to.
++
+ config GRKERNSEC_FLOODTIME
+ int "Seconds in between log messages (minimum)"
+ default 10
+diff -Naur a/security/selinux/avc.c b/security/selinux/avc.c
+--- a/security/selinux/avc.c 2011-04-17 19:04:47.000000000 -0400
++++ b/security/selinux/avc.c 2011-04-17 19:32:53.000000000 -0400
+@@ -139,6 +139,11 @@
+ char *scontext;
+ u32 scontext_len;
+
++#ifdef CONFIG_GRKERNSEC_SELINUX_AVC_LOG_IPADDR
++ if (current->signal->curr_ip)
++ audit_log_format(ab, "ipaddr=%pI4 ", &current->signal->curr_ip);
++#endif
++
+ rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+ if (rc)
+ audit_log_format(ab, "ssid=%d", ssid);
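On the %pI4 point above, a hedged before/after sketch of the same log statement, old NIPQUAD style versus the form this patch uses:

    /* pre-%pI4 kernels (deprecated helper): */
    audit_log_format(ab, "ipaddr=" NIPQUAD_FMT " ",
                     NIPQUAD(current->signal->curr_ip));

    /* modern form, as in the hunk above: */
    audit_log_format(ab, "ipaddr=%pI4 ", &current->signal->curr_ip);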
diff --git a/3.2.54/4470_disable-compat_vdso.patch b/3.2.54/4470_disable-compat_vdso.patch
new file mode 100644
index 0000000..6905571
--- /dev/null
+++ b/3.2.54/4470_disable-compat_vdso.patch
@@ -0,0 +1,46 @@
+No need to wrap VDSO calls, as Gentoo does not use any glibc
+version <= 2.3.3.
+---
+From: Gordon Malm <gengor@gentoo.org>
+From: Kerin Millar <kerframil@gmail.com>
+From: Jory A. Pratt <anarchy@gentoo.org>
+
+COMPAT_VDSO is inappropriate for any modern Hardened Gentoo system. It
+conflicts with various parts of PaX, crashing the system if enabled
+while PaX's NOEXEC or UDEREF features are active. Moreover, it prevents
+a number of important PaX options from appearing in the configuration
+menu, including all PaX NOEXEC implementations. Unfortunately, the
+reason for the disappearance of these PaX configuration options is
+often far from obvious to inexperienced users.
+
+Therefore, we disable the COMPAT_VDSO menu entry entirely. However,
+COMPAT_VDSO operation can still be enabled via bootparam and sysctl
+interfaces. Consequently, we must also disable the ability to select
+COMPAT_VDSO operation at boot or runtime. Here we patch the kernel so
+that selecting COMPAT_VDSO operation at boot/runtime has no effect if
+conflicting PaX options are enabled, leaving VDSO_ENABLED operation
+intact.
+
+Closes bug: http://bugs.gentoo.org/show_bug.cgi?id=210138
+
+diff -urp a/arch/x86/Kconfig b/arch/x86/Kconfig
+--- a/arch/x86/Kconfig 2009-07-31 01:36:57.323857684 +0100
++++ b/arch/x86/Kconfig 2009-07-31 01:51:39.395749681 +0100
+@@ -1653,17 +1653,8 @@
+
+ config COMPAT_VDSO
+ def_bool n
+- prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+- ---help---
+- Map the 32-bit VDSO to the predictable old-style address too.
+-
+- Say N here if you are running a sufficiently recent glibc
+- version (2.3.3 or later), to remove the high-mapped
+- VDSO mapping and to exclusively use the randomized VDSO.
+-
+- If unsure, say Y.
+
+ config CMDLINE_BOOL
+ bool "Built-in kernel command line"
diff --git a/3.2.54/4475_emutramp_default_on.patch b/3.2.54/4475_emutramp_default_on.patch
new file mode 100644
index 0000000..cfde6f8
--- /dev/null
+++ b/3.2.54/4475_emutramp_default_on.patch
@@ -0,0 +1,21 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+PAX_EMUTRAMP is needed for libffi to avoid RWX mappings, using PaX's emulation of trampolines.
+We default PAX_EMUTRAMP=y since almost all hardened users will want this.
+
+See bugs:
+ http://bugs.gentoo.org/show_bug.cgi?id=329499
+ http://bugs.gentoo.org/show_bug.cgi?id=457194
+
+diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
+--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
++++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
+@@ -427,7 +427,7 @@
+
+ config PAX_EMUTRAMP
+ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
+- default y if PARISC
++ default y
+ help
+ There are some programs and libraries that for one reason or
+ another attempt to execute special small code snippets from