summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2012-10-25 17:44:19 -0400
committerAnthony G. Basile <blueness@gentoo.org>2012-10-25 17:44:19 -0400
commitc705e19d72cdb12baa96c66168feae3784a1d4f1 (patch)
tree3082e04e4d19dffe7f1d743ef3609eefe2857f9a
parentGrsec/PaX: 2.9.1-{2.6.32.60,3.2.31,3.6.2}-201210151829 (diff)
downloadhardened-patchset-c705e19d72cdb12baa96c66168feae3784a1d4f1.tar.gz
hardened-patchset-c705e19d72cdb12baa96c66168feae3784a1d4f1.tar.bz2
hardened-patchset-c705e19d72cdb12baa96c66168feae3784a1d4f1.zip
Grsec/PaX: 2.9.1-{2.6.32.60,3.2.32,3.6.3}-20121023193520121023
-rw-r--r--2.6.32/0000_README2
-rw-r--r--2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210121913.patch)28
-rw-r--r--3.2.32/0000_README (renamed from 3.2.31/0000_README)2
-rw-r--r--3.2.32/1021_linux-3.2.22.patch (renamed from 3.2.31/1021_linux-3.2.22.patch)0
-rw-r--r--3.2.32/1022_linux-3.2.23.patch (renamed from 3.2.31/1022_linux-3.2.23.patch)0
-rw-r--r--3.2.32/1023_linux-3.2.24.patch (renamed from 3.2.31/1023_linux-3.2.24.patch)0
-rw-r--r--3.2.32/1024_linux-3.2.25.patch (renamed from 3.2.31/1024_linux-3.2.25.patch)0
-rw-r--r--3.2.32/1025_linux-3.2.26.patch (renamed from 3.2.31/1025_linux-3.2.26.patch)0
-rw-r--r--3.2.32/1026_linux-3.2.27.patch (renamed from 3.2.31/1026_linux-3.2.27.patch)0
-rw-r--r--3.2.32/1027_linux-3.2.28.patch (renamed from 3.2.31/1027_linux-3.2.28.patch)0
-rw-r--r--3.2.32/1028_linux-3.2.29.patch (renamed from 3.2.31/1028_linux-3.2.29.patch)0
-rw-r--r--3.2.32/1029_linux-3.2.30.patch (renamed from 3.2.31/1029_linux-3.2.30.patch)0
-rw-r--r--3.2.32/1030_linux-3.2.31.patch (renamed from 3.2.31/1030_linux-3.2.31.patch)0
-rw-r--r--3.2.32/1031_linux-3.2.32.patch6206
-rw-r--r--3.2.32/4420_grsecurity-2.9.1-3.2.32-201210231935.patch (renamed from 3.2.31/4420_grsecurity-2.9.1-3.2.31-201210121914.patch)228
-rw-r--r--3.2.32/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.31/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--3.2.32/4435_grsec-mute-warnings.patch (renamed from 3.2.31/4435_grsec-mute-warnings.patch)0
-rw-r--r--3.2.32/4440_grsec-remove-protected-paths.patch (renamed from 3.2.31/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--3.2.32/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.31/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--3.2.32/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.31/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--3.2.32/4470_disable-compat_vdso.patch (renamed from 3.2.31/4470_disable-compat_vdso.patch)0
-rw-r--r--3.6.3/0000_README (renamed from 3.6.2/0000_README)2
-rw-r--r--3.6.3/1002_linux-3.6.3.patch3132
-rw-r--r--3.6.3/4420_grsecurity-2.9.1-3.6.3-201210231942.patch (renamed from 3.6.2/4420_grsecurity-2.9.1-3.6.2-201210151829.patch)107
-rw-r--r--3.6.3/4430_grsec-remove-localversion-grsec.patch (renamed from 3.6.2/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--3.6.3/4435_grsec-mute-warnings.patch (renamed from 3.6.2/4435_grsec-mute-warnings.patch)0
-rw-r--r--3.6.3/4440_grsec-remove-protected-paths.patch (renamed from 3.6.2/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--3.6.3/4450_grsec-kconfig-default-gids.patch (renamed from 3.6.2/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--3.6.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.6.2/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--3.6.3/4470_disable-compat_vdso.patch (renamed from 3.6.2/4470_disable-compat_vdso.patch)0
30 files changed, 9528 insertions, 179 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 1580ab5..d1abd76 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -34,7 +34,7 @@ Patch: 1059_linux-2.6.32.60.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.60-201210121913.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210121913.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
index 1bafac1..db2317b 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210121913.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
@@ -76195,10 +76195,10 @@ index 0000000..1b9afa9
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..7724cb1
+index 0000000..b50e14d
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4175 @@
+@@ -0,0 +1,4187 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -77678,6 +77678,7 @@ index 0000000..7724cb1
+copy_user_acl(struct gr_arg *arg)
+{
+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct acl_subject_label *subj_list;
+ struct sprole_pw *sptmp;
+ struct gr_hash_struct *ghash;
+ uid_t *domainlist;
@@ -77806,14 +77807,21 @@ index 0000000..7724cb1
+ r_tmp->subj_hash_size *
+ sizeof (struct acl_subject_label *));
+
-+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
-+
-+ if (err)
-+ return err;
++ /* acquire the list of subjects, then NULL out
++ the list prior to parsing the subjects for this role,
++ as during this parsing the list is replaced with a list
++ of *nested* subjects for the role
++ */
++ subj_list = r_tmp->hash->first;
+
+ /* set nested subject list to null */
+ r_tmp->hash->first = NULL;
+
++ err = copy_user_subjs(subj_list, r_tmp);
++
++ if (err)
++ return err;
++
+ insert_acl_role_label(r_tmp);
+ }
+
@@ -78814,8 +78822,9 @@ index 0000000..7724cb1
+ matchpo->mode |= GR_DELETED;
+ FOR_EACH_SUBJECT_END(subj,x)
+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+ if (subj->inode == ino && subj->device == dev)
-+ subj->mode |= GR_DELETED;
++ /* nested subjects aren't in the role's subj_hash table */
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
+ matchps->mode |= GR_DELETED;
@@ -78974,6 +78983,9 @@ index 0000000..7724cb1
+ subj->inode = inode;
+ subj->device = dev;
+ }
++ /* nested subjects aren't in the role's subj_hash table */
++ update_acl_obj_label(matchn->inode, matchn->device,
++ inode, dev, subj);
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ FOR_EACH_SUBJECT_START(role, subj, x)
+ update_acl_obj_label(matchn->inode, matchn->device,
diff --git a/3.2.31/0000_README b/3.2.32/0000_README
index a999a0f..cbbefef 100644
--- a/3.2.31/0000_README
+++ b/3.2.32/0000_README
@@ -42,7 +42,7 @@ Patch: 1030_linux-3.2.31.patch
From: http://www.kernel.org
Desc: Linux 3.2.31
-Patch: 4420_grsecurity-2.9.1-3.2.31-201210121914.patch
+Patch: 4420_grsecurity-2.9.1-3.2.32-201210231935.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.31/1021_linux-3.2.22.patch b/3.2.32/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.31/1021_linux-3.2.22.patch
+++ b/3.2.32/1021_linux-3.2.22.patch
diff --git a/3.2.31/1022_linux-3.2.23.patch b/3.2.32/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.31/1022_linux-3.2.23.patch
+++ b/3.2.32/1022_linux-3.2.23.patch
diff --git a/3.2.31/1023_linux-3.2.24.patch b/3.2.32/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.31/1023_linux-3.2.24.patch
+++ b/3.2.32/1023_linux-3.2.24.patch
diff --git a/3.2.31/1024_linux-3.2.25.patch b/3.2.32/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.31/1024_linux-3.2.25.patch
+++ b/3.2.32/1024_linux-3.2.25.patch
diff --git a/3.2.31/1025_linux-3.2.26.patch b/3.2.32/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.31/1025_linux-3.2.26.patch
+++ b/3.2.32/1025_linux-3.2.26.patch
diff --git a/3.2.31/1026_linux-3.2.27.patch b/3.2.32/1026_linux-3.2.27.patch
index 5878eb4..5878eb4 100644
--- a/3.2.31/1026_linux-3.2.27.patch
+++ b/3.2.32/1026_linux-3.2.27.patch
diff --git a/3.2.31/1027_linux-3.2.28.patch b/3.2.32/1027_linux-3.2.28.patch
index 4dbba4b..4dbba4b 100644
--- a/3.2.31/1027_linux-3.2.28.patch
+++ b/3.2.32/1027_linux-3.2.28.patch
diff --git a/3.2.31/1028_linux-3.2.29.patch b/3.2.32/1028_linux-3.2.29.patch
index 3c65179..3c65179 100644
--- a/3.2.31/1028_linux-3.2.29.patch
+++ b/3.2.32/1028_linux-3.2.29.patch
diff --git a/3.2.31/1029_linux-3.2.30.patch b/3.2.32/1029_linux-3.2.30.patch
index 86aea4b..86aea4b 100644
--- a/3.2.31/1029_linux-3.2.30.patch
+++ b/3.2.32/1029_linux-3.2.30.patch
diff --git a/3.2.31/1030_linux-3.2.31.patch b/3.2.32/1030_linux-3.2.31.patch
index c6accf5..c6accf5 100644
--- a/3.2.31/1030_linux-3.2.31.patch
+++ b/3.2.32/1030_linux-3.2.31.patch
diff --git a/3.2.32/1031_linux-3.2.32.patch b/3.2.32/1031_linux-3.2.32.patch
new file mode 100644
index 0000000..247fc0b
--- /dev/null
+++ b/3.2.32/1031_linux-3.2.32.patch
@@ -0,0 +1,6206 @@
+diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
+index c095d79..288dba6 100644
+--- a/Documentation/virtual/lguest/lguest.c
++++ b/Documentation/virtual/lguest/lguest.c
+@@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
+ dev->feature_len = 0;
+ dev->num_vq = 0;
+ dev->running = false;
++ dev->next = NULL;
+
+ /*
+ * Append to device list. Prepending to a single-linked list is
+diff --git a/Makefile b/Makefile
+index fd9c414..b6d8282 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index a6cbb71..04e703a 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -82,22 +82,29 @@ static void notrace omap_update_sched_clock(void)
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ void read_persistent_clock(struct timespec *ts)
+ {
+ unsigned long long nsecs;
+- cycles_t delta;
+- struct timespec *tsp = &persistent_ts;
++ cycles_t last_cycles;
++ unsigned long flags;
++
++ spin_lock_irqsave(&read_persistent_clock_lock, flags);
+
+ last_cycles = cycles;
+ cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+- delta = cycles - last_cycles;
+
+- nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++ nsecs = clocksource_cyc2ns(cycles - last_cycles,
++ persistent_mult, persistent_shift);
++
++ timespec_add_ns(&persistent_ts, nsecs);
++
++ *ts = persistent_ts;
+
+- timespec_add_ns(tsp, nsecs);
+- *ts = *tsp;
++ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+
+ int __init omap_init_clocksource_32k(void)
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 0be3186..aaf7444 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -224,7 +224,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+ LDFLAGS += -m $(ld-emul)
+
+ ifdef CONFIG_MIPS
+-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
++CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+ ifdef CONFIG_64BIT
+diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
+index 1a96618..ce7dd99 100644
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -102,7 +102,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+
+ obj-$(CONFIG_OF) += prom.o
+
+-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
++CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+
+diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
+index 7120282..3eb4a52 100644
+--- a/arch/mn10300/Makefile
++++ b/arch/mn10300/Makefile
+@@ -26,7 +26,7 @@ CHECKFLAGS +=
+ PROCESSOR := unset
+ UNIT := unset
+
+-KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
++KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
+ KBUILD_AFLAGS += -mam33 -DCPU=AM33
+
+ ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
+diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
+index 1b6cb10..a0a4e8a 100644
+--- a/arch/powerpc/platforms/pseries/eeh_driver.c
++++ b/arch/powerpc/platforms/pseries/eeh_driver.c
+@@ -25,6 +25,7 @@
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
++#include <linux/module.h>
+ #include <linux/pci.h>
+ #include <asm/eeh.h>
+ #include <asm/eeh_event.h>
+@@ -41,6 +42,41 @@ static inline const char * pcid_name (struct pci_dev *pdev)
+ return "";
+ }
+
++/**
++ * eeh_pcid_get - Get the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is used to retrieve the PCI device driver for
++ * the indicated PCI device. Besides, we will increase the reference
++ * of the PCI device driver to prevent that being unloaded on
++ * the fly. Otherwise, kernel crash would be seen.
++ */
++static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return NULL;
++
++ if (!try_module_get(pdev->driver->driver.owner))
++ return NULL;
++
++ return pdev->driver;
++}
++
++/**
++ * eeh_pcid_put - Dereference on the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is called to do dereference on the PCI device
++ * driver of the indicated PCI device.
++ */
++static inline void eeh_pcid_put(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return;
++
++ module_put(pdev->driver->driver.owner);
++}
++
+ #if 0
+ static void print_device_node_tree(struct pci_dn *pdn, int dent)
+ {
+@@ -109,18 +145,20 @@ static void eeh_enable_irq(struct pci_dev *dev)
+ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_frozen;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
+
+@@ -128,6 +166,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -142,12 +181,15 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+- if (!driver ||
+- !driver->err_handler ||
+- !driver->err_handler->mmio_enabled)
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++ if (!driver->err_handler ||
++ !driver->err_handler->mmio_enabled) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->mmio_enabled (dev);
+
+@@ -155,6 +197,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -165,18 +208,20 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
+-
+- if (!driver)
+- return 0;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->slot_reset)
++ !driver->err_handler->slot_reset) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->slot_reset(dev);
+ if ((*res == PCI_ERS_RESULT_NONE) ||
+@@ -184,6 +229,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -193,21 +239,24 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->resume)
++ !driver->err_handler->resume) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->resume(dev);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -220,21 +269,24 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_perm_failure;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 18601c8..884507e 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+
+ static inline int pmd_large(pmd_t pte)
+ {
+- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+- (_PAGE_PSE | _PAGE_PRESENT);
++ return pmd_flags(pte) & _PAGE_PSE;
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
+
+ static inline int pmd_present(pmd_t pmd)
+ {
+- return pmd_flags(pmd) & _PAGE_PRESENT;
++ /*
++ * Checking for _PAGE_PSE is needed too because
++ * split_huge_page will temporarily clear the present bit (but
++ * the _PAGE_PSE flag will remain set at all times while the
++ * _PAGE_PRESENT bit is clear).
++ */
++ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ }
+
+ static inline int pmd_none(pmd_t pmd)
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 37718f0..4d320b2 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -731,6 +731,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
++ efi.runtime_version = efi_systab.fw_revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 9ecec98..5016de5 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -950,8 +950,6 @@ static int __init acpi_bus_init(void)
+ status = acpi_ec_ecdt_probe();
+ /* Ignore result. Not having an ECDT is not fatal. */
+
+- acpi_bus_osc_support();
+-
+ status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+@@ -959,6 +957,12 @@ static int __init acpi_bus_init(void)
+ }
+
+ /*
++ * _OSC method may exist in module level code,
++ * so it must be run after ACPI_FULL_INITIALIZATION
++ */
++ acpi_bus_osc_support();
++
++ /*
+ * _PDC control method may load dynamic SSDT tables,
+ * and we need to install the table handler before that.
+ */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6f95d98..1f90dab 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -108,7 +108,7 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ /* Foxconn - Hon Hai */
+- { USB_DEVICE(0x0489, 0xe033) },
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+
+ /*Broadcom devices with vendor specific id */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
+index eedd547..5936691 100644
+--- a/drivers/char/ttyprintk.c
++++ b/drivers/char/ttyprintk.c
+@@ -67,7 +67,7 @@ static int tpk_printk(const unsigned char *buf, int count)
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+- if (buf[i + 1] == '\n')
++ if ((i + 1) < count && buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index b48967b..5991114 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -564,8 +564,8 @@ void dmaengine_get(void)
+ list_del_rcu(&device->global_node);
+ break;
+ } else if (err)
+- pr_err("dmaengine: failed to get %s: (%d)\n",
+- dma_chan_name(chan), err);
++ pr_debug("%s: failed to get %s: (%d)\n",
++ __func__, dma_chan_name(chan), err);
+ }
+ }
+
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 4799393..b97d4f0 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
+ fill_bus_reset_event(&bus_reset, client);
+- ret = copy_to_user(u64_to_uptr(a->bus_reset),
+- &bus_reset, sizeof(bus_reset));
++ /* unaligned size of bus_reset is 36 bytes */
++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ }
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 0535c21..3e60e8d 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -435,12 +435,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf)
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+- if (var->Attributes & 0x1)
++ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+ str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+- if (var->Attributes & 0x2)
++ if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+- if (var->Attributes & 0x4)
++ if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
++ str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
++ if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes &
++ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
++ str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+ return str - buf;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index e48e01e..33e1555 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1543,16 +1543,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+- if (obj->fenced_gpu_access) {
+- struct drm_i915_fence_reg *reg;
+-
+- BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
++ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+- reg = &dev_priv->fence_regs[obj->fence_reg];
+- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
++ /* Bump MRU to take account of the delayed flush */
++ if (obj->fence_reg != I915_FENCE_REG_NONE) {
++ struct drm_i915_fence_reg *reg;
++
++ reg = &dev_priv->fence_regs[obj->fence_reg];
++ list_move_tail(&reg->lru_list,
++ &dev_priv->mm.fence_list);
++ }
+ }
+ }
+
+@@ -1561,6 +1564,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+ {
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
++ obj->last_fenced_seqno = 0;
+ }
+
+ static void
+@@ -1589,6 +1593,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
++ obj->last_fenced_ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index a6c2f7a..1202198 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -574,7 +574,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ if (ret)
+ break;
+ }
+- obj->pending_fenced_gpu_access = need_fence;
++ obj->pending_fenced_gpu_access =
++ !!(entry->flags & EXEC_OBJECT_NEEDS_FENCE);
+ }
+
+ entry->offset = obj->gtt_offset;
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 31d334d..861223b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -107,10 +107,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+- } else if (IS_MOBILE(dev)) {
++ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+- /* On mobile 9xx chipsets, channel interleave by the CPU is
++ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index c8b5bc1..2812d7b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -530,6 +530,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK_IVB)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK_IVB)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -540,12 +546,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK_IVB)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK_IVB)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+@@ -622,6 +622,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -632,12 +638,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4a5e662..a294a32 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -401,6 +401,9 @@
+ # define VS_TIMER_DISPATCH (1 << 6)
+ # define MI_FLUSH_ENABLE (1 << 11)
+
++#define GEN6_GT_MODE 0x20d0
++#define GEN6_GT_MODE_HI (1 << 9)
++
+ #define GFX_MODE 0x02520
+ #define GFX_MODE_GEN7 0x0229c
+ #define GFX_RUN_LIST_ENABLE (1<<15)
+@@ -1557,6 +1560,10 @@
+
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA 0x61178
++/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
++ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
++ * of the infoframe structure specified by CEA-861. */
++#define VIDEO_DIP_DATA_SIZE 32
+ #define VIDEO_DIP_CTL 0x61170
+ #define VIDEO_DIP_ENABLE (1 << 31)
+ #define VIDEO_DIP_PORT_B (1 << 29)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6c3fb44..adac0dd 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2850,13 +2850,34 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
+ I915_WRITE_CTL(ring, tmp);
+ }
+
++static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long flags;
++ bool pending;
++
++ if (atomic_read(&dev_priv->mm.wedged))
++ return false;
++
++ spin_lock_irqsave(&dev->event_lock, flags);
++ pending = to_intel_crtc(crtc)->unpin_work != NULL;
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++
++ return pending;
++}
++
+ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (crtc->fb == NULL)
+ return;
+
++ wait_event(dev_priv->pending_flip_queue,
++ !intel_crtc_has_pending_flip(crtc));
++
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
+@@ -5027,7 +5048,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+@@ -5495,7 +5516,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+@@ -6952,9 +6973,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+- if (atomic_read(&obj->pending_flip) == 0)
+- wake_up(&dev_priv->pending_flip_queue);
+
++ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+@@ -7193,7 +7213,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+- goto err;
++ goto err_unpin;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+@@ -8278,6 +8298,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
++
++ /* The default value should be 0x200 according to docs, but the two
++ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
++ I915_WRITE(GEN6_GT_MODE, 0xffff << 16);
++ I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI);
+ }
+
+ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index c2a64f4..497da2a 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -138,14 +138,20 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(VIDEO_DIP_DATA, 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
++ POSTING_READ(VIDEO_DIP_CTL);
+ }
+
+ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+@@ -168,14 +174,20 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
++ POSTING_READ(reg);
+ }
+ static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+@@ -546,10 +558,13 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ I915_WRITE(VIDEO_DIP_CTL, 0);
++ POSTING_READ(VIDEO_DIP_CTL);
+ } else {
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+- for_each_pipe(i)
++ for_each_pipe(i) {
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
++ POSTING_READ(TVIDEO_DIP_CTL(i));
++ }
+ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index fc0633c..b61f490 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -37,6 +37,16 @@
+ #define EVERGREEN_PFP_UCODE_SIZE 1120
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+
++static const u32 crtc_offsets[6] =
++{
++ EVERGREEN_CRTC0_REGISTER_OFFSET,
++ EVERGREEN_CRTC1_REGISTER_OFFSET,
++ EVERGREEN_CRTC2_REGISTER_OFFSET,
++ EVERGREEN_CRTC3_REGISTER_OFFSET,
++ EVERGREEN_CRTC4_REGISTER_OFFSET,
++ EVERGREEN_CRTC5_REGISTER_OFFSET
++};
++
+ static void evergreen_gpu_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+@@ -66,6 +76,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+ }
+ }
+
++void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
++{
++ int i;
++
++ if (crtc >= rdev->num_crtc)
++ return;
++
++ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
++ break;
++ udelay(1);
++ }
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
++ break;
++ udelay(1);
++ }
++ }
++}
++
+ void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+ {
+ /* enable the pflip int */
+@@ -1065,116 +1096,88 @@ void evergreen_agp_enable(struct radeon_device *rdev)
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
++ u32 crtc_enabled, tmp, frame_count, blackout;
++ int i, j;
++
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+- /* Stop all video */
++ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++ /* blank the display controllers */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
++ if (crtc_enabled) {
++ save->crtc_enabled[i] = true;
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
++ dce4_wait_for_vblank(rdev, i);
++ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ }
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
+ }
+
+- WREG32(D1VGA_CONTROL, 0);
+- WREG32(D2VGA_CONTROL, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
++ evergreen_mc_wait_for_idle(rdev);
++
++ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
++ /* Block CPU access */
++ WREG32(BIF_FB_EN, 0);
++ /* blackout the MC */
++ blackout &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
++ u32 tmp, frame_count;
++ int i, j;
+
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ /* update crtc base addresses */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+-
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+- /* Unlock host access */
++
++ /* unblackout the MC */
++ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ tmp &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
++ /* allow CPU access */
++ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
++
++ for (i = 0; i < rdev->num_crtc; i++) {
++		if (save->crtc_enabled[i]) {
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
++ }
++ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index 7d7f215..e022776 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -210,7 +210,10 @@
+ #define EVERGREEN_CRTC_CONTROL 0x6e70
+ # define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
++#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
++# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+ #define EVERGREEN_CRTC_STATUS 0x6e8c
++# define EVERGREEN_CRTC_V_BLANK (1 << 0)
+ #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+ #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
+ #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 6ecd23f..fe44a95 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -77,6 +77,10 @@
+
+ #define CONFIG_MEMSIZE 0x5428
+
++#define BIF_FB_EN 0x5490
++#define FB_READ_EN (1 << 0)
++#define FB_WRITE_EN (1 << 1)
++
+ #define CP_ME_CNTL 0x86D8
+ #define CP_ME_HALT (1 << 28)
+ #define CP_PFP_HALT (1 << 26)
+@@ -194,6 +198,9 @@
+ #define NOOFCHAN_MASK 0x00003000
+ #define MC_SHARED_CHREMAP 0x2008
+
++#define MC_SHARED_BLACKOUT_CNTL 0x20ac
++#define BLACKOUT_MODE_MASK 0x00000007
++
+ #define MC_ARB_RAMCFG 0x2760
+ #define NOOFBANK_SHIFT 0
+ #define NOOFBANK_MASK 0x00000003
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5ce9402..5aa6670 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -386,6 +386,7 @@ void r700_cp_fini(struct radeon_device *rdev);
+ struct evergreen_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
++ bool crtc_enabled[RADEON_MAX_CRTCS];
+ };
+
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index baa019e..4f9496e 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -143,6 +143,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
++ /* Gateway RS690 only seems to work with MSIs. */
++ if ((rdev->pdev->device == 0x791f) &&
++ (rdev->pdev->subsystem_vendor == 0x107b) &&
++ (rdev->pdev->subsystem_device == 0x0185))
++ return true;
++
++ /* try and enable MSIs by default on all RS690s */
++ if (rdev->family == CHIP_RS690)
++ return true;
++
+ /* RV515 seems to have MSI issues where it loses
+ * MSI rearms occasionally. This leads to lockups and freezes.
+ * disable it by default.
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 78a665b..ebd6c51 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -553,7 +553,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
+ void radeon_pm_resume(struct radeon_device *rdev)
+ {
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+@@ -608,7 +610,9 @@ int radeon_pm_init(struct radeon_device *rdev)
+ radeon_pm_print_states(rdev);
+ radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index fe2fdbb..1740b82 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev)
+
+ netif_stop_queue(dev);
+
+- ipoib_ib_dev_down(dev, 0);
++ ipoib_ib_dev_down(dev, 1);
+ ipoib_ib_dev_stop(dev, 0);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index e5069b4..80799c0 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -190,7 +190,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+
+ mcast->mcmember = *mcmember;
+
+- /* Set the cached Q_Key before we attach if it's the broadcast group */
++ /* Set the multicast MTU and cached Q_Key before we attach if it's
++ * the broadcast group.
++ */
+ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid))) {
+ spin_lock_irq(&priv->lock);
+@@ -198,10 +200,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ spin_unlock_irq(&priv->lock);
+ return -EAGAIN;
+ }
++ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+ spin_unlock_irq(&priv->lock);
+ priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
+ set_qkey = 1;
++
++ if (!ipoib_cm_admin_enabled(dev)) {
++ rtnl_lock();
++ dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
++ rtnl_unlock();
++ }
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+@@ -590,14 +599,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ return;
+ }
+
+- priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+-
+- if (!ipoib_cm_admin_enabled(dev)) {
+- rtnl_lock();
+- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+- rtnl_unlock();
+- }
+-
+ ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
+
+ clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index c76b051..4ec049d 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -620,9 +620,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+ if (scmnd) {
++ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+- srp_free_req(target, req, scmnd, 0);
+ }
+ }
+
+@@ -1669,6 +1669,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ SRP_TSK_ABORT_TASK);
+ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
++ scmnd->scsi_done(scmnd);
+
+ return SUCCESS;
+ }
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 96532bc..7be5fd9 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -53,14 +53,19 @@
+ #define ABS_POS_BITS 13
+
+ /*
+- * Any position values from the hardware above the following limits are
+- * treated as "wrapped around negative" values that have been truncated to
+- * the 13-bit reporting range of the hardware. These are just reasonable
+- * guesses and can be adjusted if hardware is found that operates outside
+- * of these parameters.
++ * These values should represent the absolute maximum value that will
++ * be reported for a positive position value. Some Synaptics firmware
++ * uses this value to indicate a finger near the edge of the touchpad
++ * whose precise position cannot be determined.
++ *
++ * At least one touchpad is known to report positions in excess of this
++ * value which are actually negative values truncated to the 13-bit
++ * reporting range. These values have never been observed to be lower
++ * than 8184 (i.e. -8), so we treat all values greater than 8176 as
++ * negative and any other value as positive.
+ */
+-#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
+-#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++#define X_MAX_POSITIVE 8176
++#define Y_MAX_POSITIVE 8176
+
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+@@ -561,11 +566,21 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
+- /* Convert wrap-around values to negative */
++ /*
++ * Convert wrap-around values to negative. (X|Y)_MAX_POSITIVE
++ * is used by some firmware to indicate a finger at the edge of
++ * the touchpad whose precise position cannot be determined, so
++ * convert these values to the maximum axis value.
++ */
+ if (hw->x > X_MAX_POSITIVE)
+ hw->x -= 1 << ABS_POS_BITS;
++ else if (hw->x == X_MAX_POSITIVE)
++ hw->x = XMAX;
++
+ if (hw->y > Y_MAX_POSITIVE)
+ hw->y -= 1 << ABS_POS_BITS;
++ else if (hw->y == Y_MAX_POSITIVE)
++ hw->y = YMAX;
+
+ return 0;
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index ccf347f..b9062c0 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -563,7 +563,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+ {
+ int i;
+
+- domain->iommu_coherency = 1;
++ i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
++
++ domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ if (!ecap_coherent(g_iommus[i]->ecap)) {
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0e49c99..c06992e 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ rdev = rc_allocate_device();
+ if (!rdev)
+ goto failure;
++ itdev->rdev = rdev;
+
+ ret = -ENODEV;
+
+@@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ if (ret)
+ goto failure;
+
+- itdev->rdev = rdev;
+ ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+
+ return 0;
+diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
+index 1c44f78..6ddc769 100644
+--- a/drivers/media/video/gspca/pac7302.c
++++ b/drivers/media/video/gspca/pac7302.c
+@@ -1197,6 +1197,8 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x262a)},
+ {USB_DEVICE(0x093a, 0x262c)},
++ {USB_DEVICE(0x145f, 0x013c)},
++ {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
+ {}
+ };
+ MODULE_DEVICE_TABLE(usb, device_table);
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d5fe43d..bc27065 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2188,9 +2188,7 @@ static int omap_hsmmc_suspend(struct device *dev)
+ } else {
+ host->suspended = 0;
+ if (host->pdata->resume) {
+- ret = host->pdata->resume(&pdev->dev,
+- host->slot_id);
+- if (ret)
++ if (host->pdata->resume(&pdev->dev, host->slot_id))
+ dev_dbg(mmc_dev(host->mmc),
+ "Unmask interrupt failed\n");
+ }
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 0d33ff0..06af9e4 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -601,7 +601,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+
+ sdhci_remove_host(host, 1);
+
+- for (ptr = 0; ptr < 3; ptr++) {
++ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index d5505f3..559d30d 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1003,6 +1003,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ }
++ if (host->state == STATE_IDLE) {
++ dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
++ return IRQ_HANDLED;
++ }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+ complete(&host->intr_wait);
+ else
+diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
+index e5bfd0e..0598d52 100644
+--- a/drivers/mtd/maps/autcpu12-nvram.c
++++ b/drivers/mtd/maps/autcpu12-nvram.c
+@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
+
+ static int __init init_autcpu12_sram (void)
+ {
+- int err, save0, save1;
++ map_word tmp, save0, save1;
++ int err;
+
+ autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
+ if (!autcpu12_sram_map.virt) {
+@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
+ err = -EIO;
+ goto out;
+ }
+- simple_map_init(&autcpu_sram_map);
++ simple_map_init(&autcpu12_sram_map);
+
+ /*
+ * Check for 32K/128K
+@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
+ * Read and check result on ofs 0x0
+ * Restore contents
+ */
+- save0 = map_read32(&autcpu12_sram_map,0);
+- save1 = map_read32(&autcpu12_sram_map,0x10000);
+- map_write32(&autcpu12_sram_map,~save0,0x10000);
++ save0 = map_read(&autcpu12_sram_map, 0);
++ save1 = map_read(&autcpu12_sram_map, 0x10000);
++ tmp.x[0] = ~save0.x[0];
++ map_write(&autcpu12_sram_map, tmp, 0x10000);
+ /* if we find this pattern on 0x0, we have 32K size
+ * restore contents and exit
+ */
+- if ( map_read32(&autcpu12_sram_map,0) != save0) {
+- map_write32(&autcpu12_sram_map,save0,0x0);
++ tmp = map_read(&autcpu12_sram_map, 0);
++ if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
++ map_write(&autcpu12_sram_map, save0, 0x0);
+ goto map;
+ }
+ /* We have a 128K found, restore 0x10000 and set size
+ * to 128K
+ */
+- map_write32(&autcpu12_sram_map,save1,0x10000);
++ map_write(&autcpu12_sram_map, save1, 0x10000);
+ autcpu12_sram_map.size = SZ_128K;
+
+ map:
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index a0bd2de..198da0a 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -748,6 +748,8 @@ static const char *default_mtd_part_types[] = {
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
++ * Note: If there are more then one parser in @types, the kernel only takes the
++ * partitions parsed out by the first parser.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+@@ -772,11 +774,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ if (!parser)
+ continue;
+ ret = (*parser->parse_fn)(master, pparts, data);
++ put_partition_parser(parser);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
++ break;
+ }
+- put_partition_parser(parser);
+ }
+ return ret;
+ }
+diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
+index f024375..532da04 100644
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+- mtd->writesize, td);
++ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 83e8e1b..ade0da0 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
++ retval = -EINVAL;
+ goto err_exit;
+ }
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index f745f00..297c965 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1132,7 +1132,8 @@ static int omap_nand_remove(struct platform_device *pdev)
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+- kfree(&info->mtd);
++ release_mem_region(info->phys_base, NAND_IO_SIZE);
++ kfree(info);
+ return 0;
+ }
+
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 6c3fb5a..1f9c363 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
++ if (ubi->ro_mode) {
++ ubi_warn("skip auto-resize because of R/O mode");
++ return 0;
++ }
++
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
+index b99318e..b2b62de 100644
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -997,7 +997,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+- if (ec_err)
++ if (ec_err || bitflips)
+ err = add_to_list(si, pnum, ec, 1, &si->erase);
+ else
+ err = add_to_list(si, pnum, ec, 0, &si->free);
+diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
+index 5fedc33..d8f2b5b 100644
+--- a/drivers/net/can/mscan/mpc5xxx_can.c
++++ b/drivers/net/can/mscan/mpc5xxx_can.c
+@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+- if (!sys_clk) {
++ if (IS_ERR(sys_clk)) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+- if (!ref_clk) {
++ if (IS_ERR(ref_clk)) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 0549261..c5f6b0e 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4720,8 +4720,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ netif_device_detach(netdev);
+
+- mutex_lock(&adapter->mutex);
+-
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+@@ -4729,10 +4727,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ #ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+- if (retval) {
+- mutex_unlock(&adapter->mutex);
++ if (retval)
+ return retval;
+- }
+ #endif
+
+ status = er32(STATUS);
+@@ -4789,8 +4785,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+- mutex_unlock(&adapter->mutex);
+-
+ pci_disable_device(pdev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed1be8a..4b43bc5 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -327,6 +327,8 @@ enum rtl_registers {
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
++#define PME_SIGNAL (1 << 5) /* 8168c and later */
++
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+@@ -1360,7 +1362,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+- { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+@@ -1368,16 +1369,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
++ u8 options;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+- u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
++ options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
++ options = RTL_R8(Config1) & ~PMEnable;
++ if (wolopts)
++ options |= PMEnable;
++ RTL_W8(Config1, options);
++ break;
++ default:
++ options = RTL_R8(Config2) & ~PME_SIGNAL;
++ if (wolopts)
++ options |= PME_SIGNAL;
++ RTL_W8(Config2, options);
++ break;
++ }
++
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
+
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 7145714..c0f097b 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -79,6 +79,7 @@ static int rionet_capable = 1;
+ * on system trade-offs.
+ */
+ static struct rio_dev **rionet_active;
++static int nact; /* total number of active rionet peers */
+
+ #define is_rionet_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ u16 destid;
+ unsigned long flags;
++ int add_num = 1;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&rnet->tx_lock)) {
+@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ return NETDEV_TX_LOCKED;
+ }
+
+- if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
++ if (is_multicast_ether_addr(eth->h_dest))
++ add_num = nact;
++
++ if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&rnet->tx_lock, flags);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ }
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
++ int count = 0;
+ for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
+ i++)
+- if (rionet_active[i])
++ if (rionet_active[i]) {
+ rionet_queue_tx_msg(skb, ndev,
+ rionet_active[i]);
++ if (count)
++ atomic_inc(&skb->users);
++ count++;
++ }
+ } else if (RIONET_MAC_MATCH(eth->h_dest)) {
+ destid = RIONET_GET_DESTID(eth->h_dest);
+ if (rionet_active[destid])
+@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
+ if (info == RIONET_DOORBELL_JOIN) {
+ if (!rionet_active[sid]) {
+ list_for_each_entry(peer, &rionet_peers, node) {
+- if (peer->rdev->destid == sid)
++ if (peer->rdev->destid == sid) {
+ rionet_active[sid] = peer->rdev;
++ nact++;
++ }
+ }
+ rio_mport_send_doorbell(mport, sid,
+ RIONET_DOORBELL_JOIN);
+ }
+ } else if (info == RIONET_DOORBELL_LEAVE) {
+ rionet_active[sid] = NULL;
++ nact--;
+ } else {
+ if (netif_msg_intr(rnet))
+ printk(KERN_WARNING "%s: unhandled doorbell\n",
+@@ -524,6 +537,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+
+ rc = rionet_setup_netdev(rdev->net->hport, ndev);
+ rionet_check = 1;
++ nact = 0;
+ }
+
+ /*
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 1883d39..f7e17a0 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -122,8 +122,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
+ if (!parent)
+ return;
+
+- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+- /* Bluetooth coexistance requires disabling ASPM. */
++ if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
++ (AR_SREV_9285(ah))) {
++ /* Bluetooth coexistance requires disabling ASPM for AR9285. */
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
+ aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index dfee1b3..9005380 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -658,8 +658,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+
+ /* Check if setup is sensible at all */
+ if (!pass &&
+- (primary != bus->number || secondary <= bus->number)) {
+- dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
++ (primary != bus->number || secondary <= bus->number ||
++ secondary > subordinate)) {
++ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
++ secondary, subordinate);
+ broken = 1;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 0860181..4f1b10b 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+
+ rwlock_init(&port->unit_list_lock);
+ INIT_LIST_HEAD(&port->unit_list);
++ atomic_set(&port->units, 0);
+
+ INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
+ INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
+index 96f13ad8..79a6afe 100644
+--- a/drivers/s390/scsi/zfcp_ccw.c
++++ b/drivers/s390/scsi/zfcp_ccw.c
+@@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+ }
+
+-static int zfcp_ccw_activate(struct ccw_device *cdev)
+-
++/**
++ * zfcp_ccw_activate - activate adapter and wait for it to finish
++ * @cdev: pointer to belonging ccw device
++ * @clear: Status flags to clear.
++ * @tag: s390dbf trace record tag
++ */
++static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
++ zfcp_erp_clear_adapter_status(adapter, clear);
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+- "ccresu2");
++ tag);
+ zfcp_erp_wait(adapter);
+ flush_work(&adapter->scan_work);
+
+@@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
+ adapter->req_no = 0;
+
+- zfcp_ccw_activate(cdev);
++ zfcp_ccw_activate(cdev, 0, "ccsonl1");
+ zfcp_ccw_adapter_put(adapter);
+ return 0;
+ }
+
+ /**
+- * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
++ * @set: Status flags to set.
++ * @tag: s390dbf trace record tag
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
+- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
++ zfcp_erp_set_adapter_status(adapter, set);
++ zfcp_erp_adapter_shutdown(adapter, 0, tag);
+ zfcp_erp_wait(adapter);
+
+ zfcp_ccw_adapter_put(adapter);
+@@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+ }
+
+ /**
++ * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * @cdev: pointer to belonging ccw device
++ *
++ * This function gets called by the common i/o layer and sets an adapter
++ * into state offline.
++ */
++static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++{
++ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
++}
++
++/**
+ * zfcp_ccw_notify - ccw notify function
+ * @cdev: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+@@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+
+ switch (event) {
+ case CIO_GONE:
++ if (atomic_read(&adapter->status) &
++ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
++ zfcp_dbf_hba_basic("ccnigo1", adapter);
++ break;
++ }
+ dev_warn(&cdev->dev, "The FCP device has been detached\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
+ break;
+@@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
+ break;
+ case CIO_OPER:
++ if (atomic_read(&adapter->status) &
++ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
++ zfcp_dbf_hba_basic("ccniop1", adapter);
++ break;
++ }
+ dev_info(&cdev->dev, "The FCP device is operational again\n");
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
+@@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
+ zfcp_ccw_adapter_put(adapter);
+ }
+
++static int zfcp_ccw_suspend(struct ccw_device *cdev)
++{
++ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
++ return 0;
++}
++
++static int zfcp_ccw_thaw(struct ccw_device *cdev)
++{
++ /* trace records for thaw and final shutdown during suspend
++ can only be found in system dump until the end of suspend
++ but not after resume because it's based on the memory image
++ right after the very first suspend (freeze) callback */
++ zfcp_ccw_activate(cdev, 0, "ccthaw1");
++ return 0;
++}
++
++static int zfcp_ccw_resume(struct ccw_device *cdev)
++{
++ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
++ return 0;
++}
++
+ struct ccw_driver zfcp_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+@@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
+ .set_offline = zfcp_ccw_set_offline,
+ .notify = zfcp_ccw_notify,
+ .shutdown = zfcp_ccw_shutdown,
+- .freeze = zfcp_ccw_set_offline,
+- .thaw = zfcp_ccw_activate,
+- .restore = zfcp_ccw_activate,
++ .freeze = zfcp_ccw_suspend,
++ .thaw = zfcp_ccw_thaw,
++ .restore = zfcp_ccw_resume,
+ };
+diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
+index fab2c25..8ed63aa 100644
+--- a/drivers/s390/scsi/zfcp_cfdc.c
++++ b/drivers/s390/scsi/zfcp_cfdc.c
+@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host) {
++ shost_for_each_device(sdev, adapter->scsi_host) {
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ status = atomic_read(&zfcp_sdev->status);
+ if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index a9a816e..79b9848 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ length = min((u16)sizeof(struct qdio_buffer),
+ (u16)ZFCP_DBF_PAY_MAX_REC);
+
+- while ((char *)pl[payload->counter] && payload->counter < scount) {
++ while (payload->counter < scount && (char *)pl[payload->counter]) {
+ memcpy(payload->data, (char *)pl[payload->counter], length);
+ debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ payload->counter++;
+@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ spin_unlock_irqrestore(&dbf->pay_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_hba_basic - trace event for basic adapter events
++ * @adapter: pointer to struct zfcp_adapter
++ */
++void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
++{
++ struct zfcp_dbf *dbf = adapter->dbf;
++ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dbf->hba_lock, flags);
++ memset(rec, 0, sizeof(*rec));
++
++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->id = ZFCP_DBF_HBA_BASIC;
++
++ debug_event(dbf->hba, 1, rec, sizeof(*rec));
++ spin_unlock_irqrestore(&dbf->hba_lock, flags);
++}
++
+ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 714f087..3ac7a4b 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
+ ZFCP_DBF_HBA_RES = 1,
+ ZFCP_DBF_HBA_USS = 2,
+ ZFCP_DBF_HBA_BIT = 3,
++ ZFCP_DBF_HBA_BASIC = 4,
+ };
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
+index ed5d921..f172b84 100644
+--- a/drivers/s390/scsi/zfcp_def.h
++++ b/drivers/s390/scsi/zfcp_def.h
+@@ -77,6 +77,7 @@ struct zfcp_reqlist;
+ #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
+ #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
+ #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
++#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
+ #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
+ #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+ #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
+@@ -204,6 +205,7 @@ struct zfcp_port {
+ struct zfcp_adapter *adapter; /* adapter used to access port */
+ struct list_head unit_list; /* head of logical unit list */
+ rwlock_t unit_list_lock; /* unit list lock */
++ atomic_t units; /* zfcp_unit count */
+ atomic_t status; /* status of this remote port */
+ u64 wwnn; /* WWNN if known */
+ u64 wwpn; /* WWPN */
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 2302e1c..ef9e502 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
++extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+@@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+ extern struct attribute_group zfcp_sysfs_unit_attrs;
+ extern struct attribute_group zfcp_sysfs_adapter_attrs;
+ extern struct attribute_group zfcp_sysfs_port_attrs;
++extern struct mutex zfcp_sysfs_port_units_mutex;
+ extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+ extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index e9a787e..8c849f0 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+ return;
+ }
+
+- zfcp_dbf_hba_fsf_uss("fssrh_2", req);
++ zfcp_dbf_hba_fsf_uss("fssrh_4", req);
+
+ switch (sr_buf->status_type) {
+ case FSF_STATUS_READ_PORT_CLOSED:
+@@ -771,12 +771,14 @@ out:
+ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ if (fsq->word[0] == fsq->word[1]) {
+@@ -885,7 +887,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+
+ switch (header->fsf_status) {
+ case FSF_GOOD:
+- zfcp_dbf_san_res("fsscth1", req);
++ zfcp_dbf_san_res("fsscth2", req);
+ ct->status = 0;
+ break;
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+@@ -1739,13 +1741,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct zfcp_adapter *adapter = req->adapter;
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+ struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+ ZFCP_STATUS_COMMON_ACCESS_BOXED |
+ ZFCP_STATUS_LUN_SHARED |
+@@ -1856,11 +1860,13 @@ out:
+ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
+@@ -1950,7 +1956,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+ {
+ struct fsf_qual_latency_info *lat_in;
+ struct latency_cont *lat = NULL;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct zfcp_blk_drv_data blktrc;
+ int ticks = req->adapter->timer_ticks;
+
+@@ -1965,6 +1971,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
++ zfcp_sdev = sdev_to_zfcp(scsi->device);
+ blktrc.flags |= ZFCP_BLK_LAT_VALID;
+ blktrc.channel_lat = lat_in->channel_lat * ticks;
+ blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+@@ -2002,12 +2009,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+ {
+ struct scsi_cmnd *scmnd = req->data;
+ struct scsi_device *sdev = scmnd->device;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+
+ if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (header->fsf_status) {
+ case FSF_HANDLE_MISMATCH:
+ case FSF_PORT_HANDLE_NOT_VALID:
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e14da57..e76d003 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+ {
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+ struct zfcp_adapter *adapter = qdio->adapter;
+- struct qdio_buffer_element *sbale;
+ int sbal_no, sbal_idx;
+- void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+- u64 req_id;
+- u8 scount;
+
+ if (unlikely(qdio_err)) {
+- memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
++ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
++ struct qdio_buffer_element *sbale;
++ u64 req_id;
++ u8 scount;
++
++ memset(pl, 0,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ sbale = qdio->res_q[idx]->element;
+ req_id = (u64) sbale->addr;
+- scount = sbale->scount + 1; /* incl. signaling SBAL */
++ scount = min(sbale->scount + 1,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
++ /* incl. signaling SBAL */
+
+ for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+ sbal_idx = (idx + sbal_no) %
+diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
+index cdc4ff7..9e62210 100644
+--- a/drivers/s390/scsi/zfcp_sysfs.c
++++ b/drivers/s390/scsi/zfcp_sysfs.c
+@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+ zfcp_sysfs_port_rescan_store);
+
++DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
++
+ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ else
+ retval = 0;
+
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) > 0) {
++ retval = -EBUSY;
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ goto out;
++ }
++ /* port is about to be removed, so no more unit_add */
++ atomic_set(&port->units, -1);
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++
+ write_lock_irq(&adapter->port_list_lock);
+ list_del(&port->list);
+ write_unlock_irq(&adapter->port_list_lock);
+@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+ {
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ u64 fcp_lun;
++ int retval;
+
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ return -EINVAL;
+
+- if (zfcp_unit_add(port, fcp_lun))
+- return -EINVAL;
++ retval = zfcp_unit_add(port, fcp_lun);
++ if (retval)
++ return retval;
+
+ return count;
+ }
+diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
+index 20796eb..4e6a535 100644
+--- a/drivers/s390/scsi/zfcp_unit.c
++++ b/drivers/s390/scsi/zfcp_unit.c
+@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
+ {
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+- put_device(&unit->port->dev);
++ atomic_dec(&unit->port->units);
+ kfree(unit);
+ }
+
+@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
+ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ {
+ struct zfcp_unit *unit;
++ int retval = 0;
++
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) == -1) {
++ /* port is already gone */
++ retval = -ENODEV;
++ goto out;
++ }
+
+ unit = zfcp_unit_find(port, fcp_lun);
+ if (unit) {
+ put_device(&unit->dev);
+- return -EEXIST;
++ retval = -EEXIST;
++ goto out;
+ }
+
+ unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+- if (!unit)
+- return -ENOMEM;
++ if (!unit) {
++ retval = -ENOMEM;
++ goto out;
++ }
+
+ unit->port = port;
+ unit->fcp_lun = fcp_lun;
+@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ if (dev_set_name(&unit->dev, "0x%016llx",
+ (unsigned long long) fcp_lun)) {
+ kfree(unit);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+- get_device(&port->dev);
+-
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+ if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+ device_unregister(&unit->dev);
+- return -EINVAL;
++ retval = -EINVAL;
++ goto out;
+ }
+
++ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
++
+ write_lock_irq(&port->unit_list_lock);
+ list_add_tail(&unit->list, &port->unit_list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ zfcp_unit_scsi_scan(unit);
+
+- return 0;
++out:
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ return retval;
+ }
+
+ /**
+diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
+index 7e6eca4..59fc5a1 100644
+--- a/drivers/scsi/atp870u.c
++++ b/drivers/scsi/atp870u.c
+@@ -1174,7 +1174,16 @@ wait_io1:
+ outw(val, tmport);
+ outb(2, 0x80);
+ TCM_SYNC:
+- udelay(0x800);
++ /*
++ * The funny division into multiple delays is to accomodate
++ * arches like ARM where udelay() multiplies its argument by
++ * a large number to initialize a loop counter. To avoid
++ * overflow, the maximum supported udelay is 2000 microseconds.
++ *
++ * XXX it would be more polite to find a way to use msleep()
++ */
++ mdelay(2);
++ udelay(48);
+ if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
+ outw(0, tmport--);
+ outb(0, tmport);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 4ef0212..e5a4423 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -578,8 +578,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+ h->state = TPGS_STATE_STANDBY;
+ break;
+ case TPGS_STATE_OFFLINE:
+- case TPGS_STATE_UNAVAILABLE:
+- /* Path unusable for unavailable/offline */
++ /* Path unusable */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index be9aad8..22523aa 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -532,12 +532,42 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+ }
+
++static int is_firmware_flash_cmd(u8 *cdb)
++{
++ return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
++}
++
++/*
++ * During firmware flash, the heartbeat register may not update as frequently
++ * as it should. So we dial down lockup detection during firmware flash. and
++ * dial it back up when firmware flash completes.
++ */
++#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
++#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
++static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (!is_firmware_flash_cmd(c->Request.CDB))
++ return;
++ atomic_inc(&h->firmware_flash_in_progress);
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
++}
++
++static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (is_firmware_flash_cmd(c->Request.CDB) &&
++ atomic_dec_and_test(&h->firmware_flash_in_progress))
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
++}
++
+ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+ {
+ unsigned long flags;
+
+ set_performant_mode(h, c);
++ dial_down_lockup_detection_during_fw_flash(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+@@ -2926,7 +2956,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ c->Request.Timeout = 0; /* Don't time out */
+ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ c->Request.CDB[0] = cmd;
+- c->Request.CDB[1] = 0x03; /* Reset target above */
++ c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+@@ -3032,6 +3062,7 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ {
+ removeQ(c);
++ dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
+ if (likely(c->cmd_type == CMD_SCSI))
+ complete_scsi_command(c);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+@@ -4172,9 +4203,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+-#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+-#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+-
+ static void detect_controller_lockup(struct ctlr_info *h)
+ {
+ u64 now;
+@@ -4185,7 +4213,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ now = get_jiffies_64();
+ /* If we've received an interrupt recently, we're ok. */
+ if (time_after64(h->last_intr_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /*
+@@ -4194,7 +4222,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ * otherwise don't care about signals in this thread.
+ */
+ if (time_after64(h->last_heartbeat_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /* If heartbeat has not changed since we last looked, we're not ok. */
+@@ -4236,6 +4264,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+ {
+ unsigned long flags;
+
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 91edafb..c721509 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -124,6 +124,8 @@ struct ctlr_info {
+ u64 last_intr_timestamp;
+ u32 last_heartbeat;
+ u64 last_heartbeat_timestamp;
++ u32 heartbeat_sample_interval;
++ atomic_t firmware_flash_in_progress;
+ u32 lockup_detected;
+ struct list_head lockup_list;
+ };
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 3fd4715..e4ea0a3 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -163,6 +163,7 @@ struct SenseSubsystem_info {
+ #define BMIC_WRITE 0x27
+ #define BMIC_CACHE_FLUSH 0xc2
+ #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
++#define BMIC_FLASH_FIRMWARE 0xF7
+
+ /* Command List Structure */
+ union SCSI3Addr {
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index 3d391dc..36aca4b 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -1547,6 +1547,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
+
+ host_config = &evt_struct->iu.mad.host_config;
+
++ /* The transport length field is only 16-bit */
++ length = min(0xffff, length);
++
+ /* Set up a lun reset SRP command */
+ memset(host_config, 0x00, sizeof(*host_config));
+ host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 83d08b6..5c8b0dc 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -469,7 +469,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+- devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
+index b5f4341..7cd637d 100644
+--- a/drivers/scsi/isci/probe_roms.c
++++ b/drivers/scsi/isci/probe_roms.c
+@@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+- devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index bb7c482..08d48a3 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1023,33 +1023,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+- struct scsi_target *starget, *found;
++ struct scsi_target *starget, *last = NULL;
+ unsigned long flags;
+
+- restart:
+- found = NULL;
++ /* remove targets being careful to lookup next entry before
++ * deleting the last
++ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+- found = starget;
+- found->reap_ref++;
+- break;
++ /* assuming new targets arrive at the end */
++ starget->reap_ref++;
++ spin_unlock_irqrestore(shost->host_lock, flags);
++ if (last)
++ scsi_target_reap(last);
++ last = starget;
++ __scsi_remove_target(starget);
++ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- if (found) {
+- __scsi_remove_target(found);
+- scsi_target_reap(found);
+- /* in the case where @dev has multiple starget children,
+- * continue removing.
+- *
+- * FIXME: does such a case exist?
+- */
+- goto restart;
+- }
++ if (last)
++ scsi_target_reap(last);
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 4ad2c0e..9465bce 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -843,7 +843,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
+ ret = -EAGAIN;
+ break;
+ }
+- ret = s->async->inttrig(dev, s, insn->data[0]);
++ ret = s->async->inttrig(dev, s, data[0]);
+ if (ret >= 0)
+ ret = 1;
+ break;
+@@ -1088,7 +1088,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ goto cleanup;
+ }
+
+- kfree(async->cmd.chanlist);
+ async->cmd = user_cmd;
+ async->cmd.data = NULL;
+ /* load channel/gain list */
+@@ -1833,6 +1832,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s)
+ if (async) {
+ comedi_reset_async_buf(async);
+ async->inttrig = NULL;
++ kfree(async->cmd.chanlist);
++ async->cmd.chanlist = NULL;
+ } else {
+ printk(KERN_ERR
+ "BUG: (?) do_become_nonbusy called with async=0\n");
+diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
+index 8d98cf4..c8b7eed 100644
+--- a/drivers/staging/comedi/drivers/jr3_pci.c
++++ b/drivers/staging/comedi/drivers/jr3_pci.c
+@@ -913,7 +913,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
+ }
+
+ /* Reset DSP card */
+- devpriv->iobase->channel[0].reset = 0;
++ writel(0, &devpriv->iobase->channel[0].reset);
+
+ result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
+ printk("Firmare load %d\n", result);
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index 23fc64b..c72128f 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
+ /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
+
+ k->SetMode(dev, k, Setup, TRUE);
+- Preload(dev, k, *(insn->data));
++ Preload(dev, k, data[0]);
+ k->PulseIndex(dev, k);
+ SetLatchSource(dev, k, valueSrclatch);
+ k->SetEnable(dev, k, (uint16_t) (enab != 0));
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index 42cdafe..b5130c8 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth);
+ static unsigned char get_index(void);
+
+ static struct miscdevice synth_device;
+-static int initialized;
++static int init_pos;
+ static int misc_registered;
+
+ static struct var_t vars[] = {
+@@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp)
+ unsigned long flags;
+ spk_lock(flags);
+ synth_soft.alive = 0;
+- initialized = 0;
++ init_pos = 0;
+ spk_unlock(flags);
+ /* Make sure we let applications go before leaving */
+ speakup_start_ttys();
+@@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
+ ch = '\x18';
+ } else if (synth_buffer_empty()) {
+ break;
+- } else if (!initialized) {
+- if (*init) {
+- ch = *init;
+- init++;
+- } else {
+- initialized = 1;
+- }
++ } else if (init[init_pos]) {
++ ch = init[init_pos++];
+ } else {
+ ch = synth_buffer_getc();
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2ff1255..f35cb10 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3204,7 +3204,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+- spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+@@ -3357,6 +3356,7 @@ static int iscsit_send_reject(
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
++ hdr->ffffffff = 0xffffffff;
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 0f68197..dae283f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES 5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+-#define NA_NOPIN_TIMEOUT 5
++#define NA_NOPIN_TIMEOUT 15
+ #define NA_NOPIN_TIMEOUT_MAX 60
+ #define NA_NOPIN_TIMEOUT_MIN 3
+-#define NA_NOPIN_RESPONSE_TIMEOUT 5
++#define NA_NOPIN_RESPONSE_TIMEOUT 30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index d4cf2cd..309f14c 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -674,6 +674,12 @@ int iscsit_ta_generate_node_acls(
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
++ if (flag == 1 && a->cache_dynamic_acls == 0) {
++ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++ "generate_node_acls=1\n");
++ a->cache_dynamic_acls = 1;
++ }
++
+ return 0;
+ }
+
+@@ -713,6 +719,12 @@ int iscsit_ta_cache_dynamic_acls(
+ return -EINVAL;
+ }
+
++ if (a->generate_node_acls == 1 && flag == 0) {
++ pr_debug("Skipping cache_dynamic_acls=0 when"
++ " generate_node_acls=1\n");
++ return 0;
++ }
++
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 93d4f6a..0b01bfc 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3123,6 +3123,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3138,6 +3139,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+ config_group_init_type_name(&alua_group,
+@@ -3153,6 +3155,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3164,14 +3167,17 @@ static int __init target_core_init_configfs(void)
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+- if (IS_ERR(lu_gp))
++ if (IS_ERR(lu_gp)) {
++ ret = -ENOMEM;
+ goto out_global;
++ }
+
+ lu_gp_cg = &alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 455a251..cafa477 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -139,6 +139,19 @@ static struct se_device *fd_create_virtdevice(
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++ /*
++ * Optionally allow fd_buffered_io=1 to be enabled for people
++ * who want use the fs buffer cache as an WriteCache mechanism.
++ *
++ * This means that in event of a hard failure, there is a risk
++ * of silent data-loss if the SCSI client has *not* performed a
++ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++ * to write-out the entire device cache.
++ */
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++ flags &= ~O_DSYNC;
++ }
+
+ file = filp_open(dev_p, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -206,6 +219,12 @@ static struct se_device *fd_create_virtdevice(
+ if (!dev)
+ goto fail;
+
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++ " with FDBD_HAS_BUFFERED_IO_WCE\n");
++ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++ }
++
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+@@ -450,6 +469,7 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
++ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -461,7 +481,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, token;
++ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -505,6 +525,19 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
++ case Opt_fd_buffered_io:
++ match_int(args, &arg);
++ if (arg != 1) {
++ pr_err("bogus fd_buffered_io=%d value\n", arg);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ pr_debug("FILEIO: Using buffered I/O"
++ " operations for struct fd_dev\n");
++
++ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++ break;
+ default:
+ break;
+ }
+@@ -536,8 +569,10 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++ "Buffered-WCE" : "O_DSYNC");
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 53ece69..6b1b6a9 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -18,6 +18,7 @@ struct fd_request {
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index fc7bbba..d190269 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -108,7 +108,7 @@ struct gsm_mux_net {
+ */
+
+ struct gsm_msg {
+- struct gsm_msg *next;
++ struct list_head list;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+@@ -245,8 +245,7 @@ struct gsm_mux {
+ unsigned int tx_bytes; /* TX data outstanding */
+ #define TX_THRESH_HI 8192
+ #define TX_THRESH_LO 2048
+- struct gsm_msg *tx_head; /* Pending data packets */
+- struct gsm_msg *tx_tail;
++ struct list_head tx_list; /* Pending data packets */
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+@@ -663,7 +662,7 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+- m->next = NULL;
++ INIT_LIST_HEAD(&m->list);
+ return m;
+ }
+
+@@ -673,22 +672,21 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
++ * If we have been flow-stopped by a CMD_FCOFF, then we can only
++ * send messages on DLCI0 until CMD_FCON
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+ static void gsm_data_kick(struct gsm_mux *gsm)
+ {
+- struct gsm_msg *msg = gsm->tx_head;
++ struct gsm_msg *msg, *nmsg;
+ int len;
+ int skip_sof = 0;
+
+- /* FIXME: We need to apply this solely to data messages */
+- if (gsm->constipated)
+- return;
+-
+- while (gsm->tx_head != NULL) {
+- msg = gsm->tx_head;
++ list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
++ if (gsm->constipated && msg->addr)
++ continue;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+@@ -711,14 +709,13 @@ static void gsm_data_kick(struct gsm_mux *gsm)
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+- gsm->tx_head = msg->next;
+- if (gsm->tx_head == NULL)
+- gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+- kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
++
++ list_del(&msg->list);
++ kfree(msg);
+ }
+ }
+
+@@ -768,11 +765,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+- if (gsm->tx_tail)
+- gsm->tx_tail->next = msg;
+- else
+- gsm->tx_head = msg;
+- gsm->tx_tail = msg;
++ list_add_tail(&msg->list, &gsm->tx_list);
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+ }
+@@ -875,7 +868,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+- dlci->skb = skb_dequeue(&dlci->skb_list);
++ dlci->skb = skb_dequeue_tail(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+@@ -886,7 +879,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+@@ -899,8 +892,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+- if (msg == NULL)
++ if (msg == NULL) {
++ skb_queue_tail(&dlci->skb_list, dlci->skb);
++ dlci->skb = NULL;
+ return -ENOMEM;
++ }
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+@@ -912,7 +908,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ skb_pull(dlci->skb, len);
+ __gsm_data_queue(dlci, msg);
+ if (last) {
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ }
+ return size;
+@@ -971,16 +967,22 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+ {
+ unsigned long flags;
++ int sweep;
++
++ if (dlci->constipated)
++ return;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
++ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
+ if (dlci->gsm->tx_bytes == 0) {
+ if (dlci->net)
+ gsm_dlci_data_output_framed(dlci->gsm, dlci);
+ else
+ gsm_dlci_data_output(dlci->gsm, dlci);
+- } else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+- gsm_dlci_data_sweep(dlci->gsm);
++ }
++ if (sweep)
++ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ }
+
+@@ -1027,6 +1029,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ {
+ int mlines = 0;
+ u8 brk = 0;
++ int fc;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+@@ -1038,19 +1041,21 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+- };
++ }
+
+ /* Flow control/ready to communicate */
+- if (modem & MDM_FC) {
++ fc = (modem & MDM_FC) || !(modem & MDM_RTR);
++ if (fc && !dlci->constipated) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+- }
+- if (modem & MDM_RTC) {
+- mlines |= TIOCM_DSR | TIOCM_DTR;
++ } else if (!fc && dlci->constipated) {
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
++
+ /* Map modem bits */
++ if (modem & MDM_RTC)
++ mlines |= TIOCM_DSR | TIOCM_DTR;
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+@@ -1190,6 +1195,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+ {
+ u8 buf[1];
++ unsigned long flags;
++
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+@@ -1206,16 +1213,18 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+- /* Modem wants us to STFU */
+- gsm->constipated = 1;
+- gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+- break;
+- case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+- gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
++ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ /* Kick the link in case it is idling */
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
++ break;
++ case CMD_FCOFF:
++ /* Modem wants us to STFU */
++ gsm->constipated = 1;
++ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+@@ -1668,7 +1677,7 @@ static void gsm_dlci_free(struct kref *ref)
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
+- kfree_skb(dlci->skb);
++ dev_kfree_skb(dlci->skb);
+ kfree(dlci);
+ }
+
+@@ -2007,7 +2016,7 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ {
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+- struct gsm_msg *txq;
++ struct gsm_msg *txq, *ntxq;
+ struct gsm_control *gc;
+
+ gsm->dead = 1;
+@@ -2042,11 +2051,9 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ if (gsm->dlci[i])
+ gsm_dlci_release(gsm->dlci[i]);
+ /* Now wipe the queues */
+- for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+- gsm->tx_head = txq->next;
++ list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ kfree(txq);
+- }
+- gsm->tx_tail = NULL;
++ INIT_LIST_HEAD(&gsm->tx_list);
+ }
+ EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+@@ -2157,6 +2164,7 @@ struct gsm_mux *gsm_alloc_mux(void)
+ }
+ spin_lock_init(&gsm->lock);
+ kref_init(&gsm->ref);
++ INIT_LIST_HEAD(&gsm->tx_list);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+@@ -2273,7 +2281,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+- WARN_ONCE("%s: unknown flag %d\n",
++ WARN_ONCE(1, "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+@@ -2377,12 +2385,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+- spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_data_sweep(gsm);
+- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
+
+ /**
+@@ -2889,6 +2897,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
++ /* If DLCI 0 is not yet fully open return an error. This is ok from a locking
++ perspective as we don't have to worry about this if DLCI0 is lost */
++ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
++ return -EL2NSYNC;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 39d6ab6..8481aae 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1728,7 +1728,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+
+ do_it_again:
+
+- BUG_ON(!tty->read_buf);
++ if (WARN_ON(!tty->read_buf))
++ return -EAGAIN;
+
+ c = job_control(tty, file);
+ if (c < 0)
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 482d51e..e7d82c1 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1118,6 +1118,8 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
+ #define PCI_SUBDEVICE_ID_POCTAL232 0x0308
+ #define PCI_SUBDEVICE_ID_POCTAL422 0x0408
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
+ #define PCI_VENDOR_ID_ADVANTECH 0x13fe
+ #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+@@ -3168,8 +3170,11 @@ static struct pci_device_id serial_pci_tbl[] = {
+ * For now just used the hex ID 0x950a.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+- PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
+- pbn_b0_2_115200 },
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00,
++ 0, 0, pbn_b0_2_115200 },
++ { PCI_VENDOR_ID_OXSEMI, 0x950a,
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30,
++ 0, 0, pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_2_1130000 },
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 6da8cf8..fe9f111 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1627,13 +1627,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ old_cr &= ~ST_UART011_CR_OVSFACT;
+ }
+
++ /*
++ * Workaround for the ST Micro oversampling variants to
++ * increase the bitrate slightly, by lowering the divisor,
++ * to avoid delayed sampling of start bit at high speeds,
++ * else we see data corruption.
++ */
++ if (uap->vendor->oversampling) {
++ if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
++ quot -= 1;
++ else if ((baud > 3250000) && (quot > 2))
++ quot -= 2;
++ }
+ /* Set baud rate */
+ writew(quot & 0x3f, port->membase + UART011_FBRD);
+ writew(quot >> 6, port->membase + UART011_IBRD);
+
+ /*
+ * ----------v----------v----------v----------v-----
+- * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
++ * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
++ * UART011_FBRD & UART011_IBRD.
+ * ----------^----------^----------^----------^-----
+ */
+ writew(lcr_h, port->membase + uap->lcrh_rx);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index a40ab98..4cddbfc 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1680,6 +1680,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
++ struct xhci_cd *cur_cd, *next_cd;
+ unsigned long flags;
+ int size;
+ int i, j, num_ports;
+@@ -1701,6 +1702,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
+
+ for (i = 1; i < MAX_HC_SLOTS; ++i)
+ xhci_free_virt_device(xhci, i);
+@@ -2246,6 +2252,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index bddcbfc..4ed7572 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -99,6 +99,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
++ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index c7c530c..950aef8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -309,12 +309,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ /* Ring the host controller doorbell after placing a command on the ring */
+ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ {
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
++ return;
++
+ xhci_dbg(xhci, "// Ding dong!\n");
+ xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ }
+
++static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
++{
++ u64 temp_64;
++ int ret;
++
++ xhci_dbg(xhci, "Abort command ring\n");
++
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
++ xhci_dbg(xhci, "The command ring isn't running, "
++ "Have the command ring been stopped?\n");
++ return 0;
++ }
++
++ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
++ if (!(temp_64 & CMD_RING_RUNNING)) {
++ xhci_dbg(xhci, "Command ring had been stopped\n");
++ return 0;
++ }
++ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
++ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
++ &xhci->op_regs->cmd_ring);
++
++ /* Section 4.6.1.2 of xHCI 1.0 spec says software should
++ * time the completion od all xHCI commands, including
++ * the Command Abort operation. If software doesn't see
++ * CRR negated in a timely manner (e.g. longer than 5
++ * seconds), then it should assume that the there are
++ * larger problems with the xHC and assert HCRST.
++ */
++ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
++ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
++ if (ret < 0) {
++ xhci_err(xhci, "Stopped the command ring failed, "
++ "maybe the host is dead\n");
++ xhci->xhc_state |= XHCI_STATE_DYING;
++ xhci_quiesce(xhci);
++ xhci_halt(xhci);
++ return -ESHUTDOWN;
++ }
++
++ return 0;
++}
++
++static int xhci_queue_cd(struct xhci_hcd *xhci,
++ struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cd;
++ cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
++ if (!cd)
++ return -ENOMEM;
++ INIT_LIST_HEAD(&cd->cancel_cmd_list);
++
++ cd->command = command;
++ cd->cmd_trb = cmd_trb;
++ list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
++
++ return 0;
++}
++
++/*
++ * Cancel the command which has issue.
++ *
++ * Some commands may hang due to waiting for acknowledgement from
++ * usb device. It is outside of the xHC's ability to control and
++ * will cause the command ring is blocked. When it occurs software
++ * should intervene to recover the command ring.
++ * See Section 4.6.1.1 and 4.6.1.2
++ */
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ int retval = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xhci->lock, flags);
++
++ if (xhci->xhc_state & XHCI_STATE_DYING) {
++ xhci_warn(xhci, "Abort the command ring,"
++ " but the xHCI is dead.\n");
++ retval = -ESHUTDOWN;
++ goto fail;
++ }
++
++ /* queue the cmd desriptor to cancel_cmd_list */
++ retval = xhci_queue_cd(xhci, command, cmd_trb);
++ if (retval) {
++ xhci_warn(xhci, "Queuing command descriptor failed.\n");
++ goto fail;
++ }
++
++ /* abort command ring */
++ retval = xhci_abort_cmd_ring(xhci);
++ if (retval) {
++ xhci_err(xhci, "Abort command ring failed\n");
++ if (unlikely(retval == -ESHUTDOWN)) {
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
++ xhci_dbg(xhci, "xHCI host controller is dead.\n");
++ return retval;
++ }
++ }
++
++fail:
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ return retval;
++}
++
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index,
+@@ -1043,6 +1154,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+ }
+ }
+
++/* Complete the command and detele it from the devcie's command queue.
++ */
++static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
++ struct xhci_command *command, u32 status)
++{
++ command->status = status;
++ list_del(&command->cmd_list);
++ if (command->completion)
++ complete(command->completion);
++ else
++ xhci_free_command(xhci, command);
++}
++
++
+ /* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1. Return 0 if the
+ * completed command isn't at the head of the command list.
+@@ -1061,15 +1186,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ if (xhci->cmd_ring->dequeue != command->command_trb)
+ return 0;
+
+- command->status = GET_COMP_CODE(le32_to_cpu(event->status));
+- list_del(&command->cmd_list);
+- if (command->completion)
+- complete(command->completion);
+- else
+- xhci_free_command(xhci, command);
++ xhci_complete_cmd_in_cmd_wait_list(xhci, command,
++ GET_COMP_CODE(le32_to_cpu(event->status)));
+ return 1;
+ }
+
++/*
++ * Finding the command trb need to be cancelled and modifying it to
++ * NO OP command. And if the command is in device's command wait
++ * list, finishing and freeing it.
++ *
++ * If we can't find the command trb, we think it had already been
++ * executed.
++ */
++static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
++{
++ struct xhci_segment *cur_seg;
++ union xhci_trb *cmd_trb;
++ u32 cycle_state;
++
++ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
++ return;
++
++ /* find the current segment of command ring */
++ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
++ xhci->cmd_ring->dequeue, &cycle_state);
++
++ /* find the command trb matched by cd from command ring */
++ for (cmd_trb = xhci->cmd_ring->dequeue;
++ cmd_trb != xhci->cmd_ring->enqueue;
++ next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
++ /* If the trb is link trb, continue */
++ if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
++ continue;
++
++ if (cur_cd->cmd_trb == cmd_trb) {
++
++ /* If the command in device's command list, we should
++ * finish it and free the command structure.
++ */
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++
++ /* get cycle state from the origin command trb */
++ cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
++ & TRB_CYCLE;
++
++ /* modify the command trb to NO OP command */
++ cmd_trb->generic.field[0] = 0;
++ cmd_trb->generic.field[1] = 0;
++ cmd_trb->generic.field[2] = 0;
++ cmd_trb->generic.field[3] = cpu_to_le32(
++ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
++ break;
++ }
++ }
++}
++
++static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ xhci_cmd_to_noop(xhci, cur_cd);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
++}
++
++/*
++ * traversing the cancel_cmd_list. If the command descriptor according
++ * to cmd_trb is found, the function free it and return 1, otherwise
++ * return 0.
++ */
++static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return 0;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ if (cur_cd->cmd_trb == cmd_trb) {
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
++ * trb pointed by the command ring dequeue pointer is the trb we want to
++ * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
++ * traverse the cancel_cmd_list to trun the all of the commands according
++ * to command descriptor to NO-OP trb.
++ */
++static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
++ int cmd_trb_comp_code)
++{
++ int cur_trb_is_good = 0;
++
++ /* Searching the cmd trb pointed by the command ring dequeue
++ * pointer in command descriptor list. If it is found, free it.
++ */
++ cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
++ xhci->cmd_ring->dequeue);
++
++ if (cmd_trb_comp_code == COMP_CMD_ABORT)
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ else if (cmd_trb_comp_code == COMP_CMD_STOP) {
++ /* traversing the cancel_cmd_list and canceling
++ * the command according to command descriptor
++ */
++ xhci_cancel_cmd_in_cd_list(xhci);
++
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
++ /*
++ * ring command ring doorbell again to restart the
++ * command ring
++ */
++ if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
++ xhci_ring_cmd_db(xhci);
++ }
++ return cur_trb_is_good;
++}
++
+ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+ {
+@@ -1095,6 +1349,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
++
++ if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
++ (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
++ /* If the return value is 0, we think the trb pointed by
++ * command ring dequeue pointer is a good trb. The good
++ * trb means we don't want to cancel the trb, but it have
++ * been stopped by host. So we should handle it normally.
++ * Otherwise, driver should invoke inc_deq() and return.
++ */
++ if (handle_stopped_cmd_ring(xhci,
++ GET_COMP_CODE(le32_to_cpu(event->status)))) {
++ inc_deq(xhci, xhci->cmd_ring, false);
++ return;
++ }
++ }
++
+ switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
+ & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+@@ -3356,7 +3626,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+- if (xhci->hci_version == 0x100) {
++ if (xhci->hci_version == 0x100 &&
++ !(xhci->quirks &
++ XHCI_AVOID_BEI)) {
+ /* Set BEI bit except for the last td */
+ if (i < num_tds - 1)
+ field |= TRB_BEI;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 09872ee..f5c0f38 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+ {
+ u32 result;
+@@ -105,8 +105,12 @@ int xhci_halt(struct xhci_hcd *xhci)
+
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+- if (!ret)
++ if (!ret) {
+ xhci->xhc_state |= XHCI_STATE_HALTED;
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ } else
++ xhci_warn(xhci, "Host not halted after %u microseconds.\n",
++ XHCI_MAX_HALT_USEC);
+ return ret;
+ }
+
+@@ -459,6 +463,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+
+ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
++ if (!dmi_product_name || !dmi_sys_vendor)
++ return false;
+
+ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+ return false;
+@@ -570,6 +576,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
+ return -ENODEV;
+ }
+ xhci->shared_hcd->state = HC_STATE_RUNNING;
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
+@@ -874,7 +881,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, 100*100)) {
++ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+@@ -2506,6 +2513,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ struct completion *cmd_completion;
+ u32 *cmd_status;
+ struct xhci_virt_device *virt_dev;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[udev->slot_id];
+@@ -2551,6 +2559,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ }
+ init_completion(cmd_completion);
+
++ cmd_trb = xhci->cmd_ring->dequeue;
+ if (!ctx_change)
+ ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ udev->slot_id, must_succeed);
+@@ -2572,14 +2581,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ cmd_completion,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for %s command\n",
+ timeleft == 0 ? "Timeout" : "Signal",
+ ctx_change == 0 ?
+ "configure endpoint" :
+ "evaluate context");
+- /* FIXME cancel the configure endpoint command */
++ /* cancel the configure endpoint command */
++ ret = xhci_cancel_cmd(xhci, command, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+@@ -3528,8 +3540,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ unsigned long flags;
+ int timeleft;
+ int ret;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -3541,12 +3555,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* XXX: how much time for xHC slot assignment? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the enable slot request */
+- return 0;
++ /* cancel the enable slot request */
++ return xhci_cancel_cmd(xhci, NULL, cmd_trb);
+ }
+
+ if (!xhci->slot_id) {
+@@ -3607,6 +3621,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u64 temp_64;
++ union xhci_trb *cmd_trb;
+
+ if (!udev->slot_id) {
+ xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+@@ -3645,6 +3660,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+ udev->slot_id);
+ if (ret) {
+@@ -3657,7 +3673,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ /* FIXME: From section 4.3.4: "Software shall be responsible for timing
+ * the SetAddress() "recovery interval" required by USB and aborting the
+ * command on a timeout.
+@@ -3665,7 +3681,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for address device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the address device command */
++ /* cancel the address device command */
++ ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 44d518a..cc368c2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1255,6 +1255,16 @@ struct xhci_td {
+ union xhci_trb *last_trb;
+ };
+
++/* xHCI command default timeout value */
++#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
++
++/* command descriptor */
++struct xhci_cd {
++ struct list_head cancel_cmd_list;
++ struct xhci_command *command;
++ union xhci_trb *cmd_trb;
++};
++
+ struct xhci_dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+@@ -1402,6 +1412,11 @@ struct xhci_hcd {
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
++ unsigned int cmd_ring_state;
++#define CMD_RING_STATE_RUNNING (1 << 0)
++#define CMD_RING_STATE_ABORTED (1 << 1)
++#define CMD_RING_STATE_STOPPED (1 << 2)
++ struct list_head cancel_cmd_list;
+ unsigned int cmd_ring_reserved_trbs;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+@@ -1473,6 +1488,7 @@ struct xhci_hcd {
+ #define XHCI_TRUST_TX_LENGTH (1 << 10)
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
++#define XHCI_AVOID_BEI (1 << 15)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+@@ -1666,6 +1682,8 @@ static inline void xhci_unregister_pci(void) {}
+
+ /* xHCI host controller glue */
+ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++ u32 mask, u32 done, int usec);
+ void xhci_quiesce(struct xhci_hcd *xhci);
+ int xhci_halt(struct xhci_hcd *xhci);
+ int xhci_reset(struct xhci_hcd *xhci);
+@@ -1756,6 +1774,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
+ void xhci_stop_endpoint_command_watchdog(unsigned long arg);
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb);
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, unsigned int stream_id);
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 7324bea..e29a664 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -584,6 +584,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /*
+ * ELV devices:
+ */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 06f6fd2..7b5eb74 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -517,6 +517,11 @@
+ */
+ #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
+
++/*
++ * TIAO product ids (FTDI_VID)
++ * http://www.tiaowiki.com/w/Main_Page
++ */
++#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+
+
+ /********************************/
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c068b4d..3fd4e6f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -870,7 +870,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index a348198..87271e3 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -36,8 +36,6 @@
+ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+ #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+ #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+-#define PANTECH_PRODUCT_UML190_VZW 0x3716
+-#define PANTECH_PRODUCT_UML290_VZW 0x3718
+
+ /* CMOTECH devices */
+ #define CMOTECH_VENDOR_ID 0x16d8
+@@ -68,11 +66,9 @@ static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index f55ae23..790fa63 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_mount_wait(dentry);
+- if (status)
+- return ERR_PTR(status);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
++ if (status) {
++ spin_unlock(&sbi->fs_lock);
++ return ERR_PTR(status);
++ }
+ }
+ done:
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 6ff96c6..8dd615c 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1668,30 +1668,19 @@ static int elf_note_info_init(struct elf_note_info *info)
+ return 0;
+ info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
+ if (!info->psinfo)
+- goto notes_free;
++ return 0;
+ info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
+ if (!info->prstatus)
+- goto psinfo_free;
++ return 0;
+ info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
+ if (!info->fpu)
+- goto prstatus_free;
++ return 0;
+ #ifdef ELF_CORE_COPY_XFPREGS
+ info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
+ if (!info->xfpu)
+- goto fpu_free;
++ return 0;
+ #endif
+ return 1;
+-#ifdef ELF_CORE_COPY_XFPREGS
+- fpu_free:
+- kfree(info->fpu);
+-#endif
+- prstatus_free:
+- kfree(info->prstatus);
+- psinfo_free:
+- kfree(info->psinfo);
+- notes_free:
+- kfree(info->notes);
+- return 0;
+ }
+
+ static int fill_note_info(struct elfhdr *elf, int phdrs,
+diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
+index a9f29b1..2262a77 100644
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -559,6 +559,8 @@ struct ecryptfs_open_req {
+ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb);
+ void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode);
+ int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
+ size_t *decrypted_name_size,
+ struct dentry *ecryptfs_dentry,
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index d3f95f9..841f24f 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -139,29 +139,50 @@ out:
+ return rc;
+ }
+
+-static void ecryptfs_vma_close(struct vm_area_struct *vma)
+-{
+- filemap_write_and_wait(vma->vm_file->f_mapping);
+-}
+-
+-static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+- .close = ecryptfs_vma_close,
+- .fault = filemap_fault,
+-};
++struct kmem_cache *ecryptfs_file_info_cache;
+
+-static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
++static int read_or_initialize_metadata(struct dentry *dentry)
+ {
++ struct inode *inode = dentry->d_inode;
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
++ struct ecryptfs_crypt_stat *crypt_stat;
+ int rc;
+
+- rc = generic_file_mmap(file, vma);
++ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
++ mount_crypt_stat = &ecryptfs_superblock_to_private(
++ inode->i_sb)->mount_crypt_stat;
++ mutex_lock(&crypt_stat->cs_mutex);
++
++ if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
++ crypt_stat->flags & ECRYPTFS_KEY_VALID) {
++ rc = 0;
++ goto out;
++ }
++
++ rc = ecryptfs_read_metadata(dentry);
+ if (!rc)
+- vma->vm_ops = &ecryptfs_file_vm_ops;
++ goto out;
++
++ if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
++ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
++ | ECRYPTFS_ENCRYPTED);
++ rc = 0;
++ goto out;
++ }
++
++ if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
++ !i_size_read(ecryptfs_inode_to_lower(inode))) {
++ rc = ecryptfs_initialize_file(dentry, inode);
++ if (!rc)
++ goto out;
++ }
+
++ rc = -EIO;
++out:
++ mutex_unlock(&crypt_stat->cs_mutex);
+ return rc;
+ }
+
+-struct kmem_cache *ecryptfs_file_info_cache;
+-
+ /**
+ * ecryptfs_open
+ * @inode: inode speciying file to open
+@@ -237,32 +258,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ rc = 0;
+ goto out;
+ }
+- mutex_lock(&crypt_stat->cs_mutex);
+- if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
+- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
+- rc = ecryptfs_read_metadata(ecryptfs_dentry);
+- if (rc) {
+- ecryptfs_printk(KERN_DEBUG,
+- "Valid headers not found\n");
+- if (!(mount_crypt_stat->flags
+- & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
+- rc = -EIO;
+- printk(KERN_WARNING "Either the lower file "
+- "is not in a valid eCryptfs format, "
+- "or the key could not be retrieved. "
+- "Plaintext passthrough mode is not "
+- "enabled; returning -EIO\n");
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out_put;
+- }
+- rc = 0;
+- crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+- | ECRYPTFS_ENCRYPTED);
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out;
+- }
+- }
+- mutex_unlock(&crypt_stat->cs_mutex);
++ rc = read_or_initialize_metadata(ecryptfs_dentry);
++ if (rc)
++ goto out_put;
+ ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
+ "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
+ (unsigned long long)i_size_read(inode));
+@@ -278,8 +276,14 @@ out:
+
+ static int ecryptfs_flush(struct file *file, fl_owner_t td)
+ {
+- return file->f_mode & FMODE_WRITE
+- ? filemap_write_and_wait(file->f_mapping) : 0;
++ struct file *lower_file = ecryptfs_file_to_lower(file);
++
++ if (lower_file->f_op && lower_file->f_op->flush) {
++ filemap_write_and_wait(file->f_mapping);
++ return lower_file->f_op->flush(lower_file, td);
++ }
++
++ return 0;
+ }
+
+ static int ecryptfs_release(struct inode *inode, struct file *file)
+@@ -293,15 +297,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
+ static int
+ ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+- int rc = 0;
+-
+- rc = generic_file_fsync(file, start, end, datasync);
+- if (rc)
+- goto out;
+- rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
+- datasync);
+-out:
+- return rc;
++ return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+ }
+
+ static int ecryptfs_fasync(int fd, struct file *file, int flag)
+@@ -370,7 +366,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+- .mmap = ecryptfs_file_mmap,
++ .mmap = generic_file_mmap,
+ .open = ecryptfs_open,
+ .flush = ecryptfs_flush,
+ .release = ecryptfs_release,
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 7c7556b..a9be90d 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -161,6 +161,31 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
+ return vfs_create(lower_dir_inode, lower_dentry, mode, NULL);
+ }
+
++static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
++ struct dentry *lower_dir_dentry;
++ int rc;
++
++ dget(lower_dentry);
++ lower_dir_dentry = lock_parent(lower_dentry);
++ rc = vfs_unlink(lower_dir_inode, lower_dentry);
++ if (rc) {
++ printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
++ goto out_unlock;
++ }
++ fsstack_copy_attr_times(dir, lower_dir_inode);
++ set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
++ inode->i_ctime = dir->i_ctime;
++ d_drop(dentry);
++out_unlock:
++ unlock_dir(lower_dir_dentry);
++ dput(lower_dentry);
++ return rc;
++}
++
+ /**
+ * ecryptfs_do_create
+ * @directory_inode: inode of the new file's dentry's parent in ecryptfs
+@@ -201,8 +226,10 @@ ecryptfs_do_create(struct inode *directory_inode,
+ }
+ inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ directory_inode->i_sb);
+- if (IS_ERR(inode))
++ if (IS_ERR(inode)) {
++ vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
+ goto out_lock;
++ }
+ fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
+ fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
+ out_lock:
+@@ -219,8 +246,8 @@ out:
+ *
+ * Returns zero on success
+ */
+-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+- struct inode *ecryptfs_inode)
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode)
+ {
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+@@ -284,7 +311,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
+ * that this on disk file is prepared to be an ecryptfs file */
+ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+ if (rc) {
+- drop_nlink(ecryptfs_inode);
++ ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
++ ecryptfs_inode);
++ make_bad_inode(ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
+ iput(ecryptfs_inode);
+ goto out;
+@@ -496,27 +525,7 @@ out_lock:
+
+ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- int rc = 0;
+- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
+- struct dentry *lower_dir_dentry;
+-
+- dget(lower_dentry);
+- lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_unlink(lower_dir_inode, lower_dentry);
+- if (rc) {
+- printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
+- goto out_unlock;
+- }
+- fsstack_copy_attr_times(dir, lower_dir_inode);
+- set_nlink(dentry->d_inode,
+- ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink);
+- dentry->d_inode->i_ctime = dir->i_ctime;
+- d_drop(dentry);
+-out_unlock:
+- unlock_dir(lower_dir_dentry);
+- dput(lower_dentry);
+- return rc;
++ return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
+ }
+
+ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
+@@ -1026,12 +1035,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ goto out;
+ }
+
+- if (S_ISREG(inode->i_mode)) {
+- rc = filemap_write_and_wait(inode->i_mapping);
+- if (rc)
+- goto out;
+- fsstack_copy_attr_all(inode, lower_inode);
+- }
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index b4a6bef..1cfef9f 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
+ inode_info = ecryptfs_inode_to_private(inode);
+ if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
+ &inode_info->lower_file_mutex)) {
++ filemap_write_and_wait(inode->i_mapping);
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+ mutex_unlock(&inode_info->lower_file_mutex);
+diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
+index 6a44148..93a998a 100644
+--- a/fs/ecryptfs/mmap.c
++++ b/fs/ecryptfs/mmap.c
+@@ -62,18 +62,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ int rc;
+
+- /*
+- * Refuse to write the page out if we are called from reclaim context
+- * since our writepage() path may potentially allocate memory when
+- * calling into the lower fs vfs_write() which may in turn invoke
+- * us again.
+- */
+- if (current->flags & PF_MEMALLOC) {
+- redirty_page_for_writepage(wbc, page);
+- rc = 0;
+- goto out;
+- }
+-
+ rc = ecryptfs_encrypt_page(page);
+ if (rc) {
+ ecryptfs_printk(KERN_WARNING, "Error encrypting "
+@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+ int rc;
+- int need_unlock_page = 1;
+
+ ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
+ "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
+@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
+ "zeros in page with index = [0x%.16lx]\n", index);
+ goto out;
+ }
+- set_page_dirty(page);
+- unlock_page(page);
+- need_unlock_page = 0;
++ rc = ecryptfs_encrypt_page(page);
++ if (rc) {
++ ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
++ "index [0x%.16lx])\n", index);
++ goto out;
++ }
+ if (pos + copied > i_size_read(ecryptfs_inode)) {
+ i_size_write(ecryptfs_inode, pos + copied);
+ ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
+ "[0x%.16llx]\n",
+ (unsigned long long)i_size_read(ecryptfs_inode));
+- balance_dirty_pages_ratelimited(mapping);
+- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+- if (rc) {
+- printk(KERN_ERR "Error writing inode size to metadata; "
+- "rc = [%d]\n", rc);
+- goto out;
+- }
+ }
+- rc = copied;
++ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
++ if (rc)
++ printk(KERN_ERR "Error writing inode size to metadata; "
++ "rc = [%d]\n", rc);
++ else
++ rc = copied;
+ out:
+- if (need_unlock_page)
+- unlock_page(page);
++ unlock_page(page);
+ page_cache_release(page);
+ return rc;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8b01f9f..bac2330 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2382,6 +2382,16 @@ static int ext4_nonda_switch(struct super_block *sb)
+ free_blocks = EXT4_C2B(sbi,
+ percpu_counter_read_positive(&sbi->s_freeclusters_counter));
+ dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
++ /*
++ * Start pushing delalloc when 1/2 of free blocks are dirty.
++ */
++ if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
++ !writeback_in_progress(sb->s_bdi) &&
++ down_read_trylock(&sb->s_umount)) {
++ writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
++ up_read(&sb->s_umount);
++ }
++
+ if (2 * free_blocks < 3 * dirty_blocks ||
+ free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
+ /*
+@@ -2390,13 +2400,6 @@ static int ext4_nonda_switch(struct super_block *sb)
+ */
+ return 1;
+ }
+- /*
+- * Even if we don't switch but are nearing capacity,
+- * start pushing delalloc when 1/2 of free blocks are dirty.
+- */
+- if (free_blocks < 2 * dirty_blocks)
+- writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
+-
+ return 0;
+ }
+
+@@ -4004,6 +4007,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
++ int need_datasync = 0;
+
+ /* For fields not not tracking in the in-memory inode,
+ * initialise them to zero for new inodes. */
+@@ -4052,7 +4056,10 @@ static int ext4_do_update_inode(handle_t *handle,
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- ext4_isize_set(raw_inode, ei->i_disksize);
++ if (ei->i_disksize != ext4_isize(raw_inode)) {
++ ext4_isize_set(raw_inode, ei->i_disksize);
++ need_datasync = 1;
++ }
+ if (ei->i_disksize > 0x7fffffffULL) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+@@ -4105,7 +4112,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ err = rc;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+
+- ext4_update_inode_fsync_trans(handle, inode, 0);
++ ext4_update_inode_fsync_trans(handle, inode, need_datasync);
+ out_brelse:
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index c5826c6..e2016f3 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
+ }
+
+ /**
+- * mext_check_null_inode - NULL check for two inodes
+- *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+- */
+-static int
+-mext_check_null_inode(struct inode *inode1, struct inode *inode2,
+- const char *function, unsigned int line)
+-{
+- int ret = 0;
+-
+- if (inode1 == NULL) {
+- __ext4_error(inode2->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 NULL inode2 %lu", inode2->i_ino);
+- ret = -EIO;
+- } else if (inode2 == NULL) {
+- __ext4_error(inode1->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 %lu inode2 NULL", inode1->i_ino);
+- ret = -EIO;
+- }
+- return ret;
+-}
+-
+-/**
+ * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
+ *
+- * @orig_inode: original inode structure
+- * @donor_inode: donor inode structure
+- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
+- * i_ino order.
++ * Acquire write lock of i_data_sem of the two inodes
+ */
+ static void
+-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
++double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+- struct inode *first = orig_inode, *second = donor_inode;
++ if (first < second) {
++ down_write(&EXT4_I(first)->i_data_sem);
++ down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++ } else {
++ down_write(&EXT4_I(second)->i_data_sem);
++ down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+
+- /*
+- * Use the inode number to provide the stable locking order instead
+- * of its address, because the C language doesn't guarantee you can
+- * compare pointers that don't come from the same array.
+- */
+- if (donor_inode->i_ino < orig_inode->i_ino) {
+- first = donor_inode;
+- second = orig_inode;
+ }
+-
+- down_write(&EXT4_I(first)->i_data_sem);
+- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ }
+
+ /**
+@@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
+ return -EINVAL;
+ }
+
+- /* Files should be in the same ext4 FS */
+- if (orig_inode->i_sb != donor_inode->i_sb) {
+- ext4_debug("ext4 move extent: The argument files "
+- "should be in same FS [ino:orig %lu, donor %lu]\n",
+- orig_inode->i_ino, donor_inode->i_ino);
+- return -EINVAL;
+- }
+-
+ /* Ext4 move extent supports only extent based file */
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ ext4_debug("ext4 move extent: orig file is not extents "
+@@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
+ * @inode1: the inode structure
+ * @inode2: the inode structure
+ *
+- * Lock two inodes' i_mutex by i_ino order.
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
++ * Lock two inodes' i_mutex
+ */
+-static int
++static void
+ mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1 == inode2) {
+- mutex_lock(&inode1->i_mutex);
+- goto out;
+- }
+-
+- if (inode1->i_ino < inode2->i_ino) {
++ BUG_ON(inode1 == inode2);
++ if (inode1 < inode2) {
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+ } else {
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
+ }
+-
+-out:
+- return ret;
+ }
+
+ /**
+@@ -1109,28 +1051,13 @@ out:
+ * @inode1: the inode that is released first
+ * @inode2: the inode that is released second
+ *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+ */
+
+-static int
++static void
+ mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1)
+- mutex_unlock(&inode1->i_mutex);
+-
+- if (inode2 && inode2 != inode1)
+- mutex_unlock(&inode2->i_mutex);
+-
+-out:
+- return ret;
++ mutex_unlock(&inode1->i_mutex);
++ mutex_unlock(&inode2->i_mutex);
+ }
+
+ /**
+@@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
+ ext4_lblk_t rest_blocks;
+ pgoff_t orig_page_offset = 0, seq_end_page;
+- int ret1, ret2, depth, last_extent = 0;
++ int ret, depth, last_extent = 0;
+ int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ int data_offset_in_page;
+ int block_len_in_page;
+ int uninit;
+
+- /* orig and donor should be different file */
+- if (orig_inode->i_ino == donor_inode->i_ino) {
++ if (orig_inode->i_sb != donor_inode->i_sb) {
++ ext4_debug("ext4 move extent: The argument files "
++ "should be in same FS [ino:orig %lu, donor %lu]\n",
++ orig_inode->i_ino, donor_inode->i_ino);
++ return -EINVAL;
++ }
++
++ /* orig and donor should be different inodes */
++ if (orig_inode == donor_inode) {
+ ext4_debug("ext4 move extent: The argument files should not "
+- "be same file [ino:orig %lu, donor %lu]\n",
++ "be same inode [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+@@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+-
++ /* TODO: This is non obvious task to swap blocks for inodes with full
++ jornaling enabled */
++ if (ext4_should_journal_data(orig_inode) ||
++ ext4_should_journal_data(donor_inode)) {
++ return -EINVAL;
++ }
+ /* Protect orig and donor inodes against a truncate */
+- ret1 = mext_inode_double_lock(orig_inode, donor_inode);
+- if (ret1 < 0)
+- return ret1;
++ mext_inode_double_lock(orig_inode, donor_inode);
+
+ /* Protect extent tree against block allocations via delalloc */
+ double_down_write_data_sem(orig_inode, donor_inode);
+ /* Check the filesystem environment whether move_extent can be done */
+- ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
++ ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
+ donor_start, &len);
+- if (ret1)
++ if (ret)
+ goto out;
+
+ file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
+@@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (file_end < block_end)
+ len -= block_end - file_end;
+
+- ret1 = get_ext_path(orig_inode, block_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &orig_path);
++ if (ret)
+ goto out;
+
+ /* Get path structure to check the hole */
+- ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &holecheck_path);
++ if (ret)
+ goto out;
+
+ depth = ext_depth(orig_inode);
+@@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode,
+ holecheck_path, &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ last_extent = mext_next_extent(orig_inode, orig_path,
+ &ext_dummy);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ seq_start = le32_to_cpu(ext_cur->ee_block);
+@@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (le32_to_cpu(ext_cur->ee_block) > block_end) {
+ ext4_debug("ext4 move extent: The specified range of file "
+ "may be the hole\n");
+- ret1 = -EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode, holecheck_path,
+ &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ break;
+ }
+ add_blocks = ext4_ext_get_actual_len(ext_cur);
+@@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_page_offset,
+ data_offset_in_page,
+ block_len_in_page, uninit,
+- &ret1);
++ &ret);
+
+ /* Count how many blocks we have exchanged */
+ *moved_len += block_len_in_page;
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+ if (*moved_len > len) {
+ EXT4_ERROR_INODE(orig_inode,
+ "We replaced blocks too much! "
+ "sum of replaced: %llu requested: %llu",
+ *moved_len, len);
+- ret1 = -EIO;
++ ret = -EIO;
+ break;
+ }
+
+@@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ }
+
+ double_down_write_data_sem(orig_inode, donor_inode);
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+
+ /* Decrease buffer counter */
+ if (holecheck_path)
+ ext4_ext_drop_refs(holecheck_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
++ if (ret)
+ break;
+ depth = holecheck_path->p_depth;
+
+ /* Decrease buffer counter */
+ if (orig_path)
+ ext4_ext_drop_refs(orig_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &orig_path);
++ if (ret)
+ break;
+
+ ext_cur = holecheck_path[depth].p_ext;
+@@ -1412,12 +1349,7 @@ out:
+ kfree(holecheck_path);
+ }
+ double_up_write_data_sem(orig_inode, donor_inode);
+- ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
+-
+- if (ret1)
+- return ret1;
+- else if (ret2)
+- return ret2;
++ mext_inode_double_unlock(orig_inode, donor_inode);
+
+- return 0;
++ return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4dd0890..88f97e5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1801,9 +1801,7 @@ retry:
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, inode->i_mode, rdev);
+-#ifdef CONFIG_EXT4_FS_XATTR
+ inode->i_op = &ext4_special_inode_operations;
+-#endif
+ err = ext4_add_nondir(handle, dentry, inode);
+ }
+ ext4_journal_stop(handle);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 54f5786..13bfa07 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -63,6 +63,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
+ {
+ return test_bit(BDI_writeback_running, &bdi->state);
+ }
++EXPORT_SYMBOL(writeback_in_progress);
+
+ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+ {
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index b09e51d..464cd76 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1032,11 +1032,11 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+@@ -1075,11 +1075,11 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 23d7451..df753a1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -40,6 +40,7 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
++ char *nodename;
+ };
+
+ struct nsm_res {
+@@ -93,6 +94,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
++ .nodename = utsname()->nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+@@ -429,7 +431,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, utsname()->nodename);
++ encode_nsm_string(xdr, argp->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index d774309..1aaa0ee 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -164,25 +164,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ return bio;
+ }
+
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+- struct parallel_io *par)
++ struct parallel_io *par,
++ unsigned int offset, int len)
+ {
++ isect = isect + (offset >> SECTOR_SHIFT);
++ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++ npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++ if (bio_add_page(bio, page, len, offset) < len) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+ }
+
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++ sector_t isect, struct page *page,
++ struct pnfs_block_extent *be,
++ void (*end_io)(struct bio *, int err),
++ struct parallel_io *par)
++{
++ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++ end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -446,6 +460,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ return;
+ }
+
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++ struct page *page = bvec->bv_page;
++
++ /* Only one page in bvec */
++ unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int offset, unsigned int len)
++{
++ struct bio *bio;
++ struct page *shadow_page;
++ sector_t isect;
++ char *kaddr, *kshadow_addr;
++ int ret = 0;
++
++ dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (shadow_page == NULL)
++ return -ENOMEM;
++
++ bio = bio_alloc(GFP_NOIO, 1);
++ if (bio == NULL)
++ return -ENOMEM;
++
++ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++ (offset / SECTOR_SIZE);
++
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = bl_read_single_end_io;
++
++ lock_page(shadow_page);
++ if (bio_add_page(bio, shadow_page,
++ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++ unlock_page(shadow_page);
++ bio_put(bio);
++ return -EIO;
++ }
++
++ submit_bio(READ, bio);
++ wait_on_page_locked(shadow_page);
++ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++ ret = -EIO;
++ } else {
++ kaddr = kmap_atomic(page);
++ kshadow_addr = kmap_atomic(shadow_page);
++ memcpy(kaddr + offset, kshadow_addr + offset, len);
++ kunmap_atomic(kshadow_addr);
++ kunmap_atomic(kaddr);
++ }
++ __free_page(shadow_page);
++ bio_put(bio);
++
++ return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int dirty_offset, unsigned int dirty_len,
++ bool full_page)
++{
++ int ret = 0;
++ unsigned int start, end;
++
++ if (full_page) {
++ start = 0;
++ end = PAGE_CACHE_SIZE;
++ } else {
++ start = round_down(dirty_offset, SECTOR_SIZE);
++ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++ }
++
++ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++ if (!be) {
++ zero_user_segments(page, start, dirty_offset,
++ dirty_offset + dirty_len, end);
++ if (start == 0 && end == PAGE_CACHE_SIZE &&
++ trylock_page(page)) {
++ SetPageUptodate(page);
++ unlock_page(page);
++ }
++ return ret;
++ }
++
++ if (start != dirty_offset)
++ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++ if (!ret && (dirty_offset + dirty_len < end))
++ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++ end - dirty_offset - dirty_len);
++
++ return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+@@ -479,7 +593,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ SetPageUptodate(page);
+
+ cleanup:
+- bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+@@ -501,6 +614,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ struct parallel_io *par;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
++ unsigned int pg_offset, pg_len, saved_len;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+@@ -615,10 +729,11 @@ next_page:
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+- isect, NULL);
++ isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+@@ -626,7 +741,26 @@ next_page:
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++ pg_offset = offset & ~PAGE_CACHE_MASK;
++ if (pg_offset + count > PAGE_CACHE_SIZE)
++ pg_len = PAGE_CACHE_SIZE - pg_offset;
++ else
++ pg_len = count;
++
++ saved_len = pg_len;
++ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++ !bl_is_sector_init(be->be_inval, isect)) {
++ ret = bl_read_partial_page_sync(pages[i], cow_read,
++ pg_offset, pg_len, true);
++ if (ret) {
++ dprintk("%s bl_read_partial_page_sync fail %d\n",
++ __func__, ret);
++ wdata->pnfs_error = ret;
++ goto out;
++ }
++
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS,
+ NULL);
+@@ -636,15 +770,35 @@ next_page:
+ wdata->pnfs_error = ret;
+ goto out;
+ }
++
++ /* Expand to full page write */
++ pg_offset = 0;
++ pg_len = PAGE_CACHE_SIZE;
++ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
++ (pg_len & (SECTOR_SIZE - 1))){
++ /* ahh, nasty case. We have to do sync full sector
++ * read-modify-write cycles.
++ */
++ unsigned int saved_offset = pg_offset;
++ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++ pg_len, false);
++ pg_offset = round_down(pg_offset, SECTOR_SIZE);
++ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++ - pg_offset;
+ }
+- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
++
++
++ bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ isect, pages[i], be,
+- bl_end_io_write, par);
++ bl_end_io_write, par,
++ pg_offset, pg_len);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
++ offset += saved_len;
++ count -= saved_len;
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+@@ -662,12 +816,10 @@ next_page:
+ }
+
+ write_done:
+- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+- if (count < wdata->res.count) {
+- wdata->res.count = count;
+- }
++ wdata->res.count = wdata->args.count;
+ out:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 42acf7e..519a9de 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -40,6 +40,7 @@
+
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+ struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 516b7f0..f66439e 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1289,6 +1289,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
++ ret = 1;
+ goto out_bh;
+ }
+
+@@ -1333,8 +1334,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ UDF_ID_SPARABLE,
+ strlen(UDF_ID_SPARABLE))) {
+ if (udf_load_sparable_map(sb, map,
+- (struct sparablePartitionMap *)gpm) < 0)
++ (struct sparablePartitionMap *)gpm) < 0) {
++ ret = 1;
+ goto out_bh;
++ }
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 7978eec..3e8f2f7 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -188,7 +188,7 @@ struct sp_node {
+
+ struct shared_policy {
+ struct rb_root root;
+- spinlock_t lock;
++ struct mutex mutex;
+ };
+
+ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 67cc215..1874c5e 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1823,7 +1823,6 @@
+ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+ #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+ #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
+-#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530
+
+ #define PCI_VENDOR_ID_RADISYS 0x1331
+
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index e5a7b9a..416dcb0 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1353,7 +1353,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ if (!ct || !nf_ct_is_untracked(ct)) {
+- nf_reset(skb);
++ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 6b76d81..a122196 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -292,7 +292,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+ static int
+ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+- return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
++ return *rdp->nxttail[RCU_DONE_TAIL +
++ ACCESS_ONCE(rsp->completed) != rdp->completed] &&
++ !rcu_gp_in_progress(rsp);
+ }
+
+ /*
+diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
+index 8b44e7f..85e9da2 100644
+--- a/kernel/sched_stoptask.c
++++ b/kernel/sched_stoptask.c
+@@ -25,8 +25,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
+ {
+ struct task_struct *stop = rq->stop;
+
+- if (stop && stop->on_rq)
++ if (stop && stop->on_rq) {
++ stop->se.exec_start = rq->clock_task;
+ return stop;
++ }
+
+ return NULL;
+ }
+@@ -50,6 +52,21 @@ static void yield_task_stop(struct rq *rq)
+
+ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+ {
++ struct task_struct *curr = rq->curr;
++ u64 delta_exec;
++
++ delta_exec = rq->clock_task - curr->se.exec_start;
++ if (unlikely((s64)delta_exec < 0))
++ delta_exec = 0;
++
++ schedstat_set(curr->se.statistics.exec_max,
++ max(curr->se.statistics.exec_max, delta_exec));
++
++ curr->se.sum_exec_runtime += delta_exec;
++ account_group_exec_runtime(curr, delta_exec);
++
++ curr->se.exec_start = rq->clock_task;
++ cpuacct_charge(curr, delta_exec);
+ }
+
+ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+@@ -58,6 +75,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+
+ static void set_curr_task_stop(struct rq *rq)
+ {
++ struct task_struct *stop = rq->stop;
++
++ stop->se.exec_start = rq->clock_task;
+ }
+
+ static void switched_to_stop(struct rq *rq, struct task_struct *p)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 481611f..c504302 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
++ disable_nonboot_cpus();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b413138..43a19c5 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1726,10 +1726,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ *nextp = n;
+ }
+
+-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++static void cwq_activate_delayed_work(struct work_struct *work)
+ {
+- struct work_struct *work = list_first_entry(&cwq->delayed_works,
+- struct work_struct, entry);
++ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+ struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
+
+ trace_workqueue_activate_work(work);
+@@ -1738,6 +1737,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+ cwq->nr_active++;
+ }
+
++static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++{
++ struct work_struct *work = list_first_entry(&cwq->delayed_works,
++ struct work_struct, entry);
++
++ cwq_activate_delayed_work(work);
++}
++
+ /**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+@@ -1869,7 +1876,9 @@ __acquires(&gcwq->lock)
+
+ spin_unlock_irq(&gcwq->lock);
+
++ smp_wmb(); /* paired with test_and_set_bit(PENDING) */
+ work_clear_pending(work);
++
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+@@ -2626,6 +2635,18 @@ static int try_to_grab_pending(struct work_struct *work)
+ smp_rmb();
+ if (gcwq == get_work_gcwq(work)) {
+ debug_work_deactivate(work);
++
++ /*
++ * A delayed work item cannot be grabbed directly
++ * because it might have linked NO_COLOR work items
++ * which, if left on the delayed_list, will confuse
++ * cwq->nr_active management later on and cause
++ * stall. Make sure the work item is activated
++ * before grabbing.
++ */
++ if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
++ cwq_activate_delayed_work(work);
++
+ list_del_init(&work->entry);
+ cwq_dec_nr_in_flight(get_work_cwq(work),
+ get_work_color(work),
+diff --git a/lib/gcd.c b/lib/gcd.c
+index f879033..433d89b 100644
+--- a/lib/gcd.c
++++ b/lib/gcd.c
+@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
+
+ if (a < b)
+ swap(a, b);
++
++ if (!b)
++ return a;
+ while ((r = a % b) != 0) {
+ a = b;
+ b = r;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0f897b8..d6c0fdf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2429,8 +2429,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ * from page cache lookup which is in HPAGE_SIZE units.
+ */
+ address = address & huge_page_mask(h);
+- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+- + (vma->vm_pgoff >> PAGE_SHIFT);
++ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ vma->vm_pgoff;
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 11b8d47..4c82c21 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -607,24 +607,39 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ return first;
+ }
+
+-/* Apply policy to a single VMA */
+-static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
++/*
++ * Apply policy to a single VMA
++ * This must be called with the mmap_sem held for writing.
++ */
++static int vma_replace_policy(struct vm_area_struct *vma,
++ struct mempolicy *pol)
+ {
+- int err = 0;
+- struct mempolicy *old = vma->vm_policy;
++ int err;
++ struct mempolicy *old;
++ struct mempolicy *new;
+
+ pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_ops, vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+- if (vma->vm_ops && vma->vm_ops->set_policy)
++ new = mpol_dup(pol);
++ if (IS_ERR(new))
++ return PTR_ERR(new);
++
++ if (vma->vm_ops && vma->vm_ops->set_policy) {
+ err = vma->vm_ops->set_policy(vma, new);
+- if (!err) {
+- mpol_get(new);
+- vma->vm_policy = new;
+- mpol_put(old);
++ if (err)
++ goto err_out;
+ }
++
++ old = vma->vm_policy;
++ vma->vm_policy = new; /* protected by mmap_sem */
++ mpol_put(old);
++
++ return 0;
++ err_out:
++ mpol_put(new);
+ return err;
+ }
+
+@@ -675,7 +690,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ if (err)
+ goto out;
+ }
+- err = policy_vma(vma, new_pol);
++ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
+ }
+@@ -1507,8 +1522,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
+ addr);
+ if (vpol)
+ pol = vpol;
+- } else if (vma->vm_policy)
++ } else if (vma->vm_policy) {
+ pol = vma->vm_policy;
++
++ /*
++ * shmem_alloc_page() passes MPOL_F_SHARED policy with
++ * a pseudo vma whose vma->vm_ops=NULL. Take a reference
++ * count on these policies which will be dropped by
++ * mpol_cond_put() later
++ */
++ if (mpol_needs_cond_ref(pol))
++ mpol_get(pol);
++ }
+ }
+ if (!pol)
+ pol = &default_policy;
+@@ -2032,7 +2057,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ */
+
+ /* lookup first element intersecting start-end */
+-/* Caller holds sp->lock */
++/* Caller holds sp->mutex */
+ static struct sp_node *
+ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
+ {
+@@ -2096,36 +2121,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+
+ if (!sp->root.rb_node)
+ return NULL;
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ sn = sp_lookup(sp, idx, idx+1);
+ if (sn) {
+ mpol_get(sn->policy);
+ pol = sn->policy;
+ }
+- spin_unlock(&sp->lock);
++ mutex_unlock(&sp->mutex);
+ return pol;
+ }
+
++static void sp_free(struct sp_node *n)
++{
++ mpol_put(n->policy);
++ kmem_cache_free(sn_cache, n);
++}
++
+ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
+ {
+ pr_debug("deleting %lx-l%lx\n", n->start, n->end);
+ rb_erase(&n->nd, &sp->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_free(n);
+ }
+
+ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ struct mempolicy *pol)
+ {
+- struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
++ struct sp_node *n;
++ struct mempolicy *newpol;
+
++ n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ if (!n)
+ return NULL;
++
++ newpol = mpol_dup(pol);
++ if (IS_ERR(newpol)) {
++ kmem_cache_free(sn_cache, n);
++ return NULL;
++ }
++ newpol->flags |= MPOL_F_SHARED;
++
+ n->start = start;
+ n->end = end;
+- mpol_get(pol);
+- pol->flags |= MPOL_F_SHARED; /* for unref */
+- n->policy = pol;
++ n->policy = newpol;
++
+ return n;
+ }
+
+@@ -2133,10 +2172,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
+ unsigned long end, struct sp_node *new)
+ {
+- struct sp_node *n, *new2 = NULL;
++ struct sp_node *n;
++ int ret = 0;
+
+-restart:
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ n = sp_lookup(sp, start, end);
+ /* Take care of old policies in the same range. */
+ while (n && n->start < end) {
+@@ -2149,16 +2188,14 @@ restart:
+ } else {
+ /* Old policy spanning whole new range. */
+ if (n->end > end) {
++ struct sp_node *new2;
++ new2 = sp_alloc(end, n->end, n->policy);
+ if (!new2) {
+- spin_unlock(&sp->lock);
+- new2 = sp_alloc(end, n->end, n->policy);
+- if (!new2)
+- return -ENOMEM;
+- goto restart;
++ ret = -ENOMEM;
++ goto out;
+ }
+ n->end = start;
+ sp_insert(sp, new2);
+- new2 = NULL;
+ break;
+ } else
+ n->end = start;
+@@ -2169,12 +2206,9 @@ restart:
+ }
+ if (new)
+ sp_insert(sp, new);
+- spin_unlock(&sp->lock);
+- if (new2) {
+- mpol_put(new2->policy);
+- kmem_cache_free(sn_cache, new2);
+- }
+- return 0;
++out:
++ mutex_unlock(&sp->mutex);
++ return ret;
+ }
+
+ /**
+@@ -2192,7 +2226,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
+ int ret;
+
+ sp->root = RB_ROOT; /* empty tree == default mempolicy */
+- spin_lock_init(&sp->lock);
++ mutex_init(&sp->mutex);
+
+ if (mpol) {
+ struct vm_area_struct pvma;
+@@ -2246,7 +2280,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
+ }
+ err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+ if (err && new)
+- kmem_cache_free(sn_cache, new);
++ sp_free(new);
+ return err;
+ }
+
+@@ -2258,16 +2292,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
+
+ if (!p->root.rb_node)
+ return;
+- spin_lock(&p->lock);
++ mutex_lock(&p->mutex);
+ next = rb_first(&p->root);
+ while (next) {
+ n = rb_entry(next, struct sp_node, nd);
+ next = rb_next(&n->nd);
+- rb_erase(&n->nd, &p->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_delete(p, n);
+ }
+- spin_unlock(&p->lock);
++ mutex_unlock(&p->mutex);
+ }
+
+ /* assumes fs == KERNEL_DS */
+diff --git a/mm/slab.c b/mm/slab.c
+index cd3ab93..4c3b671 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1669,9 +1669,6 @@ void __init kmem_cache_init_late(void)
+
+ g_cpucache_up = LATE;
+
+- /* Annotate slab for lockdep -- annotate the malloc caches */
+- init_lock_keys();
+-
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+@@ -1679,6 +1676,9 @@ void __init kmem_cache_init_late(void)
+ BUG();
+ mutex_unlock(&cache_chain_mutex);
+
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
+ /* Done! */
+ g_cpucache_up = FULL;
+
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 632b15e..00fb58a 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
+ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
+ return 0;
+
++ clear_page_mlock(page);
++
+ spin_lock_irq(&mapping->tree_lock);
+ if (PageDirty(page))
+ goto failed;
+
+- clear_page_mlock(page);
+ BUG_ON(page_has_private(page));
+ __delete_from_page_cache(page);
+ spin_unlock_irq(&mapping->tree_lock);
+diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+index de9da21..d7d63f4 100644
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+ *dataoff = nhoff + (iph->ihl << 2);
+ *protonum = iph->protocol;
+
++ /* Check bogus IP headers */
++ if (*dataoff > skb->len) {
++ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
++ "nhoff %u, ihl %u, skblen %u\n",
++ nhoff, iph->ihl << 2, skb->len);
++ return -NF_ACCEPT;
++ }
++
+ return NF_ACCEPT;
+ }
+
+diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
+index 78844d9..6609a84 100644
+--- a/net/ipv4/netfilter/nf_nat_sip.c
++++ b/net/ipv4/netfilter/nf_nat_sip.c
+@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+ hdr, NULL, &matchoff, &matchlen,
+ &addr, &port) > 0) {
+- unsigned int matchend, poff, plen, buflen, n;
++ unsigned int olen, matchend, poff, plen, buflen, n;
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+
+ /* We're only interested in headers related to this
+@@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ goto next;
+ }
+
++ olen = *datalen;
+ if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
+ &addr, port))
+ return NF_DROP;
+
+- matchend = matchoff + matchlen;
++ matchend = matchoff + matchlen + *datalen - olen;
+
+ /* The maddr= parameter (RFC 2361) specifies where to send
+ * the reply. */
+@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
+ ret = nf_ct_expect_related(rtcp_exp);
+ if (ret == 0)
+ break;
+- else if (ret != -EBUSY) {
++ else if (ret == -EBUSY) {
++ nf_ct_unexpect_related(rtp_exp);
++ continue;
++ } else if (ret < 0) {
+ nf_ct_unexpect_related(rtp_exp);
+ port = 0;
+ break;
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 340c80d..7918eb7 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -366,23 +366,6 @@ static void evict_oldest_expect(struct nf_conn *master,
+ }
+ }
+
+-static inline int refresh_timer(struct nf_conntrack_expect *i)
+-{
+- struct nf_conn_help *master_help = nfct_help(i->master);
+- const struct nf_conntrack_expect_policy *p;
+-
+- if (!del_timer(&i->timeout))
+- return 0;
+-
+- p = &rcu_dereference_protected(
+- master_help->helper,
+- lockdep_is_held(&nf_conntrack_lock)
+- )->expect_policy[i->class];
+- i->timeout.expires = jiffies + p->timeout * HZ;
+- add_timer(&i->timeout);
+- return 1;
+-}
+-
+ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ {
+ const struct nf_conntrack_expect_policy *p;
+@@ -390,7 +373,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ struct nf_conn *master = expect->master;
+ struct nf_conn_help *master_help = nfct_help(master);
+ struct net *net = nf_ct_exp_net(expect);
+- struct hlist_node *n;
++ struct hlist_node *n, *next;
+ unsigned int h;
+ int ret = 1;
+
+@@ -401,12 +384,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ goto out;
+ }
+ h = nf_ct_expect_dst_hash(&expect->tuple);
+- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
++ hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+ if (expect_matches(i, expect)) {
+- /* Refresh timer: if it's dying, ignore.. */
+- if (refresh_timer(i)) {
+- ret = 0;
+- goto out;
++ if (del_timer(&i->timeout)) {
++ nf_ct_unlink_expect(i);
++ nf_ct_expect_put(i);
++ break;
+ }
+ } else if (expect_clash(i, expect)) {
+ ret = -EBUSY;
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index dfd52ba..8f3f280 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+
+ /* Precision saver. */
+-static inline u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -400,7 +399,7 @@ user2credits(u_int32_t user)
+ return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
+ }
+
+-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
++static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+ {
+ dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
+ if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
+@@ -531,8 +530,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ dh->rateinfo.prev = jiffies;
+ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+ hinfo->cfg.burst);
+- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
+- hinfo->cfg.burst);
++ dh->rateinfo.credit_cap = dh->rateinfo.credit;
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ } else {
+ /* update expiration timeout */
+diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
+index 32b7a57..a4c1e45 100644
+--- a/net/netfilter/xt_limit.c
++++ b/net/netfilter/xt_limit.c
+@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ }
+
+ /* Precision saver. */
+-static u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
+
+ /* For SMP, we only want to use one set of state. */
+ r->master = priv;
++ /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
++ 128. */
++ priv->prev = jiffies;
++ priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+ if (r->cost == 0) {
+- /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+- 128. */
+- priv->prev = jiffies;
+- priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
++ r->credit_cap = priv->credit; /* Credits full. */
+ r->cost = user2credits(r->avg);
+ }
+ return 0;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index c5391af..10a385b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1028,6 +1028,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++ xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1054,7 +1064,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ /* Sanity check of the record length */
+ if (unlikely(transport->tcp_reclen < 8)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+@@ -1135,7 +1145,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+- xprt_force_disconnect(&transport->xprt);
++ xs_tcp_force_close(&transport->xprt);
+ }
+ xs_tcp_check_fraghdr(transport);
+ }
+@@ -1458,6 +1468,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1515,8 +1527,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- xprt_force_disconnect(xprt);
+ xprt->connect_cookie++;
++ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+ * If the server closed down the connection, make sure that
+@@ -2159,8 +2171,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index d897278..978416d 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -98,24 +98,24 @@ try-run = $(shell set -e; \
+ # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
+
+ as-option = $(call try-run,\
+- $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
+
+ # as-instr
+ # Usage: cflags-y += $(call as-instr,instr,option1,option2)
+
+ as-instr = $(call try-run,\
+- /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
++ printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+
+ # cc-option
+ # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+
+ cc-option = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # cc-option-yn
+ # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
+ cc-option-yn = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+
+ # cc-option-align
+ # Prefix align with either -falign or -malign
+@@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
+ # cc-disable-warning
+ # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
+ cc-disable-warning = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+
+ # cc-version
+ # Usage gcc-ver := $(call cc-version)
+@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+ # cc-ldoption
+ # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ cc-ldoption = $(call try-run,\
+- $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # ld-option
+ # Usage: LDFLAGS += $(call ld-option, -X)
+@@ -209,7 +209,7 @@ endif
+ # >$< substitution to preserve $ when reloading .cmd file
+ # note: when using inline perl scripts [perl -e '...$$t=1;...']
+ # in $(cmd_xxx) double $$ your perl vars
+-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
++make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
+
+ # Find any prerequisites that is newer than target or that does not exist.
+ # PHONY targets skipped in both cases.
+diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
+index debecb5..7f2126d 100644
+--- a/scripts/gcc-version.sh
++++ b/scripts/gcc-version.sh
+@@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
+ exit 1
+ fi
+
+-MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
+-MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
++MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
++MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
+ if [ "x$with_patchlevel" != "x" ] ; then
+- PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
++ PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
+ printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
+ else
+ printf "%02d%02d\\n" $MAJOR $MINOR
+diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
+index 29493dc..12dbd0b 100644
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index afaec61..973e8c1 100644
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
+index fa59cbf..854d9c7 100755
+--- a/scripts/kconfig/check.sh
++++ b/scripts/kconfig/check.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ # Needed for systems without gettext
+-$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
++$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
+ #include <libintl.h>
+ int main()
+ {
+diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
+index 82cc3a8..50df490 100644
+--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
++++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
+@@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
+
+ # Check if we can link to ncurses
+ check() {
+- $cc -xc - -o $tmp 2>/dev/null <<'EOF'
++ $cc -x c - -o $tmp 2>/dev/null <<'EOF'
+ #include CURSES_LOC
+ main() {}
+ EOF
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index bccf07dd..3346f42 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -463,6 +463,8 @@ while(<CIN>) {
+ if (defined($configs{$1})) {
+ if ($localyesconfig) {
+ $setconfigs{$1} = 'y';
++ print "$1=y\n";
++ next;
+ } else {
+ $setconfigs{$1} = $2;
+ }
+diff --git a/scripts/package/buildtar b/scripts/package/buildtar
+index 8a7b155..d0d748e 100644
+--- a/scripts/package/buildtar
++++ b/scripts/package/buildtar
+@@ -109,7 +109,7 @@ esac
+ if tar --owner=root --group=root --help >/dev/null 2>&1; then
+ opts="--owner=root --group=root"
+ fi
+- tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
++ tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
+ )
+
+ echo "Tarball successfully created in ${tarball}${file_ext}"
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index d83bafc..193ce81 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -119,6 +119,7 @@ struct loopback_pcm {
+ unsigned int period_size_frac;
+ unsigned long last_jiffies;
+ struct timer_list timer;
++ spinlock_t timer_lock;
+ };
+
+ static struct platform_device *devices[SNDRV_CARDS];
+@@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ unsigned long tick;
+ unsigned int rate_shift = get_rate_shift(dpcm);
+
++ spin_lock(&dpcm->timer_lock);
+ if (rate_shift != dpcm->pcm_rate_shift) {
+ dpcm->pcm_rate_shift = rate_shift;
+ dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
+@@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
+ dpcm->timer.expires = jiffies + tick;
+ add_timer(&dpcm->timer);
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+ {
++ spin_lock(&dpcm->timer_lock);
+ del_timer(&dpcm->timer);
+ dpcm->timer.expires = 0;
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
+@@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ dpcm->substream = substream;
+ setup_timer(&dpcm->timer, loopback_timer_function,
+ (unsigned long)dpcm);
++ spin_lock_init(&dpcm->timer_lock);
+
+ cable = loopback->cables[substream->number][dev];
+ if (!cable) {
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 402f330..94f0c4a 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -139,6 +139,7 @@ struct conexant_spec {
+ unsigned int asus:1;
+ unsigned int pin_eapd_ctrls:1;
+ unsigned int single_adc_amp:1;
++ unsigned int fixup_stereo_dmic:1;
+
+ unsigned int adc_switching:1;
+
+@@ -4113,9 +4114,9 @@ static int cx_auto_init(struct hda_codec *codec)
+
+ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ const char *dir, int cidx,
+- hda_nid_t nid, int hda_dir, int amp_idx)
++ hda_nid_t nid, int hda_dir, int amp_idx, int chs)
+ {
+- static char name[32];
++ static char name[44];
+ static struct snd_kcontrol_new knew[] = {
+ HDA_CODEC_VOLUME(name, 0, 0, 0),
+ HDA_CODEC_MUTE(name, 0, 0, 0),
+@@ -4125,7 +4126,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+
+ for (i = 0; i < 2; i++) {
+ struct snd_kcontrol *kctl;
+- knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
++ knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, chs, amp_idx,
+ hda_dir);
+ knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
+ knew[i].index = cidx;
+@@ -4144,7 +4145,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ }
+
+ #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
+- cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
++ cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0, 3)
+
+ #define cx_auto_add_pb_volume(codec, nid, str, idx) \
+ cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
+@@ -4214,6 +4215,36 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
+ return 0;
+ }
+
++/* Returns zero if this is a normal stereo channel, and non-zero if it should
++ be split in two independent channels.
++ dest_label must be at least 44 characters. */
++static int cx_auto_get_rightch_label(struct hda_codec *codec, const char *label,
++ char *dest_label, int nid)
++{
++ struct conexant_spec *spec = codec->spec;
++ int i;
++
++ if (!spec->fixup_stereo_dmic)
++ return 0;
++
++ for (i = 0; i < AUTO_CFG_MAX_INS; i++) {
++ int def_conf;
++ if (spec->autocfg.inputs[i].pin != nid)
++ continue;
++
++ if (spec->autocfg.inputs[i].type != AUTO_PIN_MIC)
++ return 0;
++ def_conf = snd_hda_codec_get_pincfg(codec, nid);
++ if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT)
++ return 0;
++
++ /* Finally found the inverted internal mic! */
++ snprintf(dest_label, 44, "Inverted %s", label);
++ return 1;
++ }
++ return 0;
++}
++
+ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ const char *label, const char *pfx,
+ int cidx)
+@@ -4222,14 +4253,25 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ int i;
+
+ for (i = 0; i < spec->num_adc_nids; i++) {
++ char rightch_label[44];
+ hda_nid_t adc_nid = spec->adc_nids[i];
+ int idx = get_input_connection(codec, adc_nid, nid);
+ if (idx < 0)
+ continue;
+ if (spec->single_adc_amp)
+ idx = 0;
++
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ /* Make two independent kcontrols for left and right */
++ int err = cx_auto_add_volume_idx(codec, label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 2);
++ }
+ return cx_auto_add_volume_idx(codec, label, pfx,
+- cidx, adc_nid, HDA_INPUT, idx);
++ cidx, adc_nid, HDA_INPUT, idx, 3);
+ }
+ return 0;
+ }
+@@ -4242,9 +4284,19 @@ static int cx_auto_add_boost_volume(struct hda_codec *codec, int idx,
+ int i, con;
+
+ nid = spec->imux_info[idx].pin;
+- if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
++ if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
++ char rightch_label[44];
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ int err = cx_auto_add_volume_idx(codec, label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 2);
++ }
+ return cx_auto_add_volume(codec, label, " Boost", cidx,
+ nid, HDA_INPUT);
++ }
+ con = __select_input_connection(codec, spec->imux_info[idx].adc, nid,
+ &mux, false, 0);
+ if (con < 0)
+@@ -4398,23 +4450,31 @@ static void apply_pincfg(struct hda_codec *codec, const struct cxt_pincfg *cfg)
+
+ }
+
+-static void apply_pin_fixup(struct hda_codec *codec,
++enum {
++ CXT_PINCFG_LENOVO_X200,
++ CXT_PINCFG_LENOVO_TP410,
++ CXT_FIXUP_STEREO_DMIC,
++};
++
++static void apply_fixup(struct hda_codec *codec,
+ const struct snd_pci_quirk *quirk,
+ const struct cxt_pincfg **table)
+ {
++ struct conexant_spec *spec = codec->spec;
++
+ quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+- if (quirk) {
++ if (quirk && table[quirk->value]) {
+ snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
+ quirk->name);
+ apply_pincfg(codec, table[quirk->value]);
+ }
++ if (quirk->value == CXT_FIXUP_STEREO_DMIC) {
++ snd_printdd(KERN_INFO "hda_codec: applying internal mic workaround for %s\n",
++ quirk->name);
++ spec->fixup_stereo_dmic = 1;
++ }
+ }
+
+-enum {
+- CXT_PINCFG_LENOVO_X200,
+- CXT_PINCFG_LENOVO_TP410,
+-};
+-
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
+ { 0x16, 0x042140ff }, /* HP (seq# overridden) */
+@@ -4434,6 +4494,7 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
+ static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
+ [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
+ [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
++ [CXT_FIXUP_STEREO_DMIC] = NULL,
+ };
+
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
+@@ -4447,6 +4508,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ {}
+ };
+
+@@ -4486,10 +4550,10 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ break;
+ case 0x14f15051:
+ add_cx5051_fake_mutes(codec);
+- apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
+ break;
+ default:
+- apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
+ break;
+ }
+
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 323d4d9..0961d88 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -348,7 +348,7 @@ int main(void)
+ fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+ if (fd < 0) {
+ syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ addr.nl_family = AF_NETLINK;
+ addr.nl_pad = 0;
+@@ -360,7 +360,7 @@ int main(void)
+ if (error < 0) {
+ syslog(LOG_ERR, "bind failed; error:%d", error);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ sock_opt = addr.nl_groups;
+ setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
+@@ -378,7 +378,7 @@ int main(void)
+ if (len < 0) {
+ syslog(LOG_ERR, "netlink_send failed; error:%d", len);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+
+ pfd.fd = fd;
+@@ -497,7 +497,7 @@ int main(void)
+ len = netlink_send(fd, incoming_cn_msg);
+ if (len < 0) {
+ syslog(LOG_ERR, "net_link send failed; error:%d", len);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ }
+
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index b98e307..e45d2b1 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -56,7 +56,7 @@ ifeq ($(ARCH),x86_64)
+ ARCH := x86
+ IS_X86_64 := 0
+ ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
+- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
++ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
+ endif
+ ifeq (${IS_X86_64}, 1)
+ RAW_ARCH := x86_64
+diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
+index e8a03ac..7db8da5 100644
+--- a/tools/power/cpupower/Makefile
++++ b/tools/power/cpupower/Makefile
+@@ -100,7 +100,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;}
+ export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
+
+ # check if compiler option is supported
+-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
++cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+ # use '-Os' optimization if available, else use -O2
+ OPTIMIZATION := $(call cc-supports,-Os,-O2)
diff --git a/3.2.31/4420_grsecurity-2.9.1-3.2.31-201210121914.patch b/3.2.32/4420_grsecurity-2.9.1-3.2.32-201210231935.patch
index a84b893..23c9278 100644
--- a/3.2.31/4420_grsecurity-2.9.1-3.2.31-201210121914.patch
+++ b/3.2.32/4420_grsecurity-2.9.1-3.2.32-201210231935.patch
@@ -255,7 +255,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index fd9c414..079c310 100644
+index b6d8282..f804e48 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -11478,7 +11478,7 @@ index cb00ccc..17e9054 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 18601c8..8b0f075 100644
+index 884507e..195f10f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -11541,7 +11541,7 @@ index 18601c8..8b0f075 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -195,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -11572,7 +11572,7 @@ index 18601c8..8b0f075 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -389,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -11588,7 +11588,7 @@ index 18601c8..8b0f075 100644
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
-@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -565,7 +634,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -11597,7 +11597,7 @@ index 18601c8..8b0f075 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -588,7 +657,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -11611,7 +11611,7 @@ index 18601c8..8b0f075 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
+@@ -599,6 +673,20 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -11632,7 +11632,7 @@ index 18601c8..8b0f075 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -763,11 +851,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -30594,7 +30594,7 @@ index 85661b0..c784559a 100644
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
-index 4799393..37bd3ab 100644
+index b97d4f0..7578a4d 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
@@ -31132,7 +31132,7 @@ index c364358..317c8de 100644
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index a6c2f7a..0eea25d 100644
+index 1202198..6487397 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
@@ -31144,7 +31144,7 @@ index a6c2f7a..0eea25d 100644
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
-@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+@@ -883,9 +883,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
@@ -31157,7 +31157,7 @@ index a6c2f7a..0eea25d 100644
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index c8b5bc1..fee4e34 100644
+index 2812d7b..c35ade7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
@@ -31206,7 +31206,7 @@ index c8b5bc1..fee4e34 100644
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 6c3fb44..d49f3ac 100644
+index adac0dd..65f8049 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2196,7 +2196,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
@@ -31218,19 +31218,16 @@ index 6c3fb44..d49f3ac 100644
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
-@@ -6950,9 +6950,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+@@ -6971,7 +6971,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
- atomic_clear_mask(1 << intel_crtc->plane,
-- &obj->pending_flip.counter);
-- if (atomic_read(&obj->pending_flip) == 0)
-+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
-+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
- wake_up(&dev_priv->pending_flip_queue);
-
- schedule_work(&work->work);
-@@ -7147,7 +7146,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
++ atomic_clear_mask_unchecked(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+
+ wake_up(&dev_priv->pending_flip_queue);
+@@ -7167,7 +7167,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
@@ -31245,7 +31242,7 @@ index 6c3fb44..d49f3ac 100644
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
ADVANCE_LP_RING();
-@@ -7279,7 +7284,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -7299,7 +7305,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
@@ -31254,7 +31251,7 @@ index 6c3fb44..d49f3ac 100644
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
-@@ -7293,7 +7298,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -7313,7 +7319,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
@@ -36018,10 +36015,10 @@ index 49b549f..13d648c 100644
mac->phydev = phydev;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
-index ed1be8a..268eb7f 100644
+index 4b43bc5..32d9a1b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
-@@ -702,17 +702,17 @@ struct rtl8169_private {
+@@ -704,17 +704,17 @@ struct rtl8169_private {
struct mdio_ops {
void (*write)(void __iomem *, int, int);
int (*read)(void __iomem *, int);
@@ -37079,7 +37076,7 @@ index 2275162..95f1a92 100644
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index dfee1b3..a454fb6 100644
+index 9005380..c497080 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -37536,7 +37533,7 @@ index ee77a58..af9d518 100644
/* These three are default values which can be overridden */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
-index be9aad8..140879f 100644
+index 22523aa..52f4965 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
@@ -37548,7 +37545,7 @@ index be9aad8..140879f 100644
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-@@ -2986,7 +2986,7 @@ static void start_io(struct ctlr_info *h)
+@@ -3016,7 +3016,7 @@ static void start_io(struct ctlr_info *h)
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
@@ -37557,7 +37554,7 @@ index be9aad8..140879f 100644
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
-@@ -2996,7 +2996,7 @@ static void start_io(struct ctlr_info *h)
+@@ -3026,7 +3026,7 @@ static void start_io(struct ctlr_info *h)
h->Qdepth--;
/* Tell the controller execute command */
@@ -37566,7 +37563,7 @@ index be9aad8..140879f 100644
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
-@@ -3005,17 +3005,17 @@ static void start_io(struct ctlr_info *h)
+@@ -3035,17 +3035,17 @@ static void start_io(struct ctlr_info *h)
static inline unsigned long get_next_completion(struct ctlr_info *h)
{
@@ -37587,7 +37584,7 @@ index be9aad8..140879f 100644
(h->interrupts_enabled == 0);
}
-@@ -3914,7 +3914,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+@@ -3945,7 +3945,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
if (prod_index < 0)
return -ENODEV;
h->product_name = products[prod_index].product_name;
@@ -37596,7 +37593,7 @@ index be9aad8..140879f 100644
if (hpsa_board_disabled(h->pdev)) {
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-@@ -4159,7 +4159,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+@@ -4190,7 +4190,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
assert_spin_locked(&lockup_detector_lock);
remove_ctlr_from_lockup_detector_list(h);
@@ -37605,7 +37602,7 @@ index be9aad8..140879f 100644
spin_lock_irqsave(&h->lock, flags);
h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
spin_unlock_irqrestore(&h->lock, flags);
-@@ -4337,7 +4337,7 @@ reinit_after_soft_reset:
+@@ -4366,7 +4366,7 @@ reinit_after_soft_reset:
}
/* make sure the board interrupts are off */
@@ -37614,7 +37611,7 @@ index be9aad8..140879f 100644
if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
-@@ -4371,7 +4371,7 @@ reinit_after_soft_reset:
+@@ -4400,7 +4400,7 @@ reinit_after_soft_reset:
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
@@ -37623,7 +37620,7 @@ index be9aad8..140879f 100644
spin_unlock_irqrestore(&h->lock, flags);
free_irq(h->intr[h->intr_mode], h);
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
-@@ -4390,9 +4390,9 @@ reinit_after_soft_reset:
+@@ -4419,9 +4419,9 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
@@ -37635,7 +37632,7 @@ index be9aad8..140879f 100644
rc = controller_reset_failed(h->cfgtable);
if (rc)
-@@ -4413,7 +4413,7 @@ reinit_after_soft_reset:
+@@ -4442,7 +4442,7 @@ reinit_after_soft_reset:
}
/* Turn the interrupts on so we can service requests */
@@ -37644,7 +37641,7 @@ index be9aad8..140879f 100644
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
-@@ -4465,7 +4465,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+@@ -4494,7 +4494,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
* To write all data in the battery backed cache to disks
*/
hpsa_flush_cache(h);
@@ -37653,7 +37650,7 @@ index be9aad8..140879f 100644
free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
-@@ -4629,7 +4629,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+@@ -4658,7 +4658,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
return;
}
/* Change the access methods to the performant access methods */
@@ -37663,7 +37660,7 @@ index be9aad8..140879f 100644
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
-index 91edafb..a9b88ec 100644
+index c721509..8be5717 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -73,7 +73,7 @@ struct ctlr_info {
@@ -38214,7 +38211,7 @@ index 6c4b620..78feefb 100644
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index bb7c482..7551a95 100644
+index 08d48a3..2be990d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
@@ -38788,7 +38785,7 @@ index ed147c4..94fc3c6 100644
/* core tmem accessor functions */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index 2ff1255..fa7cfed 100644
+index f35cb10..187b92a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1351,7 +1351,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
@@ -39134,10 +39131,10 @@ index ef92869..f4ebd88 100644
ipwireless_disassociate_network_ttys(network,
ttyj->channel_idx);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
-index fc7bbba..9527e93 100644
+index d190269..f59727e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
-@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
kref_init(&dlci->ref);
mutex_init(&dlci->mutex);
dlci->fifo = &dlci->_fifo;
@@ -39147,10 +39144,10 @@ index fc7bbba..9527e93 100644
return NULL;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index 39d6ab6..eb97f41 100644
+index 8481aae..e1a589c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
-@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+@@ -2124,6 +2124,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = tty_ldisc_N_TTY;
ops->owner = NULL;
@@ -43187,7 +43184,7 @@ index a6395bd..f1e376a 100644
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 6ff96c6..4654354 100644
+index 8dd615c..ea0baaa 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -32,6 +32,7 @@
@@ -43855,7 +43852,7 @@ index 6ff96c6..4654354 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1862,14 +2309,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1851,14 +2298,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -43872,7 +43869,7 @@ index 6ff96c6..4654354 100644
return size;
}
-@@ -1963,7 +2410,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1952,7 +2399,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -43881,7 +43878,7 @@ index 6ff96c6..4654354 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -1977,10 +2424,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1966,10 +2413,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -43894,7 +43891,7 @@ index 6ff96c6..4654354 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -1994,7 +2443,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1983,7 +2432,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -43903,7 +43900,7 @@ index 6ff96c6..4654354 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2005,6 +2454,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,6 +2443,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -43911,7 +43908,7 @@ index 6ff96c6..4654354 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2029,7 +2479,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2018,7 +2468,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -43920,7 +43917,7 @@ index 6ff96c6..4654354 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2038,6 +2488,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2027,6 +2477,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -43928,7 +43925,7 @@ index 6ff96c6..4654354 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2055,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2044,6 +2495,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -43936,7 +43933,7 @@ index 6ff96c6..4654354 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2075,6 +2527,97 @@ out:
+@@ -2064,6 +2516,97 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -44965,10 +44962,10 @@ index f3a257d..715ac0f 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index 7c7556b..4e6f039 100644
+index a9be90d..3cf866c 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
-@@ -696,7 +696,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+@@ -705,7 +705,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
old_fs = get_fs();
set_fs(get_ds());
rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
@@ -44977,7 +44974,7 @@ index 7c7556b..4e6f039 100644
lower_bufsiz);
set_fs(old_fs);
if (rc < 0)
-@@ -742,7 +742,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -751,7 +751,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
}
old_fs = get_fs();
set_fs(get_ds());
@@ -44986,7 +44983,7 @@ index 7c7556b..4e6f039 100644
set_fs(old_fs);
if (rc < 0) {
kfree(buf);
-@@ -757,7 +757,7 @@ out:
+@@ -766,7 +766,7 @@ out:
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
@@ -47850,7 +47847,7 @@ index e513f19..2ab1351 100644
jffs2_prealloc_raw_node_refs(c, jeb, 1);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
-index b09e51d..e482afa 100644
+index 464cd76..3a3ed7e 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
@@ -48617,7 +48614,7 @@ index ca4913a..8d4cf9e 100644
error = lock_mount(&old);
if (error)
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
-index d774309..198ec0a 100644
+index 1aaa0ee..c5cc5bd 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -92,7 +92,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
@@ -52213,10 +52210,10 @@ index 0000000..1b9afa9
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..34026bb
+index 0000000..ddf281c
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4190 @@
+@@ -0,0 +1,4202 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -53694,6 +53691,7 @@ index 0000000..34026bb
+copy_user_acl(struct gr_arg *arg)
+{
+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct acl_subject_label *subj_list;
+ struct sprole_pw *sptmp;
+ struct gr_hash_struct *ghash;
+ uid_t *domainlist;
@@ -53822,14 +53820,21 @@ index 0000000..34026bb
+ r_tmp->subj_hash_size *
+ sizeof (struct acl_subject_label *));
+
-+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
-+
-+ if (err)
-+ return err;
++ /* acquire the list of subjects, then NULL out
++ the list prior to parsing the subjects for this role,
++ as during this parsing the list is replaced with a list
++ of *nested* subjects for the role
++ */
++ subj_list = r_tmp->hash->first;
+
+ /* set nested subject list to null */
+ r_tmp->hash->first = NULL;
+
++ err = copy_user_subjs(subj_list, r_tmp);
++
++ if (err)
++ return err;
++
+ insert_acl_role_label(r_tmp);
+ }
+
@@ -54848,8 +54853,9 @@ index 0000000..34026bb
+ matchpo->mode |= GR_DELETED;
+ FOR_EACH_SUBJECT_END(subj,x)
+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+ if (subj->inode == ino && subj->device == dev)
-+ subj->mode |= GR_DELETED;
++ /* nested subjects aren't in the role's subj_hash table */
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
+ matchps->mode |= GR_DELETED;
@@ -55007,6 +55013,9 @@ index 0000000..34026bb
+ subj->inode = ino;
+ subj->device = dev;
+ }
++ /* nested subjects aren't in the role's subj_hash table */
++ update_acl_obj_label(matchn->inode, matchn->device,
++ ino, dev, subj);
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ FOR_EACH_SUBJECT_START(role, subj, x)
+ update_acl_obj_label(matchn->inode, matchn->device,
@@ -66104,7 +66113,7 @@ index 10422ef..662570f 100644
fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
-index e5a7b9a..dc75cc1 100644
+index 416dcb0..dc75cc1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -509,7 +509,7 @@ struct ip_vs_conn {
@@ -66125,15 +66134,6 @@ index e5a7b9a..dc75cc1 100644
atomic_t weight; /* server weight */
atomic_t refcnt; /* reference counter */
-@@ -1353,7 +1353,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
- struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-
- if (!ct || !nf_ct_is_untracked(ct)) {
-- nf_reset(skb);
-+ nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
index 69b610a..fe3962c 100644
--- a/include/net/irda/ircomm_core.h
@@ -70525,10 +70525,10 @@ index 764825c..3aa6ac4 100644
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
per_cpu(rcu_torture_count, cpu)[i] = 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
-index 6b76d81..7afc1b3 100644
+index a122196..78d44bb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
-@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
+@@ -369,9 +369,9 @@ void rcu_enter_nohz(void)
trace_rcu_dyntick("Start");
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
@@ -70540,7 +70540,7 @@ index 6b76d81..7afc1b3 100644
local_irq_restore(flags);
}
-@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
+@@ -393,10 +393,10 @@ void rcu_exit_nohz(void)
return;
}
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
@@ -70553,7 +70553,7 @@ index 6b76d81..7afc1b3 100644
trace_rcu_dyntick("End");
local_irq_restore(flags);
}
-@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
+@@ -413,14 +413,14 @@ void rcu_nmi_enter(void)
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
if (rdtp->dynticks_nmi_nesting == 0 &&
@@ -70571,7 +70571,7 @@ index 6b76d81..7afc1b3 100644
}
/**
-@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
+@@ -439,9 +439,9 @@ void rcu_nmi_exit(void)
return;
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic_inc(); /* See above. */
@@ -70583,7 +70583,7 @@ index 6b76d81..7afc1b3 100644
}
/**
-@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
+@@ -476,7 +476,7 @@ void rcu_irq_exit(void)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
@@ -70592,7 +70592,7 @@ index 6b76d81..7afc1b3 100644
return 0;
}
-@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+@@ -491,7 +491,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
unsigned int curr;
unsigned int snap;
@@ -70601,7 +70601,7 @@ index 6b76d81..7afc1b3 100644
snap = (unsigned int)rdp->dynticks_snap;
/*
-@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+@@ -1554,7 +1554,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Do RCU core processing for the current CPU.
*/
@@ -71116,7 +71116,7 @@ index 2c71d91..1021f81 100644
struct tasklet_struct *list;
diff --git a/kernel/sys.c b/kernel/sys.c
-index 481611f..4f3d936 100644
+index c504302..b76c328 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -71132,7 +71132,7 @@ index 481611f..4f3d936 100644
no_nice = security_task_setnice(p, niceval);
if (no_nice) {
error = no_nice;
-@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+@@ -573,6 +579,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
goto error;
}
@@ -71142,7 +71142,7 @@ index 481611f..4f3d936 100644
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old->gid))
new->sgid = new->egid;
-@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+@@ -602,6 +611,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
old = current_cred();
retval = -EPERM;
@@ -71153,7 +71153,7 @@ index 481611f..4f3d936 100644
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = gid;
else if (gid == old->gid || gid == old->sgid)
-@@ -618,7 +631,7 @@ error:
+@@ -619,7 +632,7 @@ error:
/*
* change the user struct in a credentials set to match the new UID
*/
@@ -71162,7 +71162,7 @@ index 481611f..4f3d936 100644
{
struct user_struct *new_user;
-@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+@@ -689,6 +702,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
goto error;
}
@@ -71172,7 +71172,7 @@ index 481611f..4f3d936 100644
if (new->uid != old->uid) {
retval = set_user(new);
if (retval < 0)
-@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+@@ -733,6 +749,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
old = current_cred();
retval = -EPERM;
@@ -71185,7 +71185,7 @@ index 481611f..4f3d936 100644
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = uid;
if (uid != old->uid) {
-@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+@@ -787,6 +809,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
goto error;
}
@@ -71195,7 +71195,7 @@ index 481611f..4f3d936 100644
if (ruid != (uid_t) -1) {
new->uid = ruid;
if (ruid != old->uid) {
-@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+@@ -851,6 +876,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
goto error;
}
@@ -71205,7 +71205,7 @@ index 481611f..4f3d936 100644
if (rgid != (gid_t) -1)
new->gid = rgid;
if (egid != (gid_t) -1)
-@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+@@ -897,6 +925,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
old = current_cred();
old_fsuid = old->fsuid;
@@ -71215,7 +71215,7 @@ index 481611f..4f3d936 100644
if (uid == old->uid || uid == old->euid ||
uid == old->suid || uid == old->fsuid ||
nsown_capable(CAP_SETUID)) {
-@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+@@ -907,6 +938,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
}
}
@@ -71223,7 +71223,7 @@ index 481611f..4f3d936 100644
abort_creds(new);
return old_fsuid;
-@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+@@ -933,12 +965,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
if (gid == old->gid || gid == old->egid ||
gid == old->sgid || gid == old->fsgid ||
nsown_capable(CAP_SETGID)) {
@@ -71240,7 +71240,7 @@ index 481611f..4f3d936 100644
abort_creds(new);
return old_fsgid;
-@@ -1170,13 +1206,13 @@ DECLARE_RWSEM(uts_sem);
+@@ -1171,13 +1207,13 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
@@ -71257,7 +71257,7 @@ index 481611f..4f3d936 100644
int ndots = 0;
unsigned v;
-@@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
+@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
@@ -71268,7 +71268,7 @@ index 481611f..4f3d936 100644
ret = copy_to_user(release, buf, len);
}
return ret;
-@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+@@ -1244,19 +1283,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
return -EFAULT;
down_read(&uts_sem);
@@ -71293,7 +71293,7 @@ index 481611f..4f3d936 100644
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
-@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+@@ -1721,7 +1760,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
@@ -72160,10 +72160,10 @@ index 209b379..7f76423 100644
put_task_struct(tsk);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index b413138..37b3fa6 100644
+index 43a19c5..c815189 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -3447,7 +3447,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
+@@ -3468,7 +3468,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
*/
worker_flags |= WORKER_REBIND;
worker_flags &= ~WORKER_ROGUE;
@@ -72655,7 +72655,7 @@ index 8f005e9..1cb1036 100644
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 0f897b8..5a74f92 100644
+index d6c0fdf..e87d21d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2461,6 +2461,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -73604,10 +73604,10 @@ index 70f5daf..0964853 100644
* Make sure the vDSO gets into every core dump.
* Dumping its contents makes post-mortem fully interpretable later
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 11b8d47..3792cfe 100644
+index 4c82c21..f0fb9eb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
-@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long vmstart;
unsigned long vmend;
@@ -73618,8 +73618,8 @@ index 11b8d47..3792cfe 100644
vma = find_vma_prev(mm, start, &prev);
if (!vma || vma->vm_start > start)
return -EFAULT;
-@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- err = policy_vma(vma, new_pol);
+@@ -693,6 +697,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
+
@@ -73635,7 +73635,7 @@ index 11b8d47..3792cfe 100644
}
out:
-@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+@@ -1126,6 +1140,17 @@ static long do_mbind(unsigned long start, unsigned long len,
if (end < start)
return -EINVAL;
@@ -73653,7 +73653,7 @@ index 11b8d47..3792cfe 100644
if (end == start)
return 0;
-@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1344,6 +1369,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
if (!mm)
goto out;
@@ -73668,7 +73668,7 @@ index 11b8d47..3792cfe 100644
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
-@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1353,8 +1386,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
rcu_read_lock();
tcred = __task_cred(task);
if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
@@ -75811,7 +75811,7 @@ index 7a82174..75d1c8b 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index cd3ab93..3ff7a99 100644
+index 4c3b671..6c47937 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -151,7 +151,7 @@
diff --git a/3.2.31/4430_grsec-remove-localversion-grsec.patch b/3.2.32/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.31/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.32/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.31/4435_grsec-mute-warnings.patch b/3.2.32/4435_grsec-mute-warnings.patch
index e85abd6..e85abd6 100644
--- a/3.2.31/4435_grsec-mute-warnings.patch
+++ b/3.2.32/4435_grsec-mute-warnings.patch
diff --git a/3.2.31/4440_grsec-remove-protected-paths.patch b/3.2.32/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.2.31/4440_grsec-remove-protected-paths.patch
+++ b/3.2.32/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.31/4450_grsec-kconfig-default-gids.patch b/3.2.32/4450_grsec-kconfig-default-gids.patch
index d4b0b7e..d4b0b7e 100644
--- a/3.2.31/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.32/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.31/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.32/4465_selinux-avc_audit-log-curr_ip.patch
index 3ea7bcc..3ea7bcc 100644
--- a/3.2.31/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.32/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.31/4470_disable-compat_vdso.patch b/3.2.32/4470_disable-compat_vdso.patch
index 4742d01..4742d01 100644
--- a/3.2.31/4470_disable-compat_vdso.patch
+++ b/3.2.32/4470_disable-compat_vdso.patch
diff --git a/3.6.2/0000_README b/3.6.3/0000_README
index d690128..4ee0b69 100644
--- a/3.6.2/0000_README
+++ b/3.6.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.9.1-3.6.2-201210151829.patch
+Patch: 4420_grsecurity-2.9.1-3.6.3-201210231942.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.6.3/1002_linux-3.6.3.patch b/3.6.3/1002_linux-3.6.3.patch
new file mode 100644
index 0000000..70fa991
--- /dev/null
+++ b/3.6.3/1002_linux-3.6.3.patch
@@ -0,0 +1,3132 @@
+diff --git a/Makefile b/Makefile
+index af5d6a9..6cdadf4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 6
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Terrified Chipmunk
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 2f88d8d..48c19d4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1413,6 +1413,16 @@ config PL310_ERRATA_769419
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
++config ARM_ERRATA_775420
++ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
++ depends on CPU_V7
++ help
++ This option enables the workaround for the 775420 Cortex-A9 (r2p2,
++ r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance
++ operation aborts with MMU exception, it might cause the processor
++ to deadlock. This workaround puts DSB before executing ISB if
++ an abort may occur on cache maintenance.
++
+ endmenu
+
+ source "arch/arm/common/Kconfig"
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 3d5fc41..bf53047 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -28,7 +28,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+@@ -52,7 +52,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index 39e3fb3..3b17227 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
+ * isn't mapped, fail with -EFAULT.
+ */
+ 9001:
++#ifdef CONFIG_ARM_ERRATA_775420
++ dsb
++#endif
+ mov r0, #-EFAULT
+ mov pc, lr
+ UNWIND(.fnend )
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index dbf1e03..2bc51fb 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -55,22 +55,29 @@ static u32 notrace omap_32k_read_sched_clock(void)
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ static void omap_read_persistent_clock(struct timespec *ts)
+ {
+ unsigned long long nsecs;
+- cycles_t delta;
+- struct timespec *tsp = &persistent_ts;
++ cycles_t last_cycles;
++ unsigned long flags;
++
++ spin_lock_irqsave(&read_persistent_clock_lock, flags);
+
+ last_cycles = cycles;
+ cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
+- delta = cycles - last_cycles;
+
+- nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++ nsecs = clocksource_cyc2ns(cycles - last_cycles,
++ persistent_mult, persistent_shift);
++
++ timespec_add_ns(&persistent_ts, nsecs);
++
++ *ts = persistent_ts;
+
+- timespec_add_ns(tsp, nsecs);
+- *ts = *tsp;
++ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+
+ /**
+diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
+index d272857..579f452 100644
+--- a/arch/mips/ath79/clock.c
++++ b/arch/mips/ath79/clock.c
+@@ -17,6 +17,8 @@
+ #include <linux/err.h>
+ #include <linux/clk.h>
+
++#include <asm/div64.h>
++
+ #include <asm/mach-ath79/ath79.h>
+ #include <asm/mach-ath79/ar71xx_regs.h>
+ #include "common.h"
+@@ -166,11 +168,34 @@ static void __init ar933x_clocks_init(void)
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+ }
+
++static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
++ u32 frac, u32 out_div)
++{
++ u64 t;
++ u32 ret;
++
++ t = ath79_ref_clk.rate;
++ t *= nint;
++ do_div(t, ref_div);
++ ret = t;
++
++ t = ath79_ref_clk.rate;
++ t *= nfrac;
++ do_div(t, ref_div * frac);
++ ret += t;
++
++ ret /= (1 << out_div);
++ return ret;
++}
++
+ static void __init ar934x_clocks_init(void)
+ {
+- u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
++ u32 pll, out_div, ref_div, nint, nfrac, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
++ void __iomem *dpll_base;
++
++ dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+@@ -178,33 +203,59 @@ static void __init ar934x_clocks_init(void)
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+- pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+- out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+- ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+- nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_NINT_MASK;
+- frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+-
+- cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+- cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+- cpu_pll /= (1 << out_div);
+-
+- pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+- out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+- ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+- nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_NINT_MASK;
+- frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+-
+- ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+- ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+- ddr_pll /= (1 << out_div);
++ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
++ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++ AR934X_SRIF_DPLL2_OUTDIV_MASK;
++ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL1_REG);
++ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++ AR934X_SRIF_DPLL1_NINT_MASK;
++ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++ AR934X_SRIF_DPLL1_REFDIV_MASK;
++ frac = 1 << 18;
++ } else {
++ pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
++ out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
++ ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
++ nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_NINT_MASK;
++ nfrac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
++ frac = 1 << 6;
++ }
++
++ cpu_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++ nfrac, frac, out_div);
++
++ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL2_REG);
++ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++ AR934X_SRIF_DPLL2_OUTDIV_MASK;
++ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL1_REG);
++ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++ AR934X_SRIF_DPLL1_NINT_MASK;
++ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++ AR934X_SRIF_DPLL1_REFDIV_MASK;
++ frac = 1 << 18;
++ } else {
++ pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
++ out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
++ ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
++ nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_NINT_MASK;
++ nfrac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
++ frac = 1 << 10;
++ }
++
++ ddr_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++ nfrac, frac, out_div);
+
+ clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+@@ -240,6 +291,8 @@ static void __init ar934x_clocks_init(void)
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
++
++ iounmap(dpll_base);
+ }
+
+ void __init ath79_clocks_init(void)
+diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+index dde5044..31a9a7c 100644
+--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+@@ -63,6 +63,8 @@
+
+ #define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+ #define AR934X_WMAC_SIZE 0x20000
++#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
++#define AR934X_SRIF_SIZE 0x1000
+
+ /*
+ * DDR_CTRL block
+@@ -399,4 +401,25 @@
+ #define AR933X_GPIO_COUNT 30
+ #define AR934X_GPIO_COUNT 23
+
++/*
++ * SRIF block
++ */
++#define AR934X_SRIF_CPU_DPLL1_REG 0x1c0
++#define AR934X_SRIF_CPU_DPLL2_REG 0x1c4
++#define AR934X_SRIF_CPU_DPLL3_REG 0x1c8
++
++#define AR934X_SRIF_DDR_DPLL1_REG 0x240
++#define AR934X_SRIF_DDR_DPLL2_REG 0x244
++#define AR934X_SRIF_DDR_DPLL3_REG 0x248
++
++#define AR934X_SRIF_DPLL1_REFDIV_SHIFT 27
++#define AR934X_SRIF_DPLL1_REFDIV_MASK 0x1f
++#define AR934X_SRIF_DPLL1_NINT_SHIFT 18
++#define AR934X_SRIF_DPLL1_NINT_MASK 0x1ff
++#define AR934X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
++
++#define AR934X_SRIF_DPLL2_LOCAL_PLL BIT(30)
++#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
++#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
++
+ #endif /* __ASM_MACH_AR71XX_REGS_H */
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index f4546e9..23817a6 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+
++#ifdef CONFIG_KPROBES
++ /*
++ * Return immediately if the kprobes fault notifier has set
++ * DIE_PAGE_FAULT.
++ */
++ if (cmd == DIE_PAGE_FAULT)
++ return NOTIFY_DONE;
++#endif /* CONFIG_KPROBES */
++
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 58790bd..05afcca 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+ KBUILD_CFLAGS += $(mflags-y)
+ KBUILD_AFLAGS += $(mflags-y)
+
+-archscripts:
++archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
+ ###
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 1fbe75a..c1461de 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -984,7 +984,16 @@ static void xen_write_cr4(unsigned long cr4)
+
+ native_write_cr4(cr4);
+ }
+-
++#ifdef CONFIG_X86_64
++static inline unsigned long xen_read_cr8(void)
++{
++ return 0;
++}
++static inline void xen_write_cr8(unsigned long val)
++{
++ BUG_ON(val);
++}
++#endif
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
+@@ -1153,6 +1162,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = xen_write_cr4,
+
++#ifdef CONFIG_X86_64
++ .read_cr8 = xen_read_cr8,
++ .write_cr8 = xen_write_cr8,
++#endif
++
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
+@@ -1161,6 +1175,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
++ .read_tscp = native_read_tscp,
++
+ .iret = xen_iret,
+ .irq_enable_sysexit = xen_sysexit,
+ #ifdef CONFIG_X86_64
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ee3cb3a..8471fb7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -696,7 +696,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ q->request_fn = rfn;
+ q->prep_rq_fn = NULL;
+ q->unprep_rq_fn = NULL;
+- q->queue_flags = QUEUE_FLAG_DEFAULT;
++ q->queue_flags |= QUEUE_FLAG_DEFAULT;
+
+ /* Override internal queue lock with supplied lock pointer */
+ if (lock)
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 7edaccc..a51df96 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -71,9 +71,6 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+
+-#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
+- per one transaction */
+-
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
++/*
++ * If the number of false interrupts per one transaction exceeds
++ * this threshold, will think there is a GPE storm happened and
++ * will disable the GPE for normal transaction.
++ */
++static unsigned int ec_storm_threshold __read_mostly = 8;
++module_param(ec_storm_threshold, uint, 0644);
++MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
++
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+ /* External interfaces use first EC only, so remember */
+ typedef int (*acpi_ec_query_func) (void *data);
+@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ msleep(1);
+ /* It is safe to enable the GPE outside of the transaction. */
+ acpi_enable_gpe(NULL, ec->gpe);
+- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++ } else if (t->irq_count > ec_storm_threshold) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+@@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
++ * the GPE storm threshold back to 20
++ */
++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
++{
++ pr_debug("Setting the EC GPE storm threshold to 20\n");
++ ec_storm_threshold = 20;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
++ {
++ ec_enlarge_storm_threshold, "CLEVO hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ {},
+ };
+
+-
+ int __init acpi_ec_ecdt_probe(void)
+ {
+ acpi_status status;
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 817f0ee..4dc8024 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1186,17 +1186,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+ {
+ struct tpm_chip *chip = file->private_data;
+- size_t in_size = size, out_size;
++ size_t in_size = size;
++ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout */
+- while (atomic_read(&chip->data_pending) != 0)
+- msleep(TPM_TIMEOUT);
+-
+- mutex_lock(&chip->buffer_mutex);
++ either via tpm_read or a user_read_timer timeout.
++ This also prevents splitted buffered writes from blocking here.
++ */
++ if (atomic_read(&chip->data_pending) != 0)
++ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+- in_size = TPM_BUFSIZE;
++ return -E2BIG;
++
++ mutex_lock(&chip->buffer_mutex);
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1206,6 +1209,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++ if (out_size < 0) {
++ mutex_unlock(&chip->buffer_mutex);
++ return out_size;
++ }
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 2783f69..f8d2287 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -473,8 +473,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
+ fill_bus_reset_event(&bus_reset, client);
+- ret = copy_to_user(u64_to_uptr(a->bus_reset),
+- &bus_reset, sizeof(bus_reset));
++ /* unaligned size of bus_reset is 36 bytes */
++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ }
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 274d25d..97d4f4b 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3893,7 +3893,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = drm_irq_install(dev);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a3e53c5..f02cfad 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -513,7 +513,7 @@
+ */
+ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+ #define _3D_CHICKEN3 0x02090
+-#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
++#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+
+ #define MI_MODE 0x0209c
+ # define VS_TIMER_DISPATCH (1 << 6)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0c7f4aa..b634f6f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4351,7 +4351,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+@@ -4705,7 +4705,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 8c73fae..c23c9ea 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3355,8 +3355,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+- I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+- _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
++ I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
++ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec the following bits should be
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 670e991..d16f50f 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -974,11 +974,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+ {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+- if (tmds) {
+- if (tmds->i2c_bus)
+- radeon_i2c_destroy(tmds->i2c_bus);
+- }
++ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0138a72..a48c215 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3158,7 +3158,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+- max_sync = max_sync;
++ max_sync = bad_sectors;
+ continue;
+ }
+ }
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index a11253a..c429abd 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2914,8 +2914,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
++ chip->options |= NAND_NO_READRDY;
+
+ pr_info("ONFI flash detected\n");
+ return 1;
+@@ -3080,9 +3079,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+ }
+- /* Get chip options, preserve non chip based options */
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
++ /* Get chip options */
++ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index cb3356c..04668b4 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -175,13 +175,13 @@ struct e1000_info;
+ /*
+ * in the case of WTHRESH, it appears at least the 82571/2 hardware
+ * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+- * WTHRESH=4, and since we want 64 bytes at a time written back, set
+- * it to 5
++ * WTHRESH=4, so a setting of 5 gives the most efficient bus
++ * utilization but to avoid possible Tx stalls, set it to 1
+ */
+ #define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+- (5 << 16) | /* wthresh must be +1 more than desired */\
++ (1 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index d01a099..a46e75e 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
+ * set up some performance related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts, depends
+ * on the tx_int_delay to be enabled,
+- * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
++ * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
+ * hthresh = 1 ==> prefetch when one or more available
+ * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+ * BEWARE: this seems to work but should be considered first if
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 03c2d8d..cc7e720 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -117,6 +117,7 @@ enum {
+ struct mcs7830_data {
+ u8 multi_filter[8];
+ u8 config;
++ u8 link_counter;
+ };
+
+ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
+@@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+ {
+ u8 *buf = urb->transfer_buffer;
+- bool link;
++ bool link, link_changed;
++ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ if (urb->actual_length < 16)
+ return;
+
+ link = !(buf[1] & 0x20);
+- if (netif_carrier_ok(dev->net) != link) {
+- if (link) {
+- netif_carrier_on(dev->net);
+- usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+- } else
+- netif_carrier_off(dev->net);
+- netdev_dbg(dev->net, "Link Status is: %d\n", link);
+- }
++ link_changed = netif_carrier_ok(dev->net) != link;
++ if (link_changed) {
++ data->link_counter++;
++ /*
++ track link state 20 times to guard against erroneous
++ link state changes reported sometimes by the chip
++ */
++ if (data->link_counter > 20) {
++ data->link_counter = 0;
++ if (link) {
++ netif_carrier_on(dev->net);
++ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
++ } else
++ netif_carrier_off(dev->net);
++ netdev_dbg(dev->net, "Link Status is: %d\n", link);
++ }
++ } else
++ data->link_counter = 0;
+ }
+
+ static const struct driver_info moschip_info = {
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index 76f07d8..1b48414 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, XMIT, "CABQ TX failed\n");
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a22df74..61e08e6 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -767,7 +767,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+
+ return;
+ exit:
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+
+ static void ath9k_stop(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 0d4155a..423a9f3 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb,
+- bool dequeue);
++ struct sk_buff *skb);
+
+ enum {
+ MCS_HT20,
+@@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+- if (bf && fi->retries) {
++ if (!bf) {
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
++ if (!bf) {
++ ieee80211_free_txskb(sc->hw, skb);
++ continue;
++ }
++ }
++
++ if (fi->retries) {
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+@@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+- if (!bf)
++ if (!bf) {
++ __skb_unlink(skb, &tid->buf_q);
++ ieee80211_free_txskb(sc->hw, skb);
+ continue;
++ }
+
+ bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
+ seqno = bf->bf_state.seqno;
+@@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
+ return;
+ }
+
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+- if (!bf)
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ if (!bf) {
++ ieee80211_free_txskb(sc->hw, skb);
+ return;
++ }
+
+ bf->bf_state.bf_type = BUF_AMPDU;
+ INIT_LIST_HEAD(&bf_head);
+@@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_buf *bf;
+
+ bf = fi->bf;
+- if (!bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
+-
+- if (!bf)
+- return;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
+@@ -1834,8 +1841,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb,
+- bool dequeue)
++ struct sk_buff *skb)
+ {
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_frame_info *fi = get_frame_info(skb);
+@@ -1847,7 +1853,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ bf = ath_tx_get_buffer(sc);
+ if (!bf) {
+ ath_dbg(common, XMIT, "TX buffers are full\n");
+- goto error;
++ return NULL;
+ }
+
+ ATH_TXBUF_RESET(bf);
+@@ -1876,18 +1882,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "dma_mapping_error() on TX\n");
+ ath_tx_return_buffer(sc, bf);
+- goto error;
++ return NULL;
+ }
+
+ fi->bf = bf;
+
+ return bf;
+-
+-error:
+- if (dequeue)
+- __skb_unlink(skb, &tid->buf_q);
+- dev_kfree_skb_any(skb);
+- return NULL;
+ }
+
+ /* FIXME: tx power */
+@@ -1916,9 +1916,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+ */
+ ath_tx_send_ampdu(sc, tid, skb, txctl);
+ } else {
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+- if (!bf)
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ if (!bf) {
++ if (txctl->paprd)
++ dev_kfree_skb_any(skb);
++ else
++ ieee80211_free_txskb(sc->hw, skb);
+ return;
++ }
+
+ bf->bf_state.bfs_paprd = txctl->paprd;
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 5b30132..41b74ba 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1403,7 +1403,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+- ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
++ ctio->u.status1.sense_data[0] = resp_code;
+
+ qla2x00_start_iocbs(ha, ha->req);
+ }
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 182d5a5..f4cc413 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2054,7 +2054,7 @@ static void unmap_region(sector_t lba, unsigned int len)
+ block = lba + alignment;
+ rem = do_div(block, granularity);
+
+- if (rem == 0 && lba + granularity <= end && block < map_size) {
++ if (rem == 0 && lba + granularity < end && block < map_size) {
+ clear_bit(block, map_storep);
+ if (scsi_debug_lbprz)
+ memset(fake_storep +
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 528d52b..0144078 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and return to us
++ * There is a potential race here where the host may be in
++ * the process of responding when we return from here.
++ * Just wait for all in-transit packets to be accounted for
++ * before we return from here.
+ */
++ storvsc_wait_to_drain(stor_device);
+
+ return SUCCESS;
+ }
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 3e79a2f..7554d78 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct scatterlist sg;
+ unsigned long flags;
+
+- sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
++ sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 97c0f78..dd4fce2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3271,7 +3271,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+- spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+@@ -3424,6 +3423,7 @@ static int iscsit_send_reject(
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
++ hdr->ffffffff = 0xffffffff;
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 8a908b2..a90294f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES 5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+-#define NA_NOPIN_TIMEOUT 5
++#define NA_NOPIN_TIMEOUT 15
+ #define NA_NOPIN_TIMEOUT_MAX 60
+ #define NA_NOPIN_TIMEOUT_MIN 3
+-#define NA_NOPIN_RESPONSE_TIMEOUT 5
++#define NA_NOPIN_RESPONSE_TIMEOUT 30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index a38a3f8..de9ea32 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -677,6 +677,12 @@ int iscsit_ta_generate_node_acls(
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
++ if (flag == 1 && a->cache_dynamic_acls == 0) {
++ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++ "generate_node_acls=1\n");
++ a->cache_dynamic_acls = 1;
++ }
++
+ return 0;
+ }
+
+@@ -716,6 +722,12 @@ int iscsit_ta_cache_dynamic_acls(
+ return -EINVAL;
+ }
+
++ if (a->generate_node_acls == 1 && flag == 0) {
++ pr_debug("Skipping cache_dynamic_acls=0 when"
++ " generate_node_acls=1\n");
++ return 0;
++ }
++
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 801efa8..06aca11 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3132,6 +3132,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3147,6 +3148,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+ config_group_init_type_name(&alua_group,
+@@ -3162,6 +3164,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3173,14 +3176,17 @@ static int __init target_core_init_configfs(void)
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+- if (IS_ERR(lu_gp))
++ if (IS_ERR(lu_gp)) {
++ ret = -ENOMEM;
+ goto out_global;
++ }
+
+ lu_gp_cg = &alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cbb5aaf..5c5ed7a 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -125,6 +125,19 @@ static struct se_device *fd_create_virtdevice(
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++ /*
++ * Optionally allow fd_buffered_io=1 to be enabled for people
++ * who want use the fs buffer cache as an WriteCache mechanism.
++ *
++ * This means that in event of a hard failure, there is a risk
++ * of silent data-loss if the SCSI client has *not* performed a
++ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++ * to write-out the entire device cache.
++ */
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++ flags &= ~O_DSYNC;
++ }
+
+ file = filp_open(fd_dev->fd_dev_name, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -188,6 +201,12 @@ static struct se_device *fd_create_virtdevice(
+ if (!dev)
+ goto fail;
+
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++ " with FDBD_HAS_BUFFERED_IO_WCE\n");
++ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++ }
++
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+@@ -407,6 +426,7 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
++ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -418,7 +438,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, token;
++ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -459,6 +479,19 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
++ case Opt_fd_buffered_io:
++ match_int(args, &arg);
++ if (arg != 1) {
++ pr_err("bogus fd_buffered_io=%d value\n", arg);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ pr_debug("FILEIO: Using buffered I/O"
++ " operations for struct fd_dev\n");
++
++ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++ break;
+ default:
+ break;
+ }
+@@ -490,8 +523,10 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++ "Buffered-WCE" : "O_DSYNC");
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 70ce7fd..876ae53 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -14,6 +14,7 @@
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 388a922..9229bd9 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -600,30 +600,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+- unsigned char *buf, *map_buf;
++ unsigned char *rbuf;
+ unsigned char *cdb = cmd->t_task_cdb;
++ unsigned char buf[SE_INQUIRY_BUF];
+ int p, ret;
+
+- map_buf = transport_kmap_data_sg(cmd);
+- /*
+- * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+- * know we actually allocated a full page. Otherwise, if the
+- * data buffer is too small, allocate a temporary buffer so we
+- * don't have to worry about overruns in all our INQUIRY
+- * emulation handling.
+- */
+- if (cmd->data_length < SE_INQUIRY_BUF &&
+- (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+- buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+- if (!buf) {
+- transport_kunmap_data_sg(cmd);
+- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+- return -ENOMEM;
+- }
+- } else {
+- buf = map_buf;
+- }
+-
+ if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+ buf[0] = 0x3f; /* Not connected */
+ else
+@@ -655,11 +636,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ ret = -EINVAL;
+
+ out:
+- if (buf != map_buf) {
+- memcpy(map_buf, buf, cmd->data_length);
+- kfree(buf);
++ rbuf = transport_kmap_data_sg(cmd);
++ if (rbuf) {
++ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
++ transport_kunmap_data_sg(cmd);
+ }
+- transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+@@ -803,7 +784,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ unsigned char *rbuf;
+ int type = dev->transport->get_device_type(dev);
+ int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+- int offset = ten ? 8 : 4;
++ u32 offset = ten ? 8 : 4;
+ int length = 0;
+ unsigned char buf[SE_MODE_PAGE_BUF];
+
+@@ -836,6 +817,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ offset -= 2;
+ buf[0] = (offset >> 8) & 0xff;
+ buf[1] = offset & 0xff;
++ offset += 2;
+
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+@@ -845,13 +827,10 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[3], type);
+-
+- if ((offset + 2) > cmd->data_length)
+- offset = cmd->data_length;
+-
+ } else {
+ offset -= 1;
+ buf[0] = offset & 0xff;
++ offset += 1;
+
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+@@ -861,14 +840,13 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[2], type);
+-
+- if ((offset + 1) > cmd->data_length)
+- offset = cmd->data_length;
+ }
+
+ rbuf = transport_kmap_data_sg(cmd);
+- memcpy(rbuf, buf, offset);
+- transport_kunmap_data_sg(cmd);
++ if (rbuf) {
++ memcpy(rbuf, buf, min(offset, cmd->data_length));
++ transport_kunmap_data_sg(cmd);
++ }
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 84cbf29..a13f7e1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3475,6 +3475,19 @@ int con_debug_enter(struct vc_data *vc)
+ kdb_set(2, setargs);
+ }
+ }
++ if (vc->vc_cols < 999) {
++ int colcount;
++ char cols[4];
++ const char *setargs[3] = {
++ "set",
++ "COLUMNS",
++ cols,
++ };
++ if (kdbgetintenv(setargs[0], &colcount)) {
++ snprintf(cols, 4, "%i", vc->vc_cols);
++ kdb_set(2, setargs);
++ }
++ }
+ #endif /* CONFIG_KGDB_KDB */
+ return ret;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index f763ed7..e8007b8 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = {
+ Maybe we should define a new
+ quirk for this. */
+ },
++ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 1e35963..660fd53 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1699,7 +1699,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
+ int retval;
+ struct resource *res;
+
+- if (!dev->platform_data) {
++ if (!dev->platform_data && !pdev->dev.of_node) {
+ /* small (so we copy it) but critical! */
+ DBG("missing platform_data\n");
+ return -ENODEV;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index d8dedc7..3639371 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -366,6 +366,17 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
+ return -ENOMEM;
+
+ vdev->num_ctx = 1;
++
++ /*
++ * If the virtual interrupt is masked, restore it. Devices
++ * supporting DisINTx can be masked at the hardware level
++ * here, non-PCI-2.3 devices will have to wait until the
++ * interrupt is enabled.
++ */
++ vdev->ctx[0].masked = vdev->virq_disabled;
++ if (vdev->pci_2_3)
++ pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
+ return 0;
+@@ -400,25 +411,26 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
+ return PTR_ERR(trigger);
+ }
+
++ vdev->ctx[0].trigger = trigger;
++
+ if (!vdev->pci_2_3)
+ irqflags = 0;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, vdev->ctx[0].name, vdev);
+ if (ret) {
++ vdev->ctx[0].trigger = NULL;
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+- vdev->ctx[0].trigger = trigger;
+-
+ /*
+ * INTx disable will stick across the new irq setup,
+ * disable_irq won't.
+ */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
++ if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+ disable_irq_nosync(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 8af6414..38fcfff 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -647,7 +647,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
+index af8f26b..db1e392 100644
+--- a/drivers/video/via/via_clock.c
++++ b/drivers/video/via/via_clock.c
+@@ -25,6 +25,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/via-core.h>
++#include <asm/olpc.h>
+ #include "via_clock.h"
+ #include "global.h"
+ #include "debug.h"
+@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
+ printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+ }
+
++static void noop_set_clock_state(u8 state)
++{
++}
++
+ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ {
+ switch (gfx_chip) {
+@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ break;
+
+ }
++
++ if (machine_is_olpc()) {
++ /* The OLPC XO-1.5 cannot suspend/resume reliably if the
++ * IGA1/IGA2 clocks are set as on or off (memory rot
++ * occasionally happens during suspend under such
++ * configurations).
++ *
++ * The only known stable scenario is to leave this bits as-is,
++ * which in their default states are documented to enable the
++ * clock only when it is needed.
++ */
++ clock->set_primary_clock_state = noop_set_clock_state;
++ clock->set_secondary_clock_state = noop_set_clock_state;
++ }
+ }
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index bce15cf..ca373d1 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -47,6 +47,7 @@
+ #include <xen/xenbus.h>
+ #include <xen/xen.h>
+ #include "xenbus_comms.h"
++#include <asm/xen/hypervisor.h>
+
+ struct xs_stored_msg {
+ struct list_head list;
+@@ -617,7 +618,24 @@ static struct xenbus_watch *find_watch(const char *token)
+
+ return NULL;
+ }
++/*
++ * Certain older XenBus toolstack cannot handle reading values that are
++ * not populated. Some Xen 3.4 installation are incapable of doing this
++ * so if we are running on anything older than 4 do not attempt to read
++ * control/platform-feature-xs_reset_watches.
++ */
++static bool xen_strict_xenbus_quirk()
++{
++ uint32_t eax, ebx, ecx, edx, base;
++
++ base = xen_cpuid_base();
++ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
++ if ((eax >> 16) < 4)
++ return true;
++ return false;
++
++}
+ static void xs_reset_watches(void)
+ {
+ int err, supported = 0;
+@@ -625,6 +643,9 @@ static void xs_reset_watches(void)
+ if (!xen_hvm_domain())
+ return;
+
++ if (xen_strict_xenbus_quirk())
++ return;
++
+ err = xenbus_scanf(XBT_NIL, "control",
+ "platform-feature-xs_reset_watches", "%d", &supported);
+ if (err != 1 || !supported)
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index e7396cf..91b1165 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_mount_wait(dentry);
+- if (status)
+- return ERR_PTR(status);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
++ if (status) {
++ spin_unlock(&sbi->fs_lock);
++ return ERR_PTR(status);
++ }
+ }
+ done:
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 8e1b60e..02ce909 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -99,7 +99,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+ * FIXME: we should try harder by querying the mds for the ino.
+ */
+ static struct dentry *__fh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_fh *fh)
++ struct ceph_nfs_fh *fh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -107,6 +107,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*fh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__fh_to_dentry %llx\n", fh->ino);
+ vino.ino = fh->ino;
+ vino.snap = CEPH_NOSNAP;
+@@ -150,7 +153,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ * convert connectable fh to dentry
+ */
+ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_confh *cfh)
++ struct ceph_nfs_confh *cfh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -158,6 +161,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+
+@@ -207,9 +213,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
+ if (fh_type == 1)
+- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
++ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
++ fh_len);
+ else
+- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
++ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
++ fh_len);
+ }
+
+ /*
+@@ -230,6 +238,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+
+ if (fh_type == 1)
+ return ERR_PTR(-ESTALE);
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
+
+ pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ cfh->parent_name_hash);
+diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
+index e8ed6d4..4767774 100644
+--- a/fs/gfs2/export.c
++++ b/fs/gfs2/export.c
+@@ -161,6 +161,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ case GFS2_SMALL_FH_SIZE:
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_SMALL_FH_SIZE)
++ return NULL;
+ this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
+ this.no_formal_ino |= be32_to_cpu(fh[1]);
+ this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
+@@ -180,6 +182,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
+ switch (fh_type) {
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_LARGE_FH_SIZE)
++ return NULL;
+ parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
+ parent.no_formal_ino |= be32_to_cpu(fh[5]);
+ parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 1d38044..2b4f235 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -175,7 +175,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ {
+ struct isofs_fid *ifid = (struct isofs_fid *)fid;
+
+- if (fh_type != 2)
++ if (fh_len < 2 || fh_type != 2)
+ return NULL;
+
+ return isofs_export_iget(sb,
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 52c15c7..86b39b1 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -86,7 +86,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ if (buffer_freed(bh)) {
++ WARN_ON_ONCE(buffer_dirty(bh));
+ clear_buffer_freed(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
+ release_buffer_page(bh);
+ } else
+ put_bh(bh);
+@@ -866,17 +871,35 @@ restart_loop:
+ * there's no point in keeping a checkpoint record for
+ * it. */
+
+- /* A buffer which has been freed while still being
+- * journaled by a previous transaction may end up still
+- * being dirty here, but we want to avoid writing back
+- * that buffer in the future after the "add to orphan"
+- * operation been committed, That's not only a performance
+- * gain, it also stops aliasing problems if the buffer is
+- * left behind for writeback and gets reallocated for another
+- * use in a different page. */
+- if (buffer_freed(bh) && !jh->b_next_transaction) {
+- clear_buffer_freed(bh);
+- clear_buffer_jbddirty(bh);
++ /*
++ * A buffer which has been freed while still being journaled by
++ * a previous transaction.
++ */
++ if (buffer_freed(bh)) {
++ /*
++ * If the running transaction is the one containing
++ * "add to orphan" operation (b_next_transaction !=
++ * NULL), we have to wait for that transaction to
++ * commit before we can really get rid of the buffer.
++ * So just clear b_modified to not confuse transaction
++ * credit accounting and refile the buffer to
++ * BJ_Forget of the running transaction. If the just
++ * committed transaction contains "add to orphan"
++ * operation, we can completely invalidate the buffer
++ * now. We are rather throughout in that since the
++ * buffer may be still accessible when blocksize <
++ * pagesize and it is attached to the last partial
++ * page.
++ */
++ jh->b_modified = 0;
++ if (!jh->b_next_transaction) {
++ clear_buffer_freed(bh);
++ clear_buffer_jbddirty(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
++ }
+ }
+
+ if (buffer_jbddirty(bh)) {
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index febc10d..78b7f84 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1843,15 +1843,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+ * We're outside-transaction here. Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++ int partial_page)
+ {
+ transaction_t *transaction;
+ struct journal_head *jh;
+ int may_free = 1;
+- int ret;
+
+ BUFFER_TRACE(bh, "entry");
+
++retry:
+ /*
+ * It is safe to proceed here without the j_list_lock because the
+ * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1879,10 +1880,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * clear the buffer dirty bit at latest at the moment when the
+ * transaction marking the buffer as freed in the filesystem
+ * structures is committed because from that moment on the
+- * buffer can be reallocated and used by a different page.
++ * block can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has
+ * already been added to orphan list, it is safe for us to add
+ * the buffer to BJ_Forget list of the newest transaction.
++ *
++ * Also we have to clear buffer_mapped flag of a truncated buffer
++ * because the buffer_head may be attached to the page straddling
++ * i_size (can happen only when blocksize < pagesize) and thus the
++ * buffer_head can be reused when the file is extended again. So we end
++ * up keeping around invalidated buffers attached to transactions'
++ * BJ_Forget list just to stop checkpointing code from cleaning up
++ * the transaction this buffer was modified in.
+ */
+ transaction = jh->b_transaction;
+ if (transaction == NULL) {
+@@ -1909,13 +1918,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * committed, the buffer won't be needed any
+ * longer. */
+ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_running_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* There is no currently-running transaction. So the
+ * orphan record which we wrote for this file must have
+@@ -1923,13 +1928,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * the committing transaction, if it exists. */
+ if (journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "give to committing trans");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_committing_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* The orphan record's transaction has
+ * committed. We can cleanse this buffer */
+@@ -1950,10 +1951,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+ /*
+ * The buffer is committing, we simply cannot touch
+- * it. So we just set j_next_transaction to the
+- * running transaction (if there is one) and mark
+- * buffer as freed so that commit code knows it should
+- * clear dirty bits when it is done with the buffer.
++ * it. If the page is straddling i_size we have to wait
++ * for commit and try again.
++ */
++ if (partial_page) {
++ tid_t tid = journal->j_committing_transaction->t_tid;
++
++ journal_put_journal_head(jh);
++ spin_unlock(&journal->j_list_lock);
++ jbd_unlock_bh_state(bh);
++ spin_unlock(&journal->j_state_lock);
++ log_wait_commit(journal, tid);
++ goto retry;
++ }
++ /*
++ * OK, buffer won't be reachable after truncate. We just set
++ * j_next_transaction to the running transaction (if there is
++ * one) and mark buffer as freed so that commit code knows it
++ * should clear dirty bits when it is done with the buffer.
+ */
+ set_buffer_freed(bh);
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1976,6 +1991,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+
+ zap_buffer:
++ /*
++ * This is tricky. Although the buffer is truncated, it may be reused
++ * if blocksize < pagesize and it is attached to the page straddling
++ * EOF. Since the buffer might have been added to BJ_Forget list of the
++ * running transaction, journal_get_write_access() won't clear
++ * b_modified and credit accounting gets confused. So clear b_modified
++ * here. */
++ jh->b_modified = 0;
+ journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ spin_unlock(&journal->j_list_lock);
+@@ -2024,7 +2047,8 @@ void journal_invalidatepage(journal_t *journal,
+ if (offset <= curr_off) {
+ /* This block is wholly outside the truncation point */
+ lock_buffer(bh);
+- may_free &= journal_unmap_buffer(journal, bh);
++ may_free &= journal_unmap_buffer(journal, bh,
++ offset > 0);
+ unlock_buffer(bh);
+ }
+ curr_off = next_off;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 7ef14b3..e4fb3ba 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -7,7 +7,6 @@
+ */
+
+ #include <linux/types.h>
+-#include <linux/utsname.h>
+ #include <linux/kernel.h>
+ #include <linux/ktime.h>
+ #include <linux/slab.h>
+@@ -19,6 +18,8 @@
+
+ #include <asm/unaligned.h>
+
++#include "netns.h"
++
+ #define NLMDBG_FACILITY NLMDBG_MONITOR
+ #define NSM_PROGRAM 100024
+ #define NSM_VERSION 1
+@@ -40,6 +41,7 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
++ char *nodename;
+ };
+
+ struct nsm_res {
+@@ -70,7 +72,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ };
+ struct rpc_create_args args = {
+ .net = net,
+- .protocol = XPRT_TRANSPORT_UDP,
++ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *)&sin,
+ .addrsize = sizeof(sin),
+ .servername = "rpc.statd",
+@@ -83,10 +85,54 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ return rpc_create(&args);
+ }
+
+-static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+- struct net *net)
++static struct rpc_clnt *nsm_client_get(struct net *net)
+ {
++ static DEFINE_MUTEX(nsm_create_mutex);
+ struct rpc_clnt *clnt;
++ struct lockd_net *ln = net_generic(net, lockd_net_id);
++
++ spin_lock(&ln->nsm_clnt_lock);
++ if (ln->nsm_users) {
++ ln->nsm_users++;
++ clnt = ln->nsm_clnt;
++ spin_unlock(&ln->nsm_clnt_lock);
++ goto out;
++ }
++ spin_unlock(&ln->nsm_clnt_lock);
++
++ mutex_lock(&nsm_create_mutex);
++ clnt = nsm_create(net);
++ if (!IS_ERR(clnt)) {
++ ln->nsm_clnt = clnt;
++ smp_wmb();
++ ln->nsm_users = 1;
++ }
++ mutex_unlock(&nsm_create_mutex);
++out:
++ return clnt;
++}
++
++static void nsm_client_put(struct net *net)
++{
++ struct lockd_net *ln = net_generic(net, lockd_net_id);
++ struct rpc_clnt *clnt = ln->nsm_clnt;
++ int shutdown = 0;
++
++ spin_lock(&ln->nsm_clnt_lock);
++ if (ln->nsm_users) {
++ if (--ln->nsm_users)
++ ln->nsm_clnt = NULL;
++ shutdown = !ln->nsm_users;
++ }
++ spin_unlock(&ln->nsm_clnt_lock);
++
++ if (shutdown)
++ rpc_shutdown_client(clnt);
++}
++
++static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
++ struct rpc_clnt *clnt)
++{
+ int status;
+ struct nsm_args args = {
+ .priv = &nsm->sm_priv,
+@@ -94,31 +140,24 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
++ .nodename = clnt->cl_nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+ .rpc_resp = res,
+ };
+
+- clnt = nsm_create(net);
+- if (IS_ERR(clnt)) {
+- status = PTR_ERR(clnt);
+- dprintk("lockd: failed to create NSM upcall transport, "
+- "status=%d\n", status);
+- goto out;
+- }
++ BUG_ON(clnt == NULL);
+
+ memset(res, 0, sizeof(*res));
+
+ msg.rpc_proc = &clnt->cl_procinfo[proc];
+- status = rpc_call_sync(clnt, &msg, 0);
++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+ if (status < 0)
+ dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ status);
+ else
+ status = 0;
+- rpc_shutdown_client(clnt);
+- out:
+ return status;
+ }
+
+@@ -138,6 +177,7 @@ int nsm_monitor(const struct nlm_host *host)
+ struct nsm_handle *nsm = host->h_nsmhandle;
+ struct nsm_res res;
+ int status;
++ struct rpc_clnt *clnt;
+
+ dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
+
+@@ -150,7 +190,15 @@ int nsm_monitor(const struct nlm_host *host)
+ */
+ nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
+
+- status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net);
++ clnt = nsm_client_get(host->net);
++ if (IS_ERR(clnt)) {
++ status = PTR_ERR(clnt);
++ dprintk("lockd: failed to create NSM upcall transport, "
++ "status=%d, net=%p\n", status, host->net);
++ return status;
++ }
++
++ status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, clnt);
+ if (unlikely(res.status != 0))
+ status = -EIO;
+ if (unlikely(status < 0)) {
+@@ -182,9 +230,11 @@ void nsm_unmonitor(const struct nlm_host *host)
+
+ if (atomic_read(&nsm->sm_count) == 1
+ && nsm->sm_monitored && !nsm->sm_sticky) {
++ struct lockd_net *ln = net_generic(host->net, lockd_net_id);
++
+ dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
+
+- status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net);
++ status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, ln->nsm_clnt);
+ if (res.status != 0)
+ status = -EIO;
+ if (status < 0)
+@@ -192,6 +242,8 @@ void nsm_unmonitor(const struct nlm_host *host)
+ nsm->sm_name);
+ else
+ nsm->sm_monitored = 0;
++
++ nsm_client_put(host->net);
+ }
+ }
+
+@@ -430,7 +482,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, utsname()->nodename);
++ encode_nsm_string(xdr, argp->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
+index 4eee248..5010b55 100644
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -12,6 +12,10 @@ struct lockd_net {
+ struct delayed_work grace_period_end;
+ struct lock_manager lockd_manager;
+ struct list_head grace_list;
++
++ spinlock_t nsm_clnt_lock;
++ unsigned int nsm_users;
++ struct rpc_clnt *nsm_clnt;
+ };
+
+ extern int lockd_net_id;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 31a63f8..7e35587 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -596,6 +596,7 @@ static int lockd_init_net(struct net *net)
+
+ INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ INIT_LIST_HEAD(&ln->grace_list);
++ spin_lock_init(&ln->nsm_clnt_lock);
+ return 0;
+ }
+
+diff --git a/fs/namei.c b/fs/namei.c
+index dd1ed1b..81bd546 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -692,9 +692,9 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
+ if (parent->i_uid == inode->i_uid)
+ return 0;
+
++ audit_log_link_denied("follow_link", link);
+ path_put_conditional(link, nd);
+ path_put(&nd->path);
+- audit_log_link_denied("follow_link", link);
+ return -EACCES;
+ }
+
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index dd392ed..f3d16ad 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ return bio;
+ }
+
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+- struct parallel_io *par)
++ struct parallel_io *par,
++ unsigned int offset, int len)
+ {
++ isect = isect + (offset >> SECTOR_SHIFT);
++ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++ npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++ if (bio_add_page(bio, page, len, offset) < len) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+ }
+
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++ sector_t isect, struct page *page,
++ struct pnfs_block_extent *be,
++ void (*end_io)(struct bio *, int err),
++ struct parallel_io *par)
++{
++ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++ end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -461,6 +475,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ return;
+ }
+
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++ struct page *page = bvec->bv_page;
++
++ /* Only one page in bvec */
++ unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int offset, unsigned int len)
++{
++ struct bio *bio;
++ struct page *shadow_page;
++ sector_t isect;
++ char *kaddr, *kshadow_addr;
++ int ret = 0;
++
++ dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (shadow_page == NULL)
++ return -ENOMEM;
++
++ bio = bio_alloc(GFP_NOIO, 1);
++ if (bio == NULL)
++ return -ENOMEM;
++
++ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++ (offset / SECTOR_SIZE);
++
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = bl_read_single_end_io;
++
++ lock_page(shadow_page);
++ if (bio_add_page(bio, shadow_page,
++ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++ unlock_page(shadow_page);
++ bio_put(bio);
++ return -EIO;
++ }
++
++ submit_bio(READ, bio);
++ wait_on_page_locked(shadow_page);
++ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++ ret = -EIO;
++ } else {
++ kaddr = kmap_atomic(page);
++ kshadow_addr = kmap_atomic(shadow_page);
++ memcpy(kaddr + offset, kshadow_addr + offset, len);
++ kunmap_atomic(kshadow_addr);
++ kunmap_atomic(kaddr);
++ }
++ __free_page(shadow_page);
++ bio_put(bio);
++
++ return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int dirty_offset, unsigned int dirty_len,
++ bool full_page)
++{
++ int ret = 0;
++ unsigned int start, end;
++
++ if (full_page) {
++ start = 0;
++ end = PAGE_CACHE_SIZE;
++ } else {
++ start = round_down(dirty_offset, SECTOR_SIZE);
++ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++ }
++
++ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++ if (!be) {
++ zero_user_segments(page, start, dirty_offset,
++ dirty_offset + dirty_len, end);
++ if (start == 0 && end == PAGE_CACHE_SIZE &&
++ trylock_page(page)) {
++ SetPageUptodate(page);
++ unlock_page(page);
++ }
++ return ret;
++ }
++
++ if (start != dirty_offset)
++ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++ if (!ret && (dirty_offset + dirty_len < end))
++ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++ end - dirty_offset - dirty_len);
++
++ return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+@@ -494,7 +608,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ SetPageUptodate(page);
+
+ cleanup:
+- bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+@@ -566,6 +679,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ struct parallel_io *par = NULL;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
++ unsigned int pg_offset, pg_len, saved_len;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+@@ -674,10 +788,11 @@ next_page:
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
+- isect, NULL);
++ isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ header->pnfs_error = -EINVAL;
+ goto out;
+@@ -694,7 +809,26 @@ next_page:
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++ pg_offset = offset & ~PAGE_CACHE_MASK;
++ if (pg_offset + count > PAGE_CACHE_SIZE)
++ pg_len = PAGE_CACHE_SIZE - pg_offset;
++ else
++ pg_len = count;
++
++ saved_len = pg_len;
++ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++ !bl_is_sector_init(be->be_inval, isect)) {
++ ret = bl_read_partial_page_sync(pages[i], cow_read,
++ pg_offset, pg_len, true);
++ if (ret) {
++ dprintk("%s bl_read_partial_page_sync fail %d\n",
++ __func__, ret);
++ header->pnfs_error = ret;
++ goto out;
++ }
++
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS);
+ if (unlikely(ret)) {
+@@ -703,15 +837,35 @@ next_page:
+ header->pnfs_error = ret;
+ goto out;
+ }
++
++ /* Expand to full page write */
++ pg_offset = 0;
++ pg_len = PAGE_CACHE_SIZE;
++ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
++ (pg_len & (SECTOR_SIZE - 1))){
++ /* ahh, nasty case. We have to do sync full sector
++ * read-modify-write cycles.
++ */
++ unsigned int saved_offset = pg_offset;
++ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++ pg_len, false);
++ pg_offset = round_down(pg_offset, SECTOR_SIZE);
++ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++ - pg_offset;
+ }
+- bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
++
++
++ bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
+ isect, pages[i], be,
+- bl_end_io_write, par);
++ bl_end_io_write, par,
++ pg_offset, pg_len);
+ if (IS_ERR(bio)) {
+ header->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
++ offset += saved_len;
++ count -= saved_len;
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+@@ -729,17 +883,16 @@ next_page:
+ }
+
+ write_done:
+- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+- if (count < wdata->res.count) {
+- wdata->res.count = count;
+- }
++ wdata->res.count = wdata->args.count;
+ out:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+ out_mds:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ kfree(par);
+ return PNFS_NOT_ATTEMPTED;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 0335069..39bb51a 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -41,6 +41,7 @@
+
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+ struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 9969444..0e7cd89 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -855,7 +855,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ if (server->wsize > NFS_MAX_FILE_IO_SIZE)
+ server->wsize = NFS_MAX_FILE_IO_SIZE;
+ server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+- server->pnfs_blksize = fsinfo->blksize;
+
+ server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1e50326..d5a0cf1 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1774,7 +1774,11 @@ static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
+ * informs us the stateid is unrecognized. */
+ if (status != -NFS4ERR_BAD_STATEID)
+ nfs41_free_stateid(server, stateid);
++ nfs_remove_bad_delegation(state->inode);
+
++ write_seqlock(&state->seqlock);
++ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
++ write_sequnlock(&state->seqlock);
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ }
+ }
+@@ -3362,8 +3366,11 @@ static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, s
+
+ nfs_fattr_init(fsinfo->fattr);
+ error = nfs4_do_fsinfo(server, fhandle, fsinfo);
+- if (error == 0)
++ if (error == 0) {
++ /* block layout checks this! */
++ server->pnfs_blksize = fsinfo->blksize;
+ set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
++ }
+
+ return error;
+ }
+diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
+index fdc91a6..ccfe0d0 100644
+--- a/fs/nfsd/nfs4idmap.c
++++ b/fs/nfsd/nfs4idmap.c
+@@ -598,7 +598,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
+ /* Just to make sure it's null-terminated: */
+ memcpy(buf, name, namelen);
+ buf[namelen] = '\0';
+- ret = kstrtouint(name, 10, id);
++ ret = kstrtouint(buf, 10, id);
+ return ret == 0;
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index cc894ed..5b3224c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1223,10 +1223,26 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
+ return true;
+ }
+
++/*
++ * RFC 3530 language requires clid_inuse be returned when the
++ * "principal" associated with a requests differs from that previously
++ * used. We use uid, gid's, and gss principal string as our best
++ * approximation. We also don't want to allow non-gss use of a client
++ * established using gss: in theory cr_principal should catch that
++ * change, but in practice cr_principal can be null even in the gss case
++ * since gssd doesn't always pass down a principal string.
++ */
++static bool is_gss_cred(struct svc_cred *cr)
++{
++ /* Is cr_flavor one of the gss "pseudoflavors"?: */
++ return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
++}
++
++
+ static bool
+ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
+ {
+- if ((cr1->cr_flavor != cr2->cr_flavor)
++ if ((is_gss_cred(cr1) != is_gss_cred(cr2))
+ || (cr1->cr_uid != cr2->cr_uid)
+ || (cr1->cr_gid != cr2->cr_gid)
+ || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+@@ -3766,6 +3782,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+
+ nfsd4_close_open_stateid(stp);
++ release_last_closed_stateid(oo);
+ oo->oo_last_closed_stid = stp;
+
+ if (list_empty(&oo->oo_owner.so_stateids)) {
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 855da58..63ce6be 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ reiserfs_warning(sb, "reiserfs-13077",
+ "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ fh_type, fh_len);
+- fh_type = 5;
++ fh_type = fh_len;
+ }
++ if (fh_len < 2)
++ return NULL;
+
+ return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
+ (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
+@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
++ if (fh_type > fh_len)
++ fh_type = fh_len;
+ if (fh_type < 4)
+ return NULL;
+
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 4267922..8c6d1d7 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -189,6 +189,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
+ struct inode *inode = NULL;
+
++ if (fh_len < xfs_fileid_length(fileid_type))
++ return NULL;
++
+ switch (fileid_type) {
+ case FILEID_INO32_GEN_PARENT:
+ inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 57977c6..e5cf2c8 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -212,9 +212,6 @@ typedef enum {
+ #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ && (chip->page_shift > 9))
+
+-/* Mask to zero out the chip options, which come from the id table */
+-#define NAND_CHIPOPTIONS_MSK 0x0000ffff
+-
+ /* Non chip related options */
+ /* This option skips the bbt scan during initialization. */
+ #define NAND_SKIP_BBTSCAN 0x00010000
+diff --git a/kernel/audit.c b/kernel/audit.c
+index ea3b7b6..a8c84be 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1466,6 +1466,8 @@ void audit_log_link_denied(const char *operation, struct path *link)
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_ANOM_LINK);
++ if (!ab)
++ return;
+ audit_log_format(ab, "op=%s action=denied", operation);
+ audit_log_format(ab, " pid=%d comm=", current->pid);
+ audit_log_untrustedstring(ab, current->comm);
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 0a69d2a..14ff484 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -552,6 +552,7 @@ int vkdb_printf(const char *fmt, va_list ap)
+ {
+ int diag;
+ int linecount;
++ int colcount;
+ int logging, saved_loglevel = 0;
+ int saved_trap_printk;
+ int got_printf_lock = 0;
+@@ -584,6 +585,10 @@ int vkdb_printf(const char *fmt, va_list ap)
+ if (diag || linecount <= 1)
+ linecount = 24;
+
++ diag = kdbgetintenv("COLUMNS", &colcount);
++ if (diag || colcount <= 1)
++ colcount = 80;
++
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (diag)
+ logging = 0;
+@@ -690,7 +695,7 @@ kdb_printit:
+ gdbstub_msg_write(kdb_buffer, retlen);
+ } else {
+ if (dbg_io_ops && !dbg_io_ops->is_console) {
+- len = strlen(kdb_buffer);
++ len = retlen;
+ cp = kdb_buffer;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+@@ -709,11 +714,29 @@ kdb_printit:
+ printk(KERN_INFO "%s", kdb_buffer);
+ }
+
+- if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+- kdb_nextline++;
++ if (KDB_STATE(PAGER)) {
++ /*
++ * Check printed string to decide how to bump the
++ * kdb_nextline to control when the more prompt should
++ * show up.
++ */
++ int got = 0;
++ len = retlen;
++ while (len--) {
++ if (kdb_buffer[len] == '\n') {
++ kdb_nextline++;
++ got = 0;
++ } else if (kdb_buffer[len] == '\r') {
++ got = 0;
++ } else {
++ got++;
++ }
++ }
++ kdb_nextline += got / (colcount + 1);
++ }
+
+ /* check for having reached the LINES number of printed lines */
+- if (kdb_nextline == linecount) {
++ if (kdb_nextline >= linecount) {
+ char buf1[16] = "";
+
+ /* Watch out for recursion here. Any routine that calls
+@@ -765,7 +788,7 @@ kdb_printit:
+ kdb_grepping_flag = 0;
+ kdb_printf("\n");
+ } else if (buf1[0] == ' ') {
+- kdb_printf("\n");
++ kdb_printf("\r");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] == '\n') {
+ kdb_nextline = linecount - 1;
+diff --git a/kernel/module.c b/kernel/module.c
+index 4edbd9c..9ad9ee9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2730,6 +2730,10 @@ static int check_module_license_and_versions(struct module *mod)
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+
++ /* lve claims to be GPL but upstream won't provide source */
++ if (strcmp(mod->name, "lve") == 0)
++ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
++
+ #ifdef CONFIG_MODVERSIONS
+ if ((mod->num_syms && !mod->crcs)
+ || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3a9e5d5..e430b97 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -835,7 +835,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
+ */
+ if (ts->tick_stopped) {
+ touch_softlockup_watchdog();
+- if (idle_cpu(cpu))
++ if (is_idle_task(current))
+ ts->idle_jiffies++;
+ }
+ update_process_times(user_mode(regs));
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d3b91e7..f791637 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1111,7 +1111,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+ accumulate_nsecs_to_secs(tk);
+
+ /* Accumulate raw time */
+- raw_nsecs = tk->raw_interval << shift;
++ raw_nsecs = (u64)tk->raw_interval << shift;
+ raw_nsecs += tk->raw_time.tv_nsec;
+ if (raw_nsecs >= NSEC_PER_SEC) {
+ u64 raw_secs = raw_nsecs;
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 8c5e7b9..46ef2b1 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #define TVR_SIZE (1 << TVR_BITS)
+ #define TVN_MASK (TVN_SIZE - 1)
+ #define TVR_MASK (TVR_SIZE - 1)
++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+
+ struct tvec {
+ struct list_head vec[TVN_SIZE];
+@@ -358,11 +359,12 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+ vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+ } else {
+ int i;
+- /* If the timeout is larger than 0xffffffff on 64-bit
+- * architectures then we use the maximum timeout:
++ /* If the timeout is larger than MAX_TVAL (on 64-bit
++ * architectures or with CONFIG_BASE_SMALL=1) then we
++ * use the maximum timeout.
+ */
+- if (idx > 0xffffffffUL) {
+- idx = 0xffffffffUL;
++ if (idx > MAX_TVAL) {
++ idx = MAX_TVAL;
+ expires = idx + base->timer_jiffies;
+ }
+ i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d4e184e..d2eeca1 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2366,12 +2366,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ {
+ struct inode *inode;
+ struct dentry *dentry = NULL;
+- u64 inum = fid->raw[2];
+- inum = (inum << 32) | fid->raw[1];
++ u64 inum;
+
+ if (fh_len < 3)
+ return NULL;
+
++ inum = fid->raw[2];
++ inum = (inum << 32) | fid->raw[1];
++
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
+ if (inode) {
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 148e73d..e356b8d 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2927,7 +2927,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
+ pkt_dev->pkt_overhead;
+
+- if (datalen < sizeof(struct pktgen_hdr)) {
++ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
+ datalen = sizeof(struct pktgen_hdr);
+ net_info_ratelimited("increased datalen to %d\n", datalen);
+ }
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 8cd7291..118329a 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
+ skb_queue_len(&local->skb_queue_unreliable);
+ while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
+ (skb = skb_dequeue(&local->skb_queue_unreliable))) {
+- dev_kfree_skb_irq(skb);
++ ieee80211_free_txskb(hw, skb);
+ tmp--;
+ I802_DEBUG_INC(local->tx_status_drop);
+ }
+@@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
+ "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ }
+
+ static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index c5e8c9c..362c418 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
+ total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ if (skb) {
+ purged++;
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ break;
+ }
+ }
+@@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ ps_dbg(tx->sdata,
+ "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ sta->sta.addr, ac);
+- dev_kfree_skb(old);
++ ieee80211_free_txskb(&local->hw, old);
+ } else
+ tx->local->total_ps_buffered++;
+
+@@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ spin_unlock(&tx->sta->lock);
+
+ if (purge_skb)
+- dev_kfree_skb(purge_skb);
++ ieee80211_free_txskb(&tx->local->hw, purge_skb);
+ }
+
+ /* reset session timer */
+@@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
+ #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (WARN_ON_ONCE(q >= local->hw.queues)) {
+ __skb_unlink(skb, skbs);
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ continue;
+ }
+ #endif
+@@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+ if (unlikely(res == TX_DROP)) {
+ I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ if (tx->skb)
+- dev_kfree_skb(tx->skb);
++ ieee80211_free_txskb(&tx->local->hw, tx->skb);
+ else
+ __skb_queue_purge(&tx->skbs);
+ return -1;
+@@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
+
+ if (unlikely(res_prepare == TX_DROP)) {
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ goto out;
+ } else if (unlikely(res_prepare == TX_QUEUED)) {
+ goto out;
+@@ -1466,7 +1466,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+ headroom = max_t(int, 0, headroom);
+
+ if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ rcu_read_unlock();
+ return;
+ }
+@@ -2060,8 +2060,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ head_need += IEEE80211_ENCRYPT_HEADROOM;
+ head_need += local->tx_headroom;
+ head_need = max_t(int, 0, head_need);
+- if (ieee80211_skb_resize(sdata, skb, head_need, true))
+- goto fail;
++ if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
++ ieee80211_free_txskb(&local->hw, skb);
++ return NETDEV_TX_OK;
++ }
+ }
+
+ if (encaps_data) {
+@@ -2196,7 +2198,7 @@ void ieee80211_tx_pending(unsigned long data)
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (WARN_ON(!info->control.vif)) {
+- kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ continue;
+ }
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index a35b8e5..d1988cf 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1025,6 +1025,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++ xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1051,7 +1061,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ /* Sanity check of the record length */
+ if (unlikely(transport->tcp_reclen < 8)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+@@ -1132,7 +1142,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+- xprt_force_disconnect(&transport->xprt);
++ xs_tcp_force_close(&transport->xprt);
+ }
+ xs_tcp_check_fraghdr(transport);
+ }
+@@ -1455,6 +1465,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1512,8 +1524,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- xprt_force_disconnect(xprt);
+ xprt->connect_cookie++;
++ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+ * If the server closed down the connection, make sure that
+@@ -2199,8 +2211,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
+index c3f69ae..4d908d1 100644
+--- a/scripts/Makefile.fwinst
++++ b/scripts/Makefile.fwinst
+@@ -27,7 +27,7 @@ endif
+ installed-mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(mod-fw))
+
+ installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all))
+-installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/.
++installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/./
+
+ # Workaround for make < 3.81, where .SECONDEXPANSION doesn't work.
+ PHONY += $(INSTALL_FW_PATH)/$$(%) install-all-dirs
+@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
+ $(installed-fw-dirs):
+ $(call cmd,mkdir)
+
+-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
++$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+ $(call cmd,install)
+
+ PHONY += __fw_install __fw_modinst FORCE
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 9473fca..8b0f996 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
+ tmp.index = ac97->num;
+ kctl = snd_ctl_new1(&tmp, ac97);
+ }
++ if (!kctl)
++ return -ENOMEM;
+ if (reg >= AC97_PHONE && reg <= AC97_PCM)
+ set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
+ else
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index 7549240..a78fdf4 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
++ /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
++ /* This is MAEM8986, 0202 is MAEM8980 */
++ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
++ .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
++ .id = "EMU1010",
++ .emu10k2_chip = 1,
++ .ca0108_chip = 1,
++ .spk71 = 1,
++ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
+ /* Tested by James@superbug.co.uk 8th July 2005. */
+ /* This is MAEM8810, 0202 is MAEM8820 */
+ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 12a9432..a5dc746 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -487,6 +487,7 @@ struct azx {
+
+ /* VGA-switcheroo setup */
+ unsigned int use_vga_switcheroo:1;
++ unsigned int vga_switcheroo_registered:1;
+ unsigned int init_failed:1; /* delayed init failed */
+ unsigned int disabled:1; /* disabled by VGA-switcher */
+
+@@ -2135,9 +2136,12 @@ static unsigned int azx_get_position(struct azx *chip,
+ if (delay < 0)
+ delay += azx_dev->bufsize;
+ if (delay >= azx_dev->period_bytes) {
+- snd_printdd("delay %d > period_bytes %d\n",
+- delay, azx_dev->period_bytes);
+- delay = 0; /* something is wrong */
++ snd_printk(KERN_WARNING SFX
++ "Unstable LPIB (%d >= %d); "
++ "disabling LPIB delay counting\n",
++ delay, azx_dev->period_bytes);
++ delay = 0;
++ chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
+ }
+ azx_dev->substream->runtime->delay =
+ bytes_to_frames(azx_dev->substream->runtime, delay);
+@@ -2556,7 +2560,9 @@ static void azx_vs_set_state(struct pci_dev *pci,
+ if (disabled) {
+ azx_suspend(&pci->dev);
+ chip->disabled = true;
+- snd_hda_lock_devices(chip->bus);
++ if (snd_hda_lock_devices(chip->bus))
++ snd_printk(KERN_WARNING SFX
++ "Cannot lock devices!\n");
+ } else {
+ snd_hda_unlock_devices(chip->bus);
+ chip->disabled = false;
+@@ -2599,14 +2605,20 @@ static const struct vga_switcheroo_client_ops azx_vs_ops = {
+
+ static int __devinit register_vga_switcheroo(struct azx *chip)
+ {
++ int err;
++
+ if (!chip->use_vga_switcheroo)
+ return 0;
+ /* FIXME: currently only handling DIS controller
+ * is there any machine with two switchable HDMI audio controllers?
+ */
+- return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
++ err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+ VGA_SWITCHEROO_DIS,
+ chip->bus != NULL);
++ if (err < 0)
++ return err;
++ chip->vga_switcheroo_registered = 1;
++ return 0;
+ }
+ #else
+ #define init_vga_switcheroo(chip) /* NOP */
+@@ -2626,7 +2638,8 @@ static int azx_free(struct azx *chip)
+ if (use_vga_switcheroo(chip)) {
+ if (chip->disabled && chip->bus)
+ snd_hda_unlock_devices(chip->bus);
+- vga_switcheroo_unregister_client(chip->pci);
++ if (chip->vga_switcheroo_registered)
++ vga_switcheroo_unregister_client(chip->pci);
+ }
+
+ if (chip->initialized) {
+@@ -2974,14 +2987,6 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ }
+
+ ok:
+- err = register_vga_switcheroo(chip);
+- if (err < 0) {
+- snd_printk(KERN_ERR SFX
+- "Error registering VGA-switcheroo client\n");
+- azx_free(chip);
+- return err;
+- }
+-
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+@@ -3208,6 +3213,13 @@ static int __devinit azx_probe(struct pci_dev *pci,
+
+ pci_set_drvdata(pci, card);
+
++ err = register_vga_switcheroo(chip);
++ if (err < 0) {
++ snd_printk(KERN_ERR SFX
++ "Error registering VGA-switcheroo client\n");
++ goto out_free;
++ }
++
+ dev++;
+ return 0;
+
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 0c4c1a6..cc31346 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1417,7 +1417,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1974,7 +1974,7 @@ static int patch_cs4210(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1999,7 +1999,7 @@ static int patch_cs4213(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 56a3eef..155cbd2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -611,6 +611,8 @@ static void alc_line_automute(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+
++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
++ return;
+ /* check LO jack only when it's different from HP */
+ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ return;
+@@ -2627,8 +2629,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
+ return "PCM";
+ break;
+ }
+- if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
++ if (ch >= ARRAY_SIZE(channel_name)) {
++ snd_BUG();
+ return "PCM";
++ }
+
+ return channel_name[ch];
+ }
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 4b4072f..4c404a0 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -118,6 +118,8 @@ enum {
+ };
+
+ struct via_spec {
++ struct hda_gen_spec gen;
++
+ /* codec parameterization */
+ const struct snd_kcontrol_new *mixers[6];
+ unsigned int num_mixers;
+@@ -246,6 +248,7 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
+ /* VT1708BCE & VT1708S are almost same */
+ if (spec->codec_type == VT1708BCE)
+ spec->codec_type = VT1708S;
++ snd_hda_gen_init(&spec->gen);
+ return spec;
+ }
+
+@@ -1628,6 +1631,7 @@ static void via_free(struct hda_codec *codec)
+ vt1708_stop_hp_work(spec);
+ kfree(spec->bind_cap_vol);
+ kfree(spec->bind_cap_sw);
++ snd_hda_gen_free(&spec->gen);
+ kfree(spec);
+ }
+
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index 32682c1..c8bff6d 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -1028,7 +1028,7 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
+ WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
+ digital_tlv),
+ SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
+- WM2200_SPK1R_MUTE_SHIFT, 1, 0),
++ WM2200_SPK1R_MUTE_SHIFT, 1, 1),
+ };
+
+ WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
+@@ -2091,6 +2091,7 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
+
+ switch (wm2200->rev) {
+ case 0:
++ case 1:
+ ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch,
+ ARRAY_SIZE(wm2200_reva_patch));
+ if (ret != 0) {
+diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
+index 9d93793..f8fba57 100644
+--- a/sound/soc/omap/omap-abe-twl6040.c
++++ b/sound/soc/omap/omap-abe-twl6040.c
+@@ -190,7 +190,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
+ twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
+ twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
+ twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
+- twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vinrator");
++ twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
+ twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
+ twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
+ twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index 0540408..1bb0d58c 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -20,6 +20,7 @@
+ #include <linux/sh_dma.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/workqueue.h>
+ #include <sound/soc.h>
+ #include <sound/sh_fsi.h>
+
+@@ -223,7 +224,7 @@ struct fsi_stream {
+ */
+ struct dma_chan *chan;
+ struct sh_dmae_slave slave; /* see fsi_handler_init() */
+- struct tasklet_struct tasklet;
++ struct work_struct work;
+ dma_addr_t dma;
+ };
+
+@@ -1085,9 +1086,9 @@ static void fsi_dma_complete(void *data)
+ snd_pcm_period_elapsed(io->substream);
+ }
+
+-static void fsi_dma_do_tasklet(unsigned long data)
++static void fsi_dma_do_work(struct work_struct *work)
+ {
+- struct fsi_stream *io = (struct fsi_stream *)data;
++ struct fsi_stream *io = container_of(work, struct fsi_stream, work);
+ struct fsi_priv *fsi = fsi_stream_to_priv(io);
+ struct snd_soc_dai *dai;
+ struct dma_async_tx_descriptor *desc;
+@@ -1129,7 +1130,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
+ * FIXME
+ *
+ * In DMAEngine case, codec and FSI cannot be started simultaneously
+- * since FSI is using tasklet.
++ * since FSI is using the scheduler work queue.
+ * Therefore, in capture case, probably FSI FIFO will have got
+ * overflow error in this point.
+ * in that case, DMA cannot start transfer until error was cleared.
+@@ -1153,7 +1154,7 @@ static bool fsi_dma_filter(struct dma_chan *chan, void *param)
+
+ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+- tasklet_schedule(&io->tasklet);
++ schedule_work(&io->work);
+
+ return 0;
+ }
+@@ -1195,14 +1196,14 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct dev
+ return fsi_stream_probe(fsi, dev);
+ }
+
+- tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io);
++ INIT_WORK(&io->work, fsi_dma_do_work);
+
+ return 0;
+ }
+
+ static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+- tasklet_kill(&io->tasklet);
++ cancel_work_sync(&io->work);
+
+ fsi_stream_stop(fsi, io);
+
diff --git a/3.6.2/4420_grsecurity-2.9.1-3.6.2-201210151829.patch b/3.6.3/4420_grsecurity-2.9.1-3.6.3-201210231942.patch
index 26ec9d1..667fa18 100644
--- a/3.6.2/4420_grsecurity-2.9.1-3.6.2-201210151829.patch
+++ b/3.6.3/4420_grsecurity-2.9.1-3.6.3-201210231942.patch
@@ -251,7 +251,7 @@ index ad7e2e5..199f49e 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index af5d6a9..4ccd9fb 100644
+index 6cdadf4..02df425 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -7666,7 +7666,7 @@ index b322f12..652d0d9 100644
Enabling this option turns a certain set of sanity checks for user
copy operations into compile time failures.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 58790bd..fc2f239 100644
+index 05afcca..b6ecb51 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -50,6 +50,7 @@ else
@@ -27270,7 +27270,7 @@ index 00aaf04..4a26505 100644
-}
-__setup("vdso=", vdso_setup);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 1fbe75a..c22e01f 100644
+index c1461de..355f120 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -98,8 +98,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -27317,7 +27317,7 @@ index 1fbe75a..c22e01f 100644
#endif
}
-@@ -1205,30 +1203,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1221,30 +1219,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
#endif
};
@@ -27355,7 +27355,7 @@ index 1fbe75a..c22e01f 100644
{
if (pm_power_off)
pm_power_off();
-@@ -1331,7 +1329,17 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1347,7 +1345,17 @@ asmlinkage void __init xen_start_kernel(void)
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Work out if we support NX */
@@ -27374,7 +27374,7 @@ index 1fbe75a..c22e01f 100644
xen_setup_features();
-@@ -1362,13 +1370,6 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1378,13 +1386,6 @@ asmlinkage void __init xen_start_kernel(void)
machine_ops = xen_machine_ops;
@@ -30040,7 +30040,7 @@ index f877805..403375a 100644
return 0;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
-index 817f0ee..cd3b75d 100644
+index 4dc8024..90108d1 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
@@ -30235,7 +30235,7 @@ index 57ea7f4..789e3c3 100644
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
-index 2783f69..9f4b0cc 100644
+index f8d2287..5aaf4db 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
@@ -30819,7 +30819,7 @@ index 73fa3e1..ab2e9b9 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 0c7f4aa..c4771ed 100644
+index b634f6f..84bb8ba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2182,7 +2182,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
@@ -33808,7 +33808,7 @@ index 611b5f7..cee0bfb 100644
"md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index 0138a72..eab8fc6 100644
+index a48c215..6bda6f4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1810,7 +1810,7 @@ static void end_sync_read(struct bio *bio, int error)
@@ -34965,19 +34965,6 @@ index 9d71c9c..0e4a0ac 100644
{ "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
-diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
-index cb3356c..c302a98 100644
---- a/drivers/net/ethernet/intel/e1000e/e1000.h
-+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
-@@ -181,7 +181,7 @@ struct e1000_info;
- #define E1000_TXDCTL_DMA_BURST_ENABLE \
- (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
- E1000_TXDCTL_COUNT_DESC | \
-- (5 << 16) | /* wthresh must be +1 more than desired */\
-+ (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
-
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index ed5b409..ec37828 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
@@ -42277,7 +42264,7 @@ index 3c14e43..eafa544 100644
+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
+4 4 4 4 4 4
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
-index 8af6414..658c030 100644
+index 38fcfff..0072dcd 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
@@ -47549,7 +47536,7 @@ index 7e81bfc..c3649aa 100644
lock_flocks();
diff --git a/fs/namei.c b/fs/namei.c
-index dd1ed1b..875e998 100644
+index 81bd546..80149d9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -265,16 +265,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -51551,10 +51538,10 @@ index 0000000..1b9afa9
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..07cd799
+index 0000000..3d58260
--- /dev/null
+++ b/grsecurity/gracl.c
-@@ -0,0 +1,4017 @@
+@@ -0,0 +1,4029 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
@@ -53036,6 +53023,7 @@ index 0000000..07cd799
+copy_user_acl(struct gr_arg *arg)
+{
+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
++ struct acl_subject_label *subj_list;
+ struct sprole_pw *sptmp;
+ struct gr_hash_struct *ghash;
+ uid_t *domainlist;
@@ -53164,14 +53152,21 @@ index 0000000..07cd799
+ r_tmp->subj_hash_size *
+ sizeof (struct acl_subject_label *));
+
-+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
-+
-+ if (err)
-+ return err;
++ /* acquire the list of subjects, then NULL out
++ the list prior to parsing the subjects for this role,
++ as during this parsing the list is replaced with a list
++ of *nested* subjects for the role
++ */
++ subj_list = r_tmp->hash->first;
+
+ /* set nested subject list to null */
+ r_tmp->hash->first = NULL;
+
++ err = copy_user_subjs(subj_list, r_tmp);
++
++ if (err)
++ return err;
++
+ insert_acl_role_label(r_tmp);
+ }
+
@@ -54180,8 +54175,9 @@ index 0000000..07cd799
+ matchpo->mode |= GR_DELETED;
+ FOR_EACH_SUBJECT_END(subj,x)
+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+ if (subj->inode == ino && subj->device == dev)
-+ subj->mode |= GR_DELETED;
++ /* nested subjects aren't in the role's subj_hash table */
++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
++ matchpo->mode |= GR_DELETED;
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
+ matchps->mode |= GR_DELETED;
@@ -54339,6 +54335,9 @@ index 0000000..07cd799
+ subj->inode = ino;
+ subj->device = dev;
+ }
++ /* nested subjects aren't in the role's subj_hash table */
++ update_acl_obj_label(matchn->inode, matchn->device,
++ ino, dev, subj);
+ FOR_EACH_NESTED_SUBJECT_END(subj)
+ FOR_EACH_SUBJECT_START(role, subj, x)
+ update_acl_obj_label(matchn->inode, matchn->device,
@@ -66364,7 +66363,7 @@ index 02e6167..54824f7 100644
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
diff --git a/kernel/audit.c b/kernel/audit.c
-index ea3b7b6..c260d34 100644
+index a8c84be..8bd034c 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
@@ -67990,7 +67989,7 @@ index 91c32a0..7b88d63 100644
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contending_point[i],
diff --git a/kernel/module.c b/kernel/module.c
-index 4edbd9c..165e780 100644
+index 9ad9ee9..de7a157 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -58,6 +58,7 @@
@@ -68516,7 +68515,7 @@ index 4edbd9c..165e780 100644
pr_debug("\t0x%lx %s\n",
(long)shdr->sh_addr, info->secstrings + shdr->sh_name);
}
-@@ -2759,12 +2859,12 @@ static void flush_module_icache(const struct module *mod)
+@@ -2763,12 +2863,12 @@ static void flush_module_icache(const struct module *mod)
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
@@ -68535,7 +68534,7 @@ index 4edbd9c..165e780 100644
set_fs(old_fs);
}
-@@ -2834,8 +2934,10 @@ out:
+@@ -2838,8 +2938,10 @@ out:
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
@@ -68548,7 +68547,7 @@ index 4edbd9c..165e780 100644
}
int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -2848,7 +2950,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2852,7 +2954,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
static int post_relocation(struct module *mod, const struct load_info *info)
{
/* Sort exception table now relocations are done. */
@@ -68558,7 +68557,7 @@ index 4edbd9c..165e780 100644
/* Copy relocated percpu area over. */
percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
-@@ -2899,9 +3003,38 @@ static struct module *load_module(void __user *umod,
+@@ -2903,9 +3007,38 @@ static struct module *load_module(void __user *umod,
if (err)
goto free_unload;
@@ -68597,7 +68596,7 @@ index 4edbd9c..165e780 100644
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(mod, &info);
if (err < 0)
-@@ -2917,13 +3050,6 @@ static struct module *load_module(void __user *umod,
+@@ -2921,13 +3054,6 @@ static struct module *load_module(void __user *umod,
flush_module_icache(mod);
@@ -68611,7 +68610,7 @@ index 4edbd9c..165e780 100644
/* Mark state as coming so strong_try_module_get() ignores us. */
mod->state = MODULE_STATE_COMING;
-@@ -2981,11 +3107,10 @@ static struct module *load_module(void __user *umod,
+@@ -2985,11 +3111,10 @@ static struct module *load_module(void __user *umod,
unlock:
mutex_unlock(&module_mutex);
synchronize_sched();
@@ -68624,7 +68623,7 @@ index 4edbd9c..165e780 100644
free_unload:
module_unload_free(mod);
free_module:
-@@ -3026,16 +3151,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+@@ -3030,16 +3155,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
MODULE_STATE_COMING, mod);
/* Set RO and NX regions for core */
@@ -68649,7 +68648,7 @@ index 4edbd9c..165e780 100644
do_mod_ctors(mod);
/* Start the module */
-@@ -3081,11 +3206,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+@@ -3085,11 +3210,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
@@ -68667,7 +68666,7 @@ index 4edbd9c..165e780 100644
mutex_unlock(&module_mutex);
return 0;
-@@ -3116,10 +3242,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3120,10 +3246,16 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
@@ -68687,7 +68686,7 @@ index 4edbd9c..165e780 100644
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
-@@ -3354,7 +3486,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3358,7 +3490,7 @@ static int m_show(struct seq_file *m, void *p)
char buf[8];
seq_printf(m, "%s %u",
@@ -68696,7 +68695,7 @@ index 4edbd9c..165e780 100644
print_unload_info(m, mod);
/* Informative for users. */
-@@ -3363,7 +3495,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3367,7 +3499,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading":
"Live");
/* Used by oprofile and other similar tools. */
@@ -68705,7 +68704,7 @@ index 4edbd9c..165e780 100644
/* Taints info */
if (mod->taints)
-@@ -3399,7 +3531,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -3403,7 +3535,17 @@ static const struct file_operations proc_modules_operations = {
static int __init proc_modules_init(void)
{
@@ -68723,7 +68722,7 @@ index 4edbd9c..165e780 100644
return 0;
}
module_init(proc_modules_init);
-@@ -3458,12 +3600,12 @@ struct module *__module_address(unsigned long addr)
+@@ -3462,12 +3604,12 @@ struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -68739,7 +68738,7 @@ index 4edbd9c..165e780 100644
return mod;
return NULL;
}
-@@ -3497,11 +3639,20 @@ bool is_module_text_address(unsigned long addr)
+@@ -3501,11 +3643,20 @@ bool is_module_text_address(unsigned long addr)
*/
struct module *__module_text_address(unsigned long addr)
{
@@ -70660,7 +70659,7 @@ index f113755..ec24223 100644
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_clear_oneshot(cpu);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index d3b91e7..2a4be68 100644
+index f791637..00051de 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -14,6 +14,7 @@
@@ -70799,10 +70798,10 @@ index 0b537f2..40d6c20 100644
return -ENOMEM;
return 0;
diff --git a/kernel/timer.c b/kernel/timer.c
-index 8c5e7b9..968d02c 100644
+index 46ef2b1..ad081f144 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1375,7 +1375,7 @@ void update_process_times(int user_tick)
+@@ -1377,7 +1377,7 @@ void update_process_times(int user_tick)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -74469,7 +74468,7 @@ index 0f3b7cd..c5652b6 100644
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
diff --git a/mm/shmem.c b/mm/shmem.c
-index d4e184e..9953cdd 100644
+index d2eeca1..3f160be 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,7 +31,7 @@
@@ -74490,7 +74489,7 @@ index d4e184e..9953cdd 100644
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
-@@ -2592,8 +2592,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2594,8 +2594,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
diff --git a/3.6.2/4430_grsec-remove-localversion-grsec.patch b/3.6.3/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.6.2/4430_grsec-remove-localversion-grsec.patch
+++ b/3.6.3/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.6.2/4435_grsec-mute-warnings.patch b/3.6.3/4435_grsec-mute-warnings.patch
index e1a7a3c..e1a7a3c 100644
--- a/3.6.2/4435_grsec-mute-warnings.patch
+++ b/3.6.3/4435_grsec-mute-warnings.patch
diff --git a/3.6.2/4440_grsec-remove-protected-paths.patch b/3.6.3/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.6.2/4440_grsec-remove-protected-paths.patch
+++ b/3.6.3/4440_grsec-remove-protected-paths.patch
diff --git a/3.6.2/4450_grsec-kconfig-default-gids.patch b/3.6.3/4450_grsec-kconfig-default-gids.patch
index d4b0b7e..d4b0b7e 100644
--- a/3.6.2/4450_grsec-kconfig-default-gids.patch
+++ b/3.6.3/4450_grsec-kconfig-default-gids.patch
diff --git a/3.6.2/4465_selinux-avc_audit-log-curr_ip.patch b/3.6.3/4465_selinux-avc_audit-log-curr_ip.patch
index 4fb50f4..4fb50f4 100644
--- a/3.6.2/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.6.3/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.6.2/4470_disable-compat_vdso.patch b/3.6.3/4470_disable-compat_vdso.patch
index 4a1947b..4a1947b 100644
--- a/3.6.2/4470_disable-compat_vdso.patch
+++ b/3.6.3/4470_disable-compat_vdso.patch