Diffstat (limited to '3.2.51/1048_linux-3.2.49.patch')
-rw-r--r--  3.2.51/1048_linux-3.2.49.patch | 2970
1 files changed, 2970 insertions, 0 deletions
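This commit adds the upstream 3.2.48 → 3.2.49 incremental stable patch (2970 lines) to the 3.2.51 patchset directory; the Makefile hunk below bumps SUBLEVEL from 48 to 49. As a rough sketch only — the tree layout and relative paths here are assumptions, not part of the patchset — such an incremental patch would typically be applied from the root of a vanilla 3.2.48 source tree:

    # assumed layout: kernel tree and patchset directory side by side
    cd linux-3.2.48
    patch -p1 --dry-run < ../3.2.51/1048_linux-3.2.49.patch   # check it applies cleanly first
    patch -p1 < ../3.2.51/1048_linux-3.2.49.patch
    grep '^SUBLEVEL' Makefile                                  # should now report SUBLEVEL = 49

The rest of this page is the patch itself, reproduced verbatim below.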
diff --git a/3.2.51/1048_linux-3.2.49.patch b/3.2.51/1048_linux-3.2.49.patch
new file mode 100644
index 0000000..2dab0cf
--- /dev/null
+++ b/3.2.51/1048_linux-3.2.49.patch
@@ -0,0 +1,2970 @@
+diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
+index 475bb4a..65da157 100644
+--- a/Documentation/i2c/busses/i2c-piix4
++++ b/Documentation/i2c/busses/i2c-piix4
+@@ -8,7 +8,7 @@ Supported adapters:
+ Datasheet: Only available via NDA from ServerWorks
+ * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
+ Datasheet: Not publicly available
+- * AMD Hudson-2
++ * AMD Hudson-2, CZ
+ Datasheet: Not publicly available
+ * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
+ Datasheet: Publicly available at the SMSC website http://www.smsc.com
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 83f156e..8659eba 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -159,7 +159,7 @@ S: Maintained
+ F: drivers/net/ethernet/realtek/r8169.c
+
+ 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-serial@vger.kernel.org
+ W: http://serial.sourceforge.net
+ S: Maintained
+@@ -1781,9 +1781,9 @@ X: net/wireless/wext*
+
+ CHAR and MISC DRIVERS
+ M: Arnd Bergmann <arnd@arndb.de>
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+-S: Maintained
++S: Supported
+ F: drivers/char/*
+ F: drivers/misc/*
+
+@@ -2315,7 +2315,7 @@ F: lib/lru_cache.c
+ F: Documentation/blockdev/drbd/
+
+ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
+ S: Supported
+ F: Documentation/kobject.txt
+@@ -6257,15 +6257,16 @@ S: Maintained
+ F: arch/alpha/kernel/srm_env.c
+
+ STABLE BRANCH
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: stable@vger.kernel.org
+-S: Maintained
++S: Supported
++F: Documentation/stable_kernel_rules.txt
+
+ STAGING SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+ L: devel@driverdev.osuosl.org
+-S: Maintained
++S: Supported
+ F: drivers/staging/
+
+ STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
+@@ -6654,8 +6655,8 @@ S: Maintained
+ K: ^Subject:.*(?i)trivial
+
+ TTY LAYER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
+-S: Maintained
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++S: Supported
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
+ F: drivers/tty/*
+ F: drivers/tty/serial/serial_core.c
+@@ -6943,7 +6944,7 @@ S: Maintained
+ F: drivers/usb/serial/digi_acceleport.c
+
+ USB SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ S: Supported
+ F: Documentation/usb/usb-serial.txt
+@@ -6958,9 +6959,8 @@ S: Maintained
+ F: drivers/usb/serial/empeg.c
+
+ USB SERIAL KEYSPAN DRIVER
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+-W: http://www.kroah.com/linux/
+ S: Maintained
+ F: drivers/usb/serial/*keyspan*
+
+@@ -6988,7 +6988,7 @@ F: Documentation/video4linux/sn9c102.txt
+ F: drivers/media/video/sn9c102/
+
+ USB SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ W: http://www.linux-usb.org
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
+@@ -7075,7 +7075,7 @@ F: fs/hppfs/
+
+ USERSPACE I/O (UIO)
+ M: "Hans J. Koch" <hjk@hansjkoch.de>
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ S: Maintained
+ F: Documentation/DocBook/uio-howto.tmpl
+ F: drivers/uio/
+diff --git a/Makefile b/Makefile
+index 299e2eb..2e3d791 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index a559ee7..778d248 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -795,6 +795,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ struct frame_tail __user *tail;
+
+
++ perf_callchain_store(entry, regs->ARM_pc);
+ tail = (struct frame_tail __user *)regs->ARM_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index fb9bb46..2c8890a 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -74,7 +74,7 @@
+ #endif
+
+ int boot_cpuid = 0;
+-int __initdata spinning_secondaries;
++int spinning_secondaries;
+ u64 ppc64_pft_size;
+
+ /* Pick defaults since we might want to patch instructions
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 054cc01..d50a821 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+ /* snapshots of runstate info */
+ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
+
+-/* unused ns of stolen and blocked time */
++/* unused ns of stolen time */
+ static DEFINE_PER_CPU(u64, xen_residual_stolen);
+-static DEFINE_PER_CPU(u64, xen_residual_blocked);
+
+ /* return an consistent snapshot of 64-bit time/counter value */
+ static u64 get64(const u64 *p)
+@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
+ {
+ struct vcpu_runstate_info state;
+ struct vcpu_runstate_info *snap;
+- s64 blocked, runnable, offline, stolen;
++ s64 runnable, offline, stolen;
+ cputime_t ticks;
+
+ get_runstate_snapshot(&state);
+@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
+ snap = &__get_cpu_var(xen_runstate_snapshot);
+
+ /* work out how much time the VCPU has not been runn*ing* */
+- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
+ runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
+ offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
+
+@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
+ ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
+ __this_cpu_write(xen_residual_stolen, stolen);
+ account_steal_ticks(ticks);
+-
+- /* Add the appropriate number of ticks of blocked time,
+- including any left-overs from last time. */
+- blocked += __this_cpu_read(xen_residual_blocked);
+-
+- if (blocked < 0)
+- blocked = 0;
+-
+- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
+- __this_cpu_write(xen_residual_blocked, blocked);
+- account_idle_ticks(ticks);
+ }
+
+ /* Get the TSC speed from Xen */
+diff --git a/block/genhd.c b/block/genhd.c
+index 6edf228..8bd4ef2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -519,7 +519,7 @@ void register_disk(struct gendisk *disk)
+
+ ddev->parent = disk->driverfs_dev;
+
+- dev_set_name(ddev, disk->disk_name);
++ dev_set_name(ddev, "%s", disk->disk_name);
+
+ /* delay uevents, until we scanned partition table */
+ dev_set_uevent_suppress(ddev, 1);
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 54dd4e3..dc9991f 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -477,7 +477,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
+
+ struct crypto_template *crypto_lookup_template(const char *name)
+ {
+- return try_then_request_module(__crypto_lookup_template(name), name);
++ return try_then_request_module(__crypto_lookup_template(name), "%s",
++ name);
+ }
+ EXPORT_SYMBOL_GPL(crypto_lookup_template);
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 87acc23..0445f52 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -302,6 +302,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -318,6 +319,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+
+ /* AMD */
+ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
++ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+ /* AMD is using RAID class only for ahci controllers */
+ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 0e92326..7a949af 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -360,6 +360,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
++ /* SATA Controller IDE (Coleto Creek) */
++ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
+ { } /* terminate list */
+ };
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 3c92dbd..60def03 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1541,8 +1541,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+- ata_link_online(&ap->pmp_link[pmp])) {
++ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 21b80c5..f63a588 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -389,9 +389,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+
+- /* Class code report is unreliable. */
++ /*
++ * Class code report is unreliable and SRST times
++ * out under certain configurations.
++ */
+ if (link->pmp < 5)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
++ link->flags |= ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+
+ /* port 5 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 5)
+@@ -399,20 +403,17 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ ATA_LFLAG_ASSUME_SEMB;
+ }
+ } else if (vendor == 0x1095 && devid == 0x4723) {
+- /* sil4723 quirks */
+- ata_for_each_link(link, ap, EDGE) {
+- /* link reports offline after LPM */
+- link->flags |= ATA_LFLAG_NO_LPM;
+-
+- /* class code report is unreliable */
+- if (link->pmp < 2)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
+-
+- /* the config device at port 2 locks up on SRST */
+- if (link->pmp == 2)
+- link->flags |= ATA_LFLAG_NO_SRST |
+- ATA_LFLAG_ASSUME_ATA;
+- }
++ /*
++ * sil4723 quirks
++ *
++ * Link reports offline after LPM. Class code report is
++ * unreliable. SIMG PMPs never got SRST reliable and the
++ * config device at port 2 locks up on SRST.
++ */
++ ata_for_each_link(link, ap, EDGE)
++ link->flags |= ATA_LFLAG_NO_LPM |
++ ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+ } else if (vendor == 0x1095 && devid == 0x4726) {
+ /* sil4726 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 40a0fcb..5fb6885 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -598,8 +598,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ if (!lo->sock)
+ return -EINVAL;
+
++ lo->disconnect = 1;
++
+ nbd_send_req(lo, &sreq);
+- return 0;
++ return 0;
+ }
+
+ case NBD_CLEAR_SOCK: {
+@@ -629,6 +631,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->sock = SOCKET_I(inode);
+ if (max_part > 0)
+ bdev->bd_invalidated = 1;
++ lo->disconnect = 0; /* we're connected now */
+ return 0;
+ } else {
+ fput(file);
+@@ -675,7 +678,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+
+ mutex_unlock(&lo->tx_lock);
+
+- thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
++ thread = kthread_create(nbd_thread, lo, "%s",
++ lo->disk->disk_name);
+ if (IS_ERR(thread)) {
+ mutex_lock(&lo->tx_lock);
+ return PTR_ERR(thread);
+@@ -700,6 +704,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ set_capacity(lo->disk, 0);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
++ if (lo->disconnect) /* user requested, ignore socket errors */
++ return 0;
+ return lo->harderror;
+ }
+
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 2678b6f..1331740 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2885,7 +2885,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+ if (lba < 0)
+ return -EINVAL;
+
+- cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
++ cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
+ if (cgc->buffer == NULL)
+ return -ENOMEM;
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index e8eedb7..720cace 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -349,10 +349,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pch->lock, flags);
+-
+ tasklet_kill(&pch->task);
+
++ spin_lock_irqsave(&pch->lock, flags);
++
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 8af25a0..810658e 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -383,7 +383,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ sizeof(u64));
+
+ /* Make sure we flush all writes before updating the writeIndex */
+- smp_wmb();
++ wmb();
+
+ /* Now, update the write location */
+ hv_set_next_write_location(outring_info, next_write_location);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index d2d0a2a..44442d5 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -466,7 +466,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+- smp_mb();
++ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 60f593c..dbd4fa5 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -137,6 +137,7 @@ config I2C_PIIX4
+ ATI SB700
+ ATI SB800
+ AMD Hudson-2
++ AMD CZ
+ Serverworks OSB4
+ Serverworks CSB5
+ Serverworks CSB6
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 6d14ac2..14b588c 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -22,7 +22,7 @@
+ Intel PIIX4, 440MX
+ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
+ ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+- AMD Hudson-2
++ AMD Hudson-2, CZ
+ SMSC Victory66
+
+ Note: we assume there can only be one device, with one SMBus interface.
+@@ -481,6 +481,7 @@ static const struct pci_device_id piix4_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 07cb1a6..6cc8e67 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1076,6 +1076,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+
+ /* Large PTE found which maps this address */
+ unmap_size = PTE_PAGE_SIZE(*pte);
++
++ /* Only unmap from the first pte in the page */
++ if ((unmap_size - 1) & bus_addr)
++ break;
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+@@ -1085,7 +1089,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ unmapped += unmap_size;
+ }
+
+- BUG_ON(!is_power_of_2(unmapped));
++ BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+ return unmapped;
+ }
+diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
+index e4b5c03..4f8c3f7 100644
+--- a/drivers/media/dvb/dvb-core/dmxdev.c
++++ b/drivers/media/dvb/dvb-core/dmxdev.c
+@@ -380,10 +380,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
+ buffer2_len);
+ }
+- if (ret < 0) {
+- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
++ if (ret < 0)
+ dmxdevfilter->buffer.error = ret;
+- }
+ if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
+ dmxdevfilter->state = DMXDEV_STATE_DONE;
+ spin_unlock(&dmxdevfilter->dev->lock);
+@@ -419,10 +417,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ if (ret == buffer1_len)
+ ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+- if (ret < 0) {
+- dvb_ringbuffer_flush(buffer);
++ if (ret < 0)
+ buffer->error = ret;
+- }
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up(&buffer->queue);
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed7a5a6..a3bd0ba 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5584,14 +5584,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
+- /* 8168evl does not automatically pad to minimum length. */
+- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+- skb->len < ETH_ZLEN)) {
+- if (skb_padto(skb, ETH_ZLEN))
+- goto err_update_stats;
+- skb_put(skb, ETH_ZLEN - skb->len);
+- }
+-
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
+index 48ab38a..02c939e 100644
+--- a/drivers/net/wan/dlci.c
++++ b/drivers/net/wan/dlci.c
+@@ -385,21 +385,37 @@ static int dlci_del(struct dlci_add *dlci)
+ struct frad_local *flp;
+ struct net_device *master, *slave;
+ int err;
++ bool found = false;
++
++ rtnl_lock();
+
+ /* validate slave device */
+ master = __dev_get_by_name(&init_net, dlci->devname);
+- if (!master)
+- return -ENODEV;
++ if (!master) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ list_for_each_entry(dlp, &dlci_devs, list) {
++ if (dlp->master == master) {
++ found = true;
++ break;
++ }
++ }
++ if (!found) {
++ err = -ENODEV;
++ goto out;
++ }
+
+ if (netif_running(master)) {
+- return -EBUSY;
++ err = -EBUSY;
++ goto out;
+ }
+
+ dlp = netdev_priv(master);
+ slave = dlp->slave;
+ flp = netdev_priv(slave);
+
+- rtnl_lock();
+ err = (*flp->deassoc)(slave, master);
+ if (!err) {
+ list_del(&dlp->list);
+@@ -408,8 +424,8 @@ static int dlci_del(struct dlci_add *dlci)
+
+ dev_put(slave);
+ }
++out:
+ rtnl_unlock();
+-
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index 3b262ba..c41eb9d 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -3625,7 +3625,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ {
+ int chain;
+- u32 regval;
++ u32 regval, value;
+ u32 ant_div_ctl1;
+ static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
+ AR_PHY_SWITCH_CHAIN_0,
+@@ -3633,7 +3633,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ AR_PHY_SWITCH_CHAIN_2,
+ };
+
+- u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
++ if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
++ ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
++ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
++
++ value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+
+ if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462_10(ah)) {
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+index 4114fe7..4e9b71b 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+@@ -334,6 +334,8 @@
+
+ #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+
++#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
++
+ /*
+ * AGC Field Definitions
+ */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index bcabfbf..5e522e4 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -391,7 +391,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (!caldata) {
+ chan->noisefloor = nf;
+- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ return false;
+ }
+
+@@ -413,6 +412,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
++ ah->caldata->chanmode = chan->chanmode;
+ h = ah->caldata->nfCalHist;
+ default_nf = ath9k_hw_get_default_nf(ah, chan);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 2b8e957..c623527 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1540,7 +1540,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ if (caldata &&
+ (chan->channel != caldata->channel ||
+ (chan->channelFlags & ~CHANNEL_CW_INT) !=
+- (caldata->channelFlags & ~CHANNEL_CW_INT))) {
++ (caldata->channelFlags & ~CHANNEL_CW_INT) ||
++ chan->chanmode != caldata->chanmode)) {
+ /* Operating channel changed, reset channel calibration data */
+ memset(caldata, 0, sizeof(*caldata));
+ ath9k_init_nfcal_hist_buffer(ah, chan);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 0c65a09..dc774cd 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -352,6 +352,7 @@ struct ath9k_rtt_hist {
+ struct ath9k_hw_cal_data {
+ u16 channel;
+ u32 channelFlags;
++ u32 chanmode;
+ int32_t CalValid;
+ int8_t iCoff;
+ int8_t qCoff;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index df3e27c..a59267a 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1688,13 +1688,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+- /*
+- * Preserve the current channel values, before updating
+- * the same channel
+- */
+- if (ah->curchan && (old_pos == pos))
+- ath9k_hw_getnf(ah, ah->curchan);
+-
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
+
+diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
+index b97a40e..36a7ce3 100644
+--- a/drivers/net/wireless/b43/Kconfig
++++ b/drivers/net/wireless/b43/Kconfig
+@@ -28,12 +28,12 @@ config B43
+
+ config B43_BCMA
+ bool "Support for BCMA bus"
+- depends on B43 && BCMA
++ depends on B43 && (BCMA = y || BCMA = B43)
+ default y
+
+ config B43_SSB
+ bool
+- depends on B43 && SSB
++ depends on B43 && (SSB = y || SSB = B43)
+ default y
+
+ # Auto-select SSB PCI-HOST support, if possible
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+index 1e851aa..17a8e96 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ tx_agc[RF90_PATH_A] = 0x10101010;
+ tx_agc[RF90_PATH_B] = 0x10101010;
+ } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+- TXHIGHPWRLEVEL_LEVEL1) {
++ TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+ } else{
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 0984dcf..016ef86 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -367,6 +367,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
++ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+ };
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cab24f7..f0c8c5d 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1123,6 +1123,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+
+ /*
+ * Serverworks CSB5 IDE does not fully support native mode
+diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
+index ea09ff2..5317d94 100644
+--- a/drivers/rtc/rtc-rv3029c2.c
++++ b/drivers/rtc/rtc-rv3029c2.c
+@@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
+ dev_dbg(&client->dev, "alarm IRQ armed\n");
+ } else {
+ /* disable AIE irq */
+- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
++ ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 4f1b10b..3743ac9 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -3,7 +3,7 @@
+ *
+ * Module interface and handling of zfcp data structures.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ /*
+@@ -23,6 +23,7 @@
+ * Christof Schmitt
+ * Martin Petermann
+ * Sven Schuetz
++ * Steffen Maier
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -415,6 +416,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
++ adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
++
+ if (!zfcp_scsi_adapter_register(adapter))
+ return adapter;
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 8c849f0..8bfd579 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+ *
+ * Implementation of FSF commands.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -455,11 +455,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+
+ fc_host_port_name(shost) = nsp->fl_wwpn;
+ fc_host_node_name(shost) = nsp->fl_wwnn;
+- fc_host_port_id(shost) = ntoh24(bottom->s_id);
+- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+- adapter->hydra_version = bottom->adapter_type;
+ adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+ (u16)FSF_STATUS_READS_RECOM);
+@@ -467,6 +464,18 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ if (fc_host_permanent_port_name(shost) == -1)
+ fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
++ zfcp_scsi_set_prot(adapter);
++
++ /* no error return above here, otherwise must fix call chains */
++ /* do not evaluate invalid fields */
++ if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
++ return 0;
++
++ fc_host_port_id(shost) = ntoh24(bottom->s_id);
++ fc_host_speed(shost) = bottom->fc_link_speed;
++
++ adapter->hydra_version = bottom->adapter_type;
++
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+@@ -488,8 +497,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ return -EIO;
+ }
+
+- zfcp_scsi_set_prot(adapter);
+-
+ return 0;
+ }
+
+@@ -534,8 +541,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ adapter->hydra_version = 0;
+
++ /* avoids adapter shutdown to be able to recognize
++ * events such as LINK UP */
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
++ &adapter->status);
+ zfcp_fsf_link_down_info_eval(req,
+ &qtcb->header.fsf_status_qual.link_down_info);
++ if (zfcp_fsf_exchange_config_evaluate(req))
++ return;
+ break;
+ default:
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index b79576b..7b35364 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -311,8 +311,12 @@ static struct scsi_host_template zfcp_scsi_host_template = {
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+- .sg_tablesize = 1, /* adjusted later */
+- .max_sectors = 8, /* adjusted later */
++ .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
++ /* GCD, adjusted later */
++ .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
++ /* GCD, adjusted later */
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7c471eb..fc5a2ef 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4886,10 +4886,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ sense, sense_handle);
+ }
+
+- for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
+- dma_free_coherent(&instance->pdev->dev,
+- kern_sge32[i].length,
+- kbuff_arr[i], kern_sge32[i].phys_addr);
++ for (i = 0; i < ioc->sge_count; i++) {
++ if (kbuff_arr[i])
++ dma_free_coherent(&instance->pdev->dev,
++ kern_sge32[i].length,
++ kbuff_arr[i],
++ kern_sge32[i].phys_addr);
+ }
+
+ megasas_return_cmd(instance, cmd);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 17de348..a11a909 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -79,10 +79,6 @@ static int msix_disable = -1;
+ module_param(msix_disable, int, 0);
+ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+-static int missing_delay[2] = {-1, -1};
+-module_param_array(missing_delay, int, NULL, 0);
+-MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+-
+ static int mpt2sas_fwfault_debug;
+ MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
+@@ -2104,7 +2100,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ }
+
+ /**
+- * _base_update_missing_delay - change the missing delay timers
++ * mpt2sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+@@ -2115,8 +2111,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ * delay, as well as the io missing delay. This should be called at driver
+ * load time.
+ */
+-static void
+-_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++void
++mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+ {
+ u16 dmd, dmd_new, dmd_orignal;
+@@ -4302,9 +4298,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ if (r)
+ goto out_free_resources;
+
+- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+- _base_update_missing_delay(ioc, missing_delay[0],
+- missing_delay[1]);
+
+ return 0;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 3c3babc..aa4daf6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -1029,6 +1029,9 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
+
+ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+
++void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++ u16 device_missing_delay, u8 io_missing_delay);
++
+ int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
+ /* scsih shared API */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 2824a90..987c6d6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF;
+ module_param(max_sectors, ushort, 0);
+ MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+
++static int missing_delay[2] = {-1, -1};
++module_param_array(missing_delay, int, NULL, 0);
++MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
++
+ /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+ #define MPT2SAS_MAX_LUN (16895)
+ static int max_lun = MPT2SAS_MAX_LUN;
+@@ -3930,11 +3934,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+-/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
+-/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
+- */
+- mpi_control |= (0x500);
+-
++ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ /* Make sure Device is not raid volume.
+@@ -7006,11 +7006,14 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
++ u8 retry_count;
+
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n",
++ ioc->name);
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+@@ -7019,19 +7022,39 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+- else
++ else {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
++ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n",
++ ioc->name);
++
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices phys disk start\n", ioc->name);
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+@@ -7041,6 +7064,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+@@ -7050,17 +7080,46 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+- _scsih_add_device(ioc, handle, 0, 1);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 1)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n",
++ ioc->name);
++
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name);
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+@@ -7069,6 +7128,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+@@ -7078,18 +7144,38 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n",
++ ioc->name);
++
+ skip_to_sas:
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n",
++ ioc->name);
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+@@ -7099,6 +7185,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
++ " ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+@@ -7109,12 +7202,31 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+- _scsih_add_device(ioc, handle, 0, 0);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 0)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n",
++ ioc->name);
++
+ printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
+ }
+
+@@ -7206,7 +7318,9 @@ _firmware_event_work(struct work_struct *work)
+ case MPT2SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+
+-
++ if (missing_delay[0] != -1 && missing_delay[1] != -1)
++ mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
++ missing_delay[1]);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+ "from worker thread\n", ioc->name));
+diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
+index d4ed9eb..caac1b2 100644
+--- a/drivers/scsi/osd/osd_uld.c
++++ b/drivers/scsi/osd/osd_uld.c
+@@ -465,7 +465,7 @@ static int osd_probe(struct device *dev)
+ oud->class_dev.class = &osd_uld_class;
+ oud->class_dev.parent = dev;
+ oud->class_dev.release = __remove;
+- error = dev_set_name(&oud->class_dev, disk->disk_name);
++ error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
+ if (error) {
+ OSD_ERR("dev_set_name failed => %d\n", error);
+ goto err_put_cdev;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f44d633..6dace1a 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -138,6 +138,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ char *buffer_data;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
++ static const char temp[] = "temporary ";
+ int len;
+
+ if (sdp->type != TYPE_DISK)
+@@ -146,6 +147,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ * it's not worth the risk */
+ return -EINVAL;
+
++ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
++ buf += sizeof(temp) - 1;
++ sdkp->cache_override = 1;
++ } else {
++ sdkp->cache_override = 0;
++ }
++
+ for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
+ len = strlen(sd_cache_types[i]);
+ if (strncmp(sd_cache_types[i], buf, len) == 0 &&
+@@ -158,6 +166,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ return -EINVAL;
+ rcd = ct & 0x01 ? 1 : 0;
+ wce = ct & 0x02 ? 1 : 0;
++
++ if (sdkp->cache_override) {
++ sdkp->WCE = wce;
++ sdkp->RCD = rcd;
++ return count;
++ }
++
+ if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, NULL))
+ return -EINVAL;
+@@ -2037,6 +2052,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ int old_rcd = sdkp->RCD;
+ int old_dpofua = sdkp->DPOFUA;
+
++
++ if (sdkp->cache_override)
++ return;
++
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+@@ -2518,6 +2537,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
+ sdkp->capacity = 0;
+ sdkp->media_present = 1;
+ sdkp->write_prot = 0;
++ sdkp->cache_override = 0;
+ sdkp->WCE = 0;
+ sdkp->RCD = 0;
+ sdkp->ATO = 0;
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 4163f29..e3e3cd2 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -64,6 +64,7 @@ struct scsi_disk {
+ u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
+ unsigned ATO : 1; /* state of disk ATO bit */
++ unsigned cache_override : 1; /* temp override of WCE,RCD */
+ unsigned WCE : 1; /* state of disk WCE bit */
+ unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 2594a31..926d483 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -541,13 +541,20 @@ out:
+ */
+ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ {
+- if (unlikely(
+- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
+- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
+- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
++ u64 start, end, bound;
++
++ /* unaligned request */
++ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
++ return 0;
++ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
++ return 0;
+
++ start = bio->bi_sector;
++ end = start + (bio->bi_size >> SECTOR_SHIFT);
++ bound = zram->disksize >> SECTOR_SHIFT;
++ /* out of range range */
++ if (unlikely(start >= bound || end >= bound || start > end))
+ return 0;
+- }
+
+ /* I/O request is valid */
+ return 1;
+@@ -702,7 +709,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
++ down_write(&zram->lock);
+ zram_free_page(zram, index);
++ up_write(&zram->lock);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+ }
+
+@@ -713,7 +722,7 @@ static const struct block_device_operations zram_devops = {
+
+ static int create_device(struct zram *zram, int device_id)
+ {
+- int ret = 0;
++ int ret = -ENOMEM;
+
+ init_rwsem(&zram->lock);
+ init_rwsem(&zram->init_lock);
+@@ -723,7 +732,6 @@ static int create_device(struct zram *zram, int device_id)
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+ goto out;
+ }
+
+@@ -733,11 +741,9 @@ static int create_device(struct zram *zram, int device_id)
+ /* gendisk structure */
+ zram->disk = alloc_disk(1);
+ if (!zram->disk) {
+- blk_cleanup_queue(zram->queue);
+ pr_warning("Error allocating disk structure for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+- goto out;
++ goto out_free_queue;
+ }
+
+ zram->disk->major = zram_major;
+@@ -766,11 +772,17 @@ static int create_device(struct zram *zram, int device_id)
+ &zram_disk_attr_group);
+ if (ret < 0) {
+ pr_warning("Error creating sysfs group");
+- goto out;
++ goto out_free_disk;
+ }
+
+ zram->init_done = 0;
++ return 0;
+
++out_free_disk:
++ del_gendisk(zram->disk);
++ put_disk(zram->disk);
++out_free_queue:
++ blk_cleanup_queue(zram->queue);
+ out:
+ return ret;
+ }
+@@ -846,9 +858,11 @@ static void __exit zram_exit(void)
+ for (i = 0; i < zram_num_devices; i++) {
+ zram = &zram_devices[i];
+
++ get_disk(zram->disk);
+ destroy_device(zram);
+ if (zram->init_done)
+ zram_reset_device(zram);
++ put_disk(zram->disk);
+ }
+
+ unregister_blkdev(zram_major, "zram");
+diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
+index e5cd246..87f2fec 100644
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -107,8 +107,9 @@ struct zram {
+ void *compress_buffer;
+ struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
+- struct rw_semaphore lock; /* protect compression buffers and table
+- * against concurrent read and writes */
++ struct rw_semaphore lock; /* protect compression buffers, table,
++ * 32bit stat counters against concurrent
++ * notifications, reads and writes */
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
+index 0ea8ed2..1fae1e9 100644
+--- a/drivers/staging/zram/zram_sysfs.c
++++ b/drivers/staging/zram/zram_sysfs.c
+@@ -186,10 +186,12 @@ static ssize_t mem_used_total_show(struct device *dev,
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
++ down_read(&zram->init_lock);
+ if (zram->init_done) {
+ val = xv_get_total_size_bytes(zram->mem_pool) +
+ ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
+ }
++ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 83dcf49..3b80285 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -419,7 +419,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+- snprintf(auth->name, PAGE_SIZE, "%s", page); \
++ snprintf(auth->name, sizeof(auth->name), "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 6986256..6c9bcdf 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -4083,10 +4083,6 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+- { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+- 0x1000, 0x0012,
+- 0, 0, pbn_b0_bt_2_115200 },
+-
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 8d70fbc..c0b4872 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -940,22 +940,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+ {
+ u8 fcr = ioread8(priv->membase + UART_FCR);
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++ char *error_msg[5] = {};
++ int i = 0;
+
+ /* Reset FIFO */
+ fcr |= UART_FCR_CLEAR_RCVR;
+ iowrite8(fcr, priv->membase + UART_FCR);
+
+ if (lsr & PCH_UART_LSR_ERR)
+- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
++ error_msg[i++] = "Error data in FIFO\n";
+
+- if (lsr & UART_LSR_FE)
+- dev_err(&priv->pdev->dev, "Framing Error\n");
++ if (lsr & UART_LSR_FE) {
++ port->icount.frame++;
++ error_msg[i++] = " Framing Error\n";
++ }
+
+- if (lsr & UART_LSR_PE)
+- dev_err(&priv->pdev->dev, "Parity Error\n");
++ if (lsr & UART_LSR_PE) {
++ port->icount.parity++;
++ error_msg[i++] = " Parity Error\n";
++ }
+
+- if (lsr & UART_LSR_OE)
+- dev_err(&priv->pdev->dev, "Overrun Error\n");
++ if (lsr & UART_LSR_OE) {
++ port->icount.overrun++;
++ error_msg[i++] = " Overrun Error\n";
++ }
++
++ if (tty == NULL) {
++ for (i = 0; error_msg[i] != NULL; i++)
++ dev_err(&priv->pdev->dev, error_msg[i]);
++ }
+ }
+
+ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
+index a5570b6..8d7fb6b 100644
+--- a/drivers/usb/gadget/f_mass_storage.c
++++ b/drivers/usb/gadget/f_mass_storage.c
+@@ -512,6 +512,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+ /* Caller must hold fsg->lock */
+ static void wakeup_thread(struct fsg_common *common)
+ {
++ smp_wmb(); /* ensure the write of bh->state is complete */
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+@@ -731,6 +732,7 @@ static int sleep_thread(struct fsg_common *common)
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
++ smp_rmb(); /* ensure the latest bh->state is visible */
+ return rc;
+ }
+
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5018e33..ec73541 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -271,6 +271,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
+ ctx->size += CTX_SIZE(xhci->hcc_params);
+
+ ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
++ if (!ctx->bytes) {
++ kfree(ctx);
++ return NULL;
++ }
+ memset(ctx->bytes, 0, ctx->size);
+ return ctx;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8ea37bc..b8365a7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362 0x9010
+-#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+-#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+
+@@ -744,8 +742,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 5535c3a..e8c4f0c 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -37,7 +37,13 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
+ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
+- {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
++ {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
+ {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 5855d17..9d8feac 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -42,6 +42,7 @@
+ #include <linux/kd.h>
+ #include <linux/slab.h>
+ #include <linux/vt_kern.h>
++#include <linux/sched.h>
+ #include <linux/selection.h>
+ #include <linux/spinlock.h>
+ #include <linux/ioport.h>
+@@ -1124,11 +1125,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ if (arg) {
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+
+ /*
+ * In 512-character mode, the character map is not contiguous if
+@@ -1139,11 +1144,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ charmap += 2 * cmapsz;
+ arg += cmapsz;
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+ }
+ }
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 53ab273..c103267 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -55,17 +55,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
+ struct backing_dev_info *dst)
+ {
+ struct backing_dev_info *old = inode->i_data.backing_dev_info;
++ bool wakeup_bdi = false;
+
+ if (unlikely(dst == old)) /* deadlock avoidance */
+ return;
+ bdi_lock_two(&old->wb, &dst->wb);
+ spin_lock(&inode->i_lock);
+ inode->i_data.backing_dev_info = dst;
+- if (inode->i_state & I_DIRTY)
++ if (inode->i_state & I_DIRTY) {
++ if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
++ wakeup_bdi = true;
+ list_move(&inode->i_wb_list, &dst->wb.b_dirty);
++ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&old->wb.list_lock);
+ spin_unlock(&dst->wb.list_lock);
++
++ if (wakeup_bdi)
++ bdi_wakeup_thread_delayed(dst);
+ }
+
+ sector_t blkdev_max_block(struct block_device *bdev)
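
The block_dev.c change decides whether the destination bdi needs a wakeup while the writeback list locks are still held, but performs the actual bdi_wakeup_thread_delayed() call only after every lock has been dropped. A rough pthread analogue of that "decide under the lock, notify after unlocking" shape, on an invented work queue (none of these names come from the patch):

    #include <pthread.h>
    #include <stdbool.h>

    struct work_queue {
        pthread_mutex_t lock;
        pthread_cond_t  more_work;
        int             pending;
        bool            worker_idle;
    };

    void queue_work(struct work_queue *q)
    {
        bool need_wakeup = false;

        /* The decision is taken while the state is protected... */
        pthread_mutex_lock(&q->lock);
        q->pending++;
        if (q->worker_idle)
            need_wakeup = true;
        pthread_mutex_unlock(&q->lock);

        /* ...the notification happens after the lock is released, so the
         * woken thread does not immediately block on it. */
        if (need_wakeup)
            pthread_cond_signal(&q->more_work);
    }
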
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index b48f15f..de268a8 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -70,8 +70,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ /*
+ * express utilization in terms of large blocks to avoid
+ * overflow on 32-bit machines.
++ *
++ * NOTE: for the time being, we make bsize == frsize to humor
++ * not-yet-ancient versions of glibc that are broken.
++ * Someday, we will probably want to report a real block
++ * size... whatever that may mean for a network file system!
+ */
+ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
++ buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
+ buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+@@ -79,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_files = le64_to_cpu(st.num_objects);
+ buf->f_ffree = -1;
+ buf->f_namelen = NAME_MAX;
+- buf->f_frsize = PAGE_CACHE_SIZE;
+
+ /* leave fsid little-endian, regardless of host endianness */
+ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index edcbf37..a097817 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -21,7 +21,7 @@
+
+ /* large granularity for statfs utilization stats to facilitate
+ * large volume sizes on 32-bit machines. */
+-#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
++#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
+ #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+
+ #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index 6d02fd5..aab18fe 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -323,14 +323,14 @@ UniToupper(register wchar_t uc)
+ /*
+ * UniStrupr: Upper case a unicode string
+ */
+-static inline wchar_t *
+-UniStrupr(register wchar_t *upin)
++static inline __le16 *
++UniStrupr(register __le16 *upin)
+ {
+- register wchar_t *up;
++ register __le16 *up;
+
+ up = upin;
+ while (*up) { /* For all characters */
+- *up = UniToupper(*up);
++ *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+ up++;
+ }
+ return upin; /* Return input pointer */
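
With the new signature, UniStrupr() treats the buffer as little-endian 16-bit units: each unit is converted to host order, uppercased, and converted back, so the result is also correct on big-endian hosts. A standalone version of that round trip for a NUL-terminated UTF-16LE buffer, with hand-rolled byte-order helpers in place of le16_to_cpu()/cpu_to_le16() and ASCII-only uppercasing, purely for illustration:

    #include <stdint.h>

    /* Assemble and split each 16-bit unit byte by byte, so the code works
     * the same on little- and big-endian hosts. */
    uint16_t le16_to_host(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    void host_to_le16(uint8_t *p, uint16_t v)
    {
        p[0] = (uint8_t)(v & 0xff);
        p[1] = (uint8_t)(v >> 8);
    }

    /* Uppercase a NUL-terminated UTF-16LE string in place (ASCII range only). */
    void utf16le_strupr(uint8_t *s)
    {
        for (;;) {
            uint16_t c = le16_to_host(s);
            if (c == 0)
                break;
            if (c >= 'a' && c <= 'z')
                host_to_le16(s, (uint16_t)(c - 'a' + 'A'));
            s += 2;
        }
    }
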
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 5d9b9ac..cdcd665 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -394,7 +394,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ int rc = 0;
+ int len;
+ char nt_hash[CIFS_NTHASH_SIZE];
+- wchar_t *user;
++ __le16 *user;
+ wchar_t *domain;
+ wchar_t *server;
+
+@@ -419,7 +419,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ return rc;
+ }
+
+- /* convert ses->user_name to unicode and uppercase */
++ /* convert ses->user_name to unicode */
+ len = strlen(ses->user_name);
+ user = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if (user == NULL) {
+@@ -427,7 +427,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ rc = -ENOMEM;
+ return rc;
+ }
+- len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
++ len = cifs_strtoUCS(user, ses->user_name, len, nls_cp);
+ UniStrupr(user);
+
+ rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
+diff --git a/fs/exec.c b/fs/exec.c
+index 312e297..a2d0e51 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1159,13 +1159,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ set_dumpable(current->mm, suid_dumpable);
+ }
+
+- /*
+- * Flush performance counters when crossing a
+- * security domain:
+- */
+- if (!get_dumpable(current->mm))
+- perf_event_exit_task(current);
+-
+ /* An exec changes our domain. We are no longer part of the thread
+ group */
+
+@@ -1229,6 +1222,15 @@ void install_exec_creds(struct linux_binprm *bprm)
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
++
++ /*
++ * Disable monitoring for regular users
++ * when executing setuid binaries. Must
++ * wait until new credentials are committed
++ * by commit_creds() above
++ */
++ if (get_dumpable(current->mm) != SUID_DUMP_USER)
++ perf_event_exit_task(current);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index 642dc6d..1272dfb 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+ (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+ +((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse (bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext3fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ce0bc25..3e8fc80 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4801,7 +4801,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ return error;
+- physical = iloc.bh->b_blocknr << blockbits;
++ physical = (__u64)iloc.bh->b_blocknr << blockbits;
+ offset = EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize;
+ physical += offset;
+@@ -4809,7 +4809,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
+ } else { /* external block */
+- physical = EXT4_I(inode)->i_file_acl << blockbits;
++ physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
+ length = inode->i_sb->s_blocksize;
+ }
+
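
Both replacements in ext4_xattr_fiemap() widen the block number to 64 bits before the shift; without the cast the shift happens in the narrower type of b_blocknr / i_file_acl and silently wraps on large filesystems. The same pitfall reduced to plain C, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t blocknr   = 5000000;   /* hypothetical block number */
        unsigned blockbits = 12;        /* 4 KiB blocks */

        /* Shift performed in 32 bits: the high bits are lost before the
         * result is widened for the assignment. */
        uint64_t wrong = blocknr << blockbits;

        /* Widen first, then shift, as the (__u64) cast in the patch does. */
        uint64_t right = (uint64_t)blocknr << blockbits;

        printf("wrong = %llu\nright = %llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
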
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 025b4b6..45778a6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4335,7 +4335,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+ {
+ struct inode *inode;
+- unsigned long delalloc_blocks;
++ unsigned long long delalloc_blocks;
+
+ inode = dentry->d_inode;
+ generic_fillattr(inode, stat);
+@@ -4352,7 +4352,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ */
+ delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+
+- stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
++ stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+ return 0;
+ }
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 88f97e5..3ca3b7f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ + ((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse(bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext4fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
+index a790821..ea3d1ca 100644
+--- a/fs/hpfs/map.c
++++ b/fs/hpfs/map.c
+@@ -17,7 +17,8 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+ struct quad_buffer_head *qbh, char *id)
+ {
+ secno sec;
+- if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
++ unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
++ if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
+ hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
+ return NULL;
+ }
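
The hpfs_map_bitmap() check is rewritten in terms of the number of bitmap bands, computed as a ceiling division of the filesystem size by the 16384 sectors each band covers, and the band index must be strictly below that count. The arithmetic on its own (constants and names are only illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTORS_PER_BAND_SHIFT 14      /* 1 << 14 = 16384 sectors per band */

    /* Ceiling division: round the size up to a whole number of bands. */
    unsigned band_count(uint32_t fs_size_sectors)
    {
        return (fs_size_sectors + (1u << SECTORS_PER_BAND_SHIFT) - 1)
               >> SECTORS_PER_BAND_SHIFT;
    }

    /* Reject out-of-range band indices before they are used to address
     * on-disk bitmap blocks. */
    bool band_index_valid(uint32_t fs_size_sectors, unsigned bmp_block)
    {
        return bmp_block < band_count(fs_size_sectors);
    }
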
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 98580a3..f760c15 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -553,7 +553,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+ sbi->sb_cp_table = NULL;
+ sbi->sb_c_bitmap = -1;
+ sbi->sb_max_fwd_alloc = 0xffffff;
+-
++
++ if (sbi->sb_fs_size >= 0x80000000) {
++ hpfs_error(s, "invalid size in superblock: %08x",
++ (unsigned)sbi->sb_fs_size);
++ goto bail4;
++ }
++
+ /* Load bitmap directory */
+ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
+ goto bail4;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6ac5bb1..18ea4d9 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -470,10 +470,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+ &transaction->t_outstanding_credits);
+ if (atomic_dec_and_test(&transaction->t_updates))
+ wake_up(&journal->j_wait_updates);
++ tid = transaction->t_tid;
+ spin_unlock(&transaction->t_handle_lock);
+
+ jbd_debug(2, "restarting handle %p\n", handle);
+- tid = transaction->t_tid;
+ need_to_start = !tid_geq(journal->j_commit_request, tid);
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
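
The jbd2 fix reads transaction->t_tid while t_handle_lock is still held and then works only with that snapshot; re-reading the field after the unlock would race with a commit that advances it. The shape of that fix in a plain pthread setting (the struct, field, and function names are invented):

    #include <pthread.h>

    struct transaction {
        pthread_mutex_t handle_lock;
        unsigned long   tid;
        /* ... other state a committing thread may update ... */
    };

    /* Snapshot the tid under the lock and use only the snapshot afterwards;
     * dereferencing t->tid again after the unlock could observe a newer
     * value than the one this handle was restarted against. */
    unsigned long transaction_snapshot_tid(struct transaction *t)
    {
        pthread_mutex_lock(&t->handle_lock);
        unsigned long tid = t->tid;
        pthread_mutex_unlock(&t->handle_lock);
        return tid;
    }
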
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316..99625b8 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -177,8 +177,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
+ */
+ memcpy(p, argp->p, avail);
+ /* step to next page */
+- argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
++ argp->p = page_address(argp->pagelist[0]);
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 0d5ea9c..bef187b 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -6499,6 +6499,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
+ }
+
+ new_oi = OCFS2_I(args->new_inode);
++ /*
++ * Adjust extent record count to reserve space for extended attribute.
++ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
++ */
++ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
++ !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
++ struct ocfs2_extent_list *el = &new_di->id2.i_list;
++ le16_add_cpu(&el->l_count, -(inline_size /
++ sizeof(struct ocfs2_extent_rec)));
++ }
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 6834920..aaebf0f 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
+ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ {
+ int err, over = 0;
++ loff_t pos = file->f_pos;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+ struct inode *dir = file->f_path.dentry->d_inode;
+ struct ubifs_info *c = dir->i_sb->s_fs_info;
+
+- dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
++ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
+
+- if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
++ if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
+ /*
+ * The directory was seek'ed to a senseless position or there
+ * are no more entries.
+ */
+ return 0;
+
++ if (file->f_version == 0) {
++ /*
++ * The file was seek'ed, which means that @file->private_data
++ * is now invalid. This may also be just the first
++ * 'ubifs_readdir()' invocation, in which case
++ * @file->private_data is NULL, and the below code is
++ * basically a no-op.
++ */
++ kfree(file->private_data);
++ file->private_data = NULL;
++ }
++
++ /*
++ * 'generic_file_llseek()' unconditionally sets @file->f_version to
++ * zero, and we use this for detecting whether the file was seek'ed.
++ */
++ file->f_version = 1;
++
+ /* File positions 0 and 1 correspond to "." and ".." */
+- if (file->f_pos == 0) {
++ if (pos == 0) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
+ if (over)
+ return 0;
+- file->f_pos = 1;
++ file->f_pos = pos = 1;
+ }
+
+- if (file->f_pos == 1) {
++ if (pos == 1) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, "..", 2, 1,
+ parent_ino(file->f_path.dentry), DT_DIR);
+@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ goto out;
+ }
+
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ if (!dent) {
+ /*
+ * The directory was seek'ed to and is now readdir'ed.
+- * Find the entry corresponding to @file->f_pos or the
+- * closest one.
++ * Find the entry corresponding to @pos or the closest one.
+ */
+- dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
++ dent_key_init_hash(c, &key, dir->i_ino, pos);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ ubifs_inode(dir)->creat_sqnum);
+
+ nm.len = le16_to_cpu(dent->nlen);
+- over = filldir(dirent, dent->name, nm.len, file->f_pos,
++ over = filldir(dirent, dent->name, nm.len, pos,
+ le64_to_cpu(dent->inum),
+ vfs_dent_type(dent->type));
+ if (over)
+@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ }
+
+ kfree(file->private_data);
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ cond_resched();
++
++ if (file->f_version == 0)
++ /*
++ * The file was seek'ed meanwhile, let's return and start
++ * reading direntries from the new position on the next
++ * invocation.
++ */
++ return 0;
+ }
+
+ out:
+@@ -456,15 +482,13 @@ out:
+
+ kfree(file->private_data);
+ file->private_data = NULL;
++ /* 2 is a special value indicating that there are no more direntries */
+ file->f_pos = 2;
+ return 0;
+ }
+
+-/* If a directory is seeked, we have to free saved readdir() state */
+ static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
+ {
+- kfree(file->private_data);
+- file->private_data = NULL;
+ return generic_file_llseek(file, offset, origin);
+ }
+
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 9bab75f..ace0984 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -531,16 +531,54 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
+ return cgrp->subsys[subsys_id];
+ }
+
+-/*
+- * function to get the cgroup_subsys_state which allows for extra
+- * rcu_dereference_check() conditions, such as locks used during the
+- * cgroup_subsys::attach() methods.
++/**
++ * task_css_set_check - obtain a task's css_set with extra access conditions
++ * @task: the task to obtain css_set for
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * A task's css_set is RCU protected, initialized and exited while holding
++ * task_lock(), and can only be modified while holding both cgroup_mutex
++ * and task_lock() while the task is alive. This macro verifies that the
++ * caller is inside proper critical section and returns @task's css_set.
++ *
++ * The caller can also specify additional allowed conditions via @__c, such
++ * as locks used during the cgroup_subsys::attach() methods.
++ */
++#define task_css_set_check(task, __c) \
++ rcu_dereference_check((task)->cgroups, \
++ lockdep_is_held(&(task)->alloc_lock) || \
++ cgroup_lock_is_held() || (__c))
++
++/**
++ * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
++ * synchronization rules are the same as task_css_set_check().
+ */
+ #define task_subsys_state_check(task, subsys_id, __c) \
+- rcu_dereference_check(task->cgroups->subsys[subsys_id], \
+- lockdep_is_held(&task->alloc_lock) || \
+- cgroup_lock_is_held() || (__c))
++ task_css_set_check((task), (__c))->subsys[(subsys_id)]
++
++/**
++ * task_css_set - obtain a task's css_set
++ * @task: the task to obtain css_set for
++ *
++ * See task_css_set_check().
++ */
++static inline struct css_set *task_css_set(struct task_struct *task)
++{
++ return task_css_set_check(task, false);
++}
+
++/**
++ * task_subsys_state - obtain css for (task, subsys)
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ *
++ * See task_subsys_state_check().
++ */
+ static inline struct cgroup_subsys_state *
+ task_subsys_state(struct task_struct *task, int subsys_id)
+ {
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index a2227f7..32697c1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -327,6 +327,17 @@ static inline unsigned hstate_index_to_shift(unsigned index)
+ return hstates[index].order + PAGE_SHIFT;
+ }
+
++pgoff_t __basepage_index(struct page *page);
++
++/* Return page->index in PAGE_SIZE units */
++static inline pgoff_t basepage_index(struct page *page)
++{
++ if (!PageCompound(page))
++ return page->index;
++
++ return __basepage_index(page);
++}
++
+ #else
+ struct hstate {};
+ #define alloc_huge_page_node(h, nid) NULL
+@@ -345,6 +356,11 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
+ return 1;
+ }
+ #define hstate_index_to_shift(index) 0
++
++static inline pgoff_t basepage_index(struct page *page)
++{
++ return page->index;
++}
+ #endif
+
+ #endif /* _LINUX_HUGETLB_H */
+diff --git a/include/linux/nbd.h b/include/linux/nbd.h
+index d146ca1..e6fe174 100644
+--- a/include/linux/nbd.h
++++ b/include/linux/nbd.h
+@@ -68,6 +68,7 @@ struct nbd_device {
+ u64 bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
+ int xmit_timeout;
++ int disconnect; /* a disconnect has been requested by user */
+ };
+
+ #endif
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 9b9b2aa..3cfcfea 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -819,8 +819,7 @@ struct perf_event {
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+- int mmap_locked;
+- struct user_struct *mmap_user;
++
+ struct ring_buffer *rb;
+ struct list_head rb_entry;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9f21915..8be9b746 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -185,9 +185,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+ static void update_context_time(struct perf_event_context *ctx);
+ static u64 perf_event_time(struct perf_event *event);
+
+-static void ring_buffer_attach(struct perf_event *event,
+- struct ring_buffer *rb);
+-
+ void __weak perf_event_print_debug(void) { }
+
+ extern __weak const char *perf_pmu_name(void)
+@@ -714,8 +711,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
+ {
+ struct perf_event_context *ctx;
+
+- rcu_read_lock();
+ retry:
++ /*
++ * One of the few rules of preemptible RCU is that one cannot do
++ * rcu_read_unlock() while holding a scheduler (or nested) lock when
++ * part of the read side critical section was preemptible -- see
++ * rcu_read_unlock_special().
++ *
++ * Since ctx->lock nests under rq->lock we must ensure the entire read
++ * side critical section is non-preemptible.
++ */
++ preempt_disable();
++ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
+ if (ctx) {
+ /*
+@@ -731,6 +738,8 @@ retry:
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ rcu_read_unlock();
++ preempt_enable();
+ goto retry;
+ }
+
+@@ -740,6 +749,7 @@ retry:
+ }
+ }
+ rcu_read_unlock();
++ preempt_enable();
+ return ctx;
+ }
+
+@@ -1687,7 +1697,16 @@ static int __perf_event_enable(void *info)
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ int err;
+
+- if (WARN_ON_ONCE(!ctx->is_active))
++ /*
++ * There's a time window between 'ctx->is_active' check
++ * in perf_event_enable function and this place having:
++ * - IRQs on
++ * - ctx->lock unlocked
++ *
++ * where the task could be killed and 'ctx' deactivated
++ * by perf_event_exit_task.
++ */
++ if (!ctx->is_active)
+ return -EINVAL;
+
+ raw_spin_lock(&ctx->lock);
+@@ -2939,6 +2958,7 @@ static void free_event_rcu(struct rcu_head *head)
+ }
+
+ static void ring_buffer_put(struct ring_buffer *rb);
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+
+ static void free_event(struct perf_event *event)
+ {
+@@ -2962,8 +2982,22 @@ static void free_event(struct perf_event *event)
+ }
+
+ if (event->rb) {
+- ring_buffer_put(event->rb);
+- event->rb = NULL;
++ struct ring_buffer *rb;
++
++ /*
++ * Can happen when we close an event with re-directed output.
++ *
++ * Since we have a 0 refcount, perf_mmap_close() will skip
++ * over us; possibly making our ring_buffer_put() the last.
++ */
++ mutex_lock(&event->mmap_mutex);
++ rb = event->rb;
++ if (rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* could be last */
++ }
++ mutex_unlock(&event->mmap_mutex);
+ }
+
+ if (is_cgroup_event(event))
+@@ -3201,30 +3235,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
+ unsigned int events = POLL_HUP;
+
+ /*
+- * Race between perf_event_set_output() and perf_poll(): perf_poll()
+- * grabs the rb reference but perf_event_set_output() overrides it.
+- * Here is the timeline for two threads T1, T2:
+- * t0: T1, rb = rcu_dereference(event->rb)
+- * t1: T2, old_rb = event->rb
+- * t2: T2, event->rb = new rb
+- * t3: T2, ring_buffer_detach(old_rb)
+- * t4: T1, ring_buffer_attach(rb1)
+- * t5: T1, poll_wait(event->waitq)
+- *
+- * To avoid this problem, we grab mmap_mutex in perf_poll()
+- * thereby ensuring that the assignment of the new ring buffer
+- * and the detachment of the old buffer appear atomic to perf_poll()
++ * Pin the event->rb by taking event->mmap_mutex; otherwise
++ * perf_event_set_output() can swizzle our rb and make us miss wakeups.
+ */
+ mutex_lock(&event->mmap_mutex);
+-
+- rcu_read_lock();
+- rb = rcu_dereference(event->rb);
+- if (rb) {
+- ring_buffer_attach(event, rb);
++ rb = event->rb;
++ if (rb)
+ events = atomic_xchg(&rb->poll, 0);
+- }
+- rcu_read_unlock();
+-
+ mutex_unlock(&event->mmap_mutex);
+
+ poll_wait(file, &event->waitq, wait);
+@@ -3538,16 +3555,12 @@ static void ring_buffer_attach(struct perf_event *event,
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+- if (!list_empty(&event->rb_entry))
+- goto unlock;
+-
+- list_add(&event->rb_entry, &rb->event_list);
+-unlock:
++ if (list_empty(&event->rb_entry))
++ list_add(&event->rb_entry, &rb->event_list);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+ }
+
+-static void ring_buffer_detach(struct perf_event *event,
+- struct ring_buffer *rb)
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
+ {
+ unsigned long flags;
+
+@@ -3566,13 +3579,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+- if (!rb)
+- goto unlock;
+-
+- list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+- wake_up_all(&event->waitq);
+-
+-unlock:
++ if (rb) {
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
++ wake_up_all(&event->waitq);
++ }
+ rcu_read_unlock();
+ }
+
+@@ -3601,18 +3611,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
+
+ static void ring_buffer_put(struct ring_buffer *rb)
+ {
+- struct perf_event *event, *n;
+- unsigned long flags;
+-
+ if (!atomic_dec_and_test(&rb->refcount))
+ return;
+
+- spin_lock_irqsave(&rb->event_lock, flags);
+- list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+- list_del_init(&event->rb_entry);
+- wake_up_all(&event->waitq);
+- }
+- spin_unlock_irqrestore(&rb->event_lock, flags);
++ WARN_ON_ONCE(!list_empty(&rb->event_list));
+
+ call_rcu(&rb->rcu_head, rb_free_rcu);
+ }
+@@ -3622,26 +3624,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+ struct perf_event *event = vma->vm_file->private_data;
+
+ atomic_inc(&event->mmap_count);
++ atomic_inc(&event->rb->mmap_count);
+ }
+
++/*
++ * A buffer can be mmap()ed multiple times; either directly through the same
++ * event, or through other events by use of perf_event_set_output().
++ *
++ * In order to undo the VM accounting done by perf_mmap() we need to destroy
++ * the buffer here, where we still have a VM context. This means we need
++ * to detach all events redirecting to us.
++ */
+ static void perf_mmap_close(struct vm_area_struct *vma)
+ {
+ struct perf_event *event = vma->vm_file->private_data;
+
+- if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
+- unsigned long size = perf_data_size(event->rb);
+- struct user_struct *user = event->mmap_user;
+- struct ring_buffer *rb = event->rb;
++ struct ring_buffer *rb = event->rb;
++ struct user_struct *mmap_user = rb->mmap_user;
++ int mmap_locked = rb->mmap_locked;
++ unsigned long size = perf_data_size(rb);
++
++ atomic_dec(&rb->mmap_count);
++
++ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
++ return;
++
++ /* Detach current event from the buffer. */
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ mutex_unlock(&event->mmap_mutex);
++
++ /* If there's still other mmap()s of this buffer, we're done. */
++ if (atomic_read(&rb->mmap_count)) {
++ ring_buffer_put(rb); /* can't be last */
++ return;
++ }
++
++ /*
++ * No other mmap()s, detach from all other events that might redirect
++ * into the now unreachable buffer. Somewhat complicated by the
++ * fact that rb::event_lock otherwise nests inside mmap_mutex.
++ */
++again:
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
++ if (!atomic_long_inc_not_zero(&event->refcount)) {
++ /*
++ * This event is en-route to free_event() which will
++ * detach it and remove it from the list.
++ */
++ continue;
++ }
++ rcu_read_unlock();
+
+- atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
+- vma->vm_mm->pinned_vm -= event->mmap_locked;
+- rcu_assign_pointer(event->rb, NULL);
+- ring_buffer_detach(event, rb);
++ mutex_lock(&event->mmap_mutex);
++ /*
++ * Check we didn't race with perf_event_set_output() which can
++ * swizzle the rb from under us while we were waiting to
++ * acquire mmap_mutex.
++ *
++ * If we find a different rb; ignore this event, a next
++ * iteration will no longer find it on the list. We have to
++ * still restart the iteration to make sure we're not now
++ * iterating the wrong list.
++ */
++ if (event->rb == rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* can't be last, we still have one */
++ }
+ mutex_unlock(&event->mmap_mutex);
++ put_event(event);
+
+- ring_buffer_put(rb);
+- free_uid(user);
++ /*
++ * Restart the iteration; either we're on the wrong list or
++ * destroyed its integrity by doing a deletion.
++ */
++ goto again;
+ }
++ rcu_read_unlock();
++
++ /*
++ * It could be there's still a few 0-ref events on the list; they'll
++ * get cleaned up by free_event() -- they'll also still have their
++ * ref on the rb and will free it whenever they are done with it.
++ *
++ * Aside from that, this buffer is 'fully' detached and unmapped,
++ * undo the VM accounting.
++ */
++
++ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
++ vma->vm_mm->pinned_vm -= mmap_locked;
++ free_uid(mmap_user);
++
++ ring_buffer_put(rb); /* could be last */
+ }
+
+ static const struct vm_operations_struct perf_mmap_vmops = {
+@@ -3691,12 +3767,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ return -EINVAL;
+
+ WARN_ON_ONCE(event->ctx->parent_ctx);
++again:
+ mutex_lock(&event->mmap_mutex);
+ if (event->rb) {
+- if (event->rb->nr_pages == nr_pages)
+- atomic_inc(&event->rb->refcount);
+- else
++ if (event->rb->nr_pages != nr_pages) {
+ ret = -EINVAL;
++ goto unlock;
++ }
++
++ if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
++ /*
++ * Raced against perf_mmap_close() through
++ * perf_event_set_output(). Try again, hope for better
++ * luck.
++ */
++ mutex_unlock(&event->mmap_mutex);
++ goto again;
++ }
++
+ goto unlock;
+ }
+
+@@ -3737,19 +3825,27 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ ret = -ENOMEM;
+ goto unlock;
+ }
+- rcu_assign_pointer(event->rb, rb);
++
++ atomic_set(&rb->mmap_count, 1);
++ rb->mmap_locked = extra;
++ rb->mmap_user = get_current_user();
+
+ atomic_long_add(user_extra, &user->locked_vm);
+- event->mmap_locked = extra;
+- event->mmap_user = get_current_user();
+- vma->vm_mm->pinned_vm += event->mmap_locked;
++ vma->vm_mm->pinned_vm += extra;
++
++ ring_buffer_attach(event, rb);
++ rcu_assign_pointer(event->rb, rb);
+
+ unlock:
+ if (!ret)
+ atomic_inc(&event->mmap_count);
+ mutex_unlock(&event->mmap_mutex);
+
+- vma->vm_flags |= VM_RESERVED;
++ /*
++ * Since pinned accounting is per vm we cannot allow fork() to copy our
++ * vma.
++ */
++ vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
+ vma->vm_ops = &perf_mmap_vmops;
+
+ return ret;
+@@ -6114,6 +6210,8 @@ set:
+ if (atomic_read(&event->mmap_count))
+ goto unlock;
+
++ old_rb = event->rb;
++
+ if (output_event) {
+ /* get the rb we want to redirect to */
+ rb = ring_buffer_get(output_event);
+@@ -6121,16 +6219,28 @@ set:
+ goto unlock;
+ }
+
+- old_rb = event->rb;
+- rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
++
++ if (rb)
++ ring_buffer_attach(event, rb);
++
++ rcu_assign_pointer(event->rb, rb);
++
++ if (old_rb) {
++ ring_buffer_put(old_rb);
++ /*
++ * Since we detached before setting the new rb, so that we
++ * could attach the new rb, we could have missed a wakeup.
++ * Provide it now.
++ */
++ wake_up_all(&event->waitq);
++ }
++
+ ret = 0;
+ unlock:
+ mutex_unlock(&event->mmap_mutex);
+
+- if (old_rb)
+- ring_buffer_put(old_rb);
+ out:
+ return ret;
+ }
+@@ -6797,7 +6907,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
+ * child.
+ */
+
+- child_ctx = alloc_perf_context(event->pmu, child);
++ child_ctx = alloc_perf_context(parent_ctx->pmu, child);
+ if (!child_ctx)
+ return -ENOMEM;
+
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index b7971d6..98ac24e 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+ return;
+ }
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ unsigned int nr;
+
+ nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+ if (cpu >= 0) {
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ } else {
+- for_each_online_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ }
+
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 64568a6..a2101bb 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -26,6 +26,10 @@ struct ring_buffer {
+ spinlock_t event_lock;
+ struct list_head event_list;
+
++ atomic_t mmap_count;
++ unsigned long mmap_locked;
++ struct user_struct *mmap_user;
++
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+ };
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 77bccfc..1d0538e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -60,6 +60,7 @@
+ #include <linux/pid.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ptrace.h>
++#include <linux/hugetlb.h>
+
+ #include <asm/futex.h>
+
+@@ -363,7 +364,7 @@ again:
+ } else {
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.inode = page_head->mapping->host;
+- key->shared.pgoff = page_head->index;
++ key->shared.pgoff = basepage_index(page);
+ }
+
+ get_futex_key_refs(key);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 382a6bd..52bdd58 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -539,9 +539,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
+ return 0;
+
+ if (irq_settings_can_request(desc)) {
+- if (desc->action)
+- if (irqflags & desc->action->flags & IRQF_SHARED)
+- canrequest =1;
++ if (!desc->action ||
++ irqflags & desc->action->flags & IRQF_SHARED)
++ canrequest = 1;
+ }
+ irq_put_desc_unlock(desc, flags);
+ return canrequest;
+diff --git a/kernel/printk.c b/kernel/printk.c
+index c0d12ea..16688ec 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -813,9 +813,9 @@ static int console_trylock_for_printk(unsigned int cpu)
+ }
+ }
+ printk_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
+- raw_spin_unlock(&logbuf_lock);
+ return retval;
+ }
+ static const char recursion_bug_msg [] =
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f2f71d7..f8b05a4 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -145,9 +145,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
+ /* now that we have rounded, subtract the extra skew again */
+ j -= cpu * 3;
+
+- if (j <= jiffies) /* rounding ate our timeout entirely; */
+- return original;
+- return j;
++ /*
++ * Make sure j is still in the future. Otherwise return the
++ * unmodified value.
++ */
++ return time_is_after_jiffies(j) ? j : original;
+ }
+
+ /**
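
round_jiffies_common() now compares against the current time with time_is_after_jiffies() instead of a plain "j <= jiffies", so the check stays correct across jiffies wraparound. Outside the kernel the same idea is usually written as a signed difference, roughly as below (the tick type and names are invented for the sketch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Wraparound-safe "a is after b" for a free-running 32-bit tick counter:
     * subtract in unsigned arithmetic and interpret the difference as signed.
     * Valid as long as the two values are less than half the counter range
     * apart. */
    bool tick_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) > 0;
    }

    /* Round a deadline, but never return a value that is already in the past. */
    uint32_t round_deadline(uint32_t now, uint32_t rounded, uint32_t original)
    {
        return tick_after(rounded, now) ? rounded : original;
    }
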
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index cb65454..7c75bbb 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -303,6 +303,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int size;
+ int syscall_nr;
+
+@@ -318,8 +320,11 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->enter_event->event.type, size, 0, 0);
++ sys_data->enter_event->event.type, size, irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -329,7 +334,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ if (!filter_current_check_discard(buffer, sys_data->enter_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+@@ -338,6 +344,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+@@ -350,8 +358,12 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ if (!sys_data)
+ return;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
++ sys_data->exit_event->event.type, sizeof(*entry),
++ irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -361,7 +373,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+
+ if (!filter_current_check_discard(buffer, sys_data->exit_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ int reg_event_syscall_enter(struct ftrace_event_call *call)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2dcd716..ddf2128 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -679,6 +679,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++pgoff_t __basepage_index(struct page *page)
++{
++ struct page *page_head = compound_head(page);
++ pgoff_t index = page_index(page_head);
++ unsigned long compound_idx;
++
++ if (!PageHuge(page_head))
++ return page_index(page);
++
++ if (compound_order(page_head) >= MAX_ORDER)
++ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
++ else
++ compound_idx = page - page_head;
++
++ return (index << compound_order(page_head)) + compound_idx;
++}
++
+ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
+ {
+ struct page *page;
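
__basepage_index() expresses a tail page's position in base-page (PAGE_SIZE) units: the head page's index is scaled up by the compound order and the tail's offset inside the compound page is added. The arithmetic with struct page stripped away (all names and the worked numbers are illustrative):

    #include <stdint.h>

    /* head_index     - index of the huge page head, in huge-page-size units
     * compound_order - log2 of the number of base pages per huge page
     * tail_offset    - offset of this base page within the huge page       */
    uint64_t basepage_index(uint64_t head_index,
                            unsigned compound_order,
                            unsigned long tail_offset)
    {
        return (head_index << compound_order) + tail_offset;
    }

    /* Example: with 4 KiB base pages a 2 MiB huge page has order 9; the 5th
     * base page of the 3rd huge page in a mapping gets index
     * (2 << 9) + 4 = 1028 in 4 KiB units. */
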
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a0b6c50..dd7c019 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1737,6 +1737,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+ conn, code, ident, dlen);
+
++ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
++ return NULL;
++
+ len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+ count = min_t(unsigned int, conn->mtu, len);
+
+@@ -2865,7 +2868,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
+- if (cmd_len != sizeof(*rsp))
++ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ type = __le16_to_cpu(rsp->type);
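
Both l2cap_core.c fixes tighten how far peer-controlled lengths are trusted: the connection MTU must at least cover the L2CAP and command headers before any length arithmetic, and an information response merely has to be no smaller than the fixed part of the structure. A generic sketch of that style of check when parsing a received packet (the header layout and names are invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct cmd_hdr {
        uint8_t  code;
        uint8_t  ident;
        uint16_t len;            /* payload length, host order for this sketch */
    };

    /* Return a pointer to the payload, or NULL if the buffer cannot even hold
     * the fixed header or claims more payload than was actually received. */
    const uint8_t *parse_cmd(const uint8_t *buf, size_t buf_len,
                             size_t *payload_len)
    {
        struct cmd_hdr hdr;

        if (buf_len < sizeof(hdr))              /* too small for the fixed part */
            return NULL;

        memcpy(&hdr, buf, sizeof(hdr));
        if (hdr.len > buf_len - sizeof(hdr))    /* payload would overrun buffer */
            return NULL;

        *payload_len = hdr.len;
        return buf + sizeof(hdr);
    }
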
+diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
+index 214c2bb..9f78c5f 100644
+--- a/net/ceph/auth_none.c
++++ b/net/ceph/auth_none.c
+@@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac)
+ return xi->starting;
+ }
+
++static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
++{
++ return 0;
++}
++
+ /*
+ * the generic auth code decode the global_id, and we carry no actual
+ * authenticate state, so nothing happens here.
+@@ -107,6 +112,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
+ .destroy = destroy,
+ .is_authenticated = is_authenticated,
+ .should_authenticate = should_authenticate,
++ .build_request = build_request,
+ .handle_reply = handle_reply,
+ .create_authorizer = ceph_auth_none_create_authorizer,
+ .destroy_authorizer = ceph_auth_none_destroy_authorizer,
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 3c8bc6e..d148a2b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -902,7 +902,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ per_cvt->assigned = 1;
+ hinfo->nid = per_cvt->cvt_nid;
+
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
++ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index e97df24..8b687da 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2117,7 +2117,6 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+- u16 *reg_cache = codec->reg_cache;
+ int ret;
+
+ /* Apply the update (if any) */
+@@ -2126,16 +2125,19 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ return 0;
+
+ /* If the left PGA is enabled hit that VU bit... */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
+- reg_cache[WM8962_HPOUTL_VOLUME]);
++ ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
++ if (ret & WM8962_HPOUTL_PGA_ENA) {
++ snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
++ return 1;
++ }
+
+ /* ...otherwise the right. The VU is stereo. */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
+- reg_cache[WM8962_HPOUTR_VOLUME]);
++ if (ret & WM8962_HPOUTR_PGA_ENA)
++ snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTR_VOLUME));
+
+- return 0;
++ return 1;
+ }
+
+ /* The VU bits for the speakers are in a different register to the mute
+@@ -3944,7 +3946,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+ int ret;
+ struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
+ struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+- u16 *reg_cache = codec->reg_cache;
+ int i, trigger, irq_pol;
+ bool dmicclk, dmicdat;
+
+@@ -4055,8 +4056,9 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+
+ /* Put the speakers into mono mode? */
+ if (pdata->spk_mono)
+- reg_cache[WM8962_CLASS_D_CONTROL_2]
+- |= WM8962_SPK_MONO;
++ snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
++ WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
++
+
+ /* Micbias setup, detection enable and detection
+ * thresholds. */