author     Mike Pagano <mpagano@gentoo.org>  2014-11-06 14:00:21 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2014-11-06 14:00:21 -0500
commit     45c0048529803f3c7b288d0e790eb13a8a5b20fa (patch)
tree       724b21ac6b97059c9e7e4b04250bb9f0282b27d0
parent     Linux patch 3.12.31 (diff)
download   linux-patches-45c0048529803f3c7b288d0e790eb13a8a5b20fa.tar.gz
           linux-patches-45c0048529803f3c7b288d0e790eb13a8a5b20fa.tar.bz2
           linux-patches-45c0048529803f3c7b288d0e790eb13a8a5b20fa.zip
Linux patch 3.12.32 (3.12-34)
-rw-r--r--  1031_linux-3.12.32.patch  7890
1 file changed, 7890 insertions, 0 deletions
diff --git a/1031_linux-3.12.32.patch b/1031_linux-3.12.32.patch
new file mode 100644
index 00000000..9c5801d9
--- /dev/null
+++ b/1031_linux-3.12.32.patch
@@ -0,0 +1,7890 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+new file mode 100644
+index 000000000000..ea45dd3901e3
+--- /dev/null
++++ b/Documentation/lzo.txt
+@@ -0,0 +1,164 @@
++
++LZO stream format as understood by Linux's LZO decompressor
++===========================================================
++
++Introduction
++
++ This is not a specification. No specification seems to be publicly available
++ for the LZO stream format. This document describes what input format the LZO
++ decompressor, as implemented in the Linux kernel, understands. The file
++ analysed here is lib/lzo/lzo1x_decompress_safe.c. No analysis was made of
++ the compressor or of any other implementation, though it seems likely that
++ the format matches the standard one. The purpose of this document is to
++ better understand what the code does in order to propose more efficient fixes
++ for future bug reports.
++
++Description
++
++ The stream is composed of a series of instructions, operands, and data. The
++ instructions consist of a few bits representing an opcode, and of bits
++ forming the operands for the instruction, whose size and position depend
++ on the opcode and on the number of literals copied by the previous
++ instruction. The operands are used to indicate:
++
++ - a distance when copying data from the dictionary (past output buffer)
++ - a length (number of bytes to copy from dictionary)
++ - the number of literals to copy, which is retained in the variable "state"
++ as a piece of information for the next instructions.
++
++ Depending on the opcode and operands, extra data may optionally follow.
++ This extra data can complement the operand (e.g. a length or a distance
++ encoded using larger values), or be a literal to be copied to the output
++ buffer.
++
++ The first byte of the block follows a different encoding from other bytes; it
++ seems to be optimized for literal use only, since there is no dictionary yet
++ prior to that byte.
++
++ Lengths are always encoded using a variable size, starting with a small
++ number of bits in the operand. If that number of bits isn't enough to
++ represent the length, more can be added by consuming extra bytes, at a
++ rate of at most 255 per extra byte (thus the compression ratio cannot
++ exceed around 255:1). The variable length encoding using #bits is always
++ the same:
++
++ length = byte & ((1 << #bits) - 1)
++ if (!length) {
++ length = ((1 << #bits) - 1)
++ length += 255*(number of zero bytes)
++ length += first-non-zero-byte
++ }
++ length += constant (generally 2 or 3)
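++
++ As an illustration only (this is not the kernel's code, and the helper
++ name decode_length is invented), the rule above could be written in C
++ roughly as follows, ignoring the input bounds checks that a safe
++ decompressor must also perform:
++
++   static size_t decode_length(const unsigned char **in,
++                               unsigned char byte, int bits, size_t base)
++   {
++           size_t len = byte & ((1U << bits) - 1);
++
++           if (!len) {
++                   len = (1U << bits) - 1;
++                   while (**in == 0) {     /* each zero byte adds 255 */
++                           len += 255;
++                           (*in)++;
++                   }
++                   len += *(*in)++;        /* plus first non-zero byte */
++           }
++           return len + base;              /* base is generally 2 or 3 */
++   }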
++
++ For references to the dictionary, distances are relative to the output
++ pointer. Distances are encoded using very few bits belonging to certain
++ ranges, resulting in multiple copy instructions using different encodings.
++ Certain encodings involve one extra byte, others involve two extra bytes
++ forming a little-endian 16-bit quantity (marked LE16 below).
++
++ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
++ are copied before starting the next instruction. The number of literals that
++ were copied may change the meaning and behaviour of the next instruction. In
++ practice, only one instruction needs to know whether 0, fewer than 4, or more
++ literals were copied. This is the information stored in the <state> variable
++ in this implementation. This number of immediate literals to be copied is
++ generally encoded in the last two bits of the instruction but may also be
++ taken from the last two bits of an extra operand (e.g. the distance).
++
++ End of stream is declared when a block copy of distance 0 is seen. Only one
++ instruction may encode this distance (0001HLLL); it takes one LE16 operand
++ for the distance, thus requiring 3 bytes.
++
++ IMPORTANT NOTE: in the code, some length checks are missing because certain
++ instructions are called under the assumption that a certain number of bytes
++ follow, as this has already been guaranteed before parsing the instructions.
++ They just have to "refill" this credit if they consume extra bytes. This is
++ an implementation design choice independent of the algorithm or encoding.
++
++Byte sequences
++
++ First byte encoding:
++
++ 0..17 : follow regular instruction encoding, see below. It is worth
++ noting that codes 16 and 17 will represent a block copy from
++ the dictionary, which is still empty, and that they will
++ therefore always be invalid at this place.
++
++ 18..21 : copy 0..3 literals
++ state = (byte - 17) = 0..3 [ copy <state> literals ]
++ skip byte
++
++ 22..255 : copy literal string
++ length = (byte - 17) = 4..238
++ state = 4 [ don't copy extra literals ]
++ skip byte
++
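++ In C, this first-byte special case might look like the following sketch
++ (illustrative only; the variable names are invented and error handling
++ is omitted):
++
++   unsigned char b = *in++;
++
++   if (b >= 22) {                /* 22..255: copy literal string */
++           length = b - 17;      /* 4..238 literals */
++           state  = 4;           /* don't copy extra literals */
++   } else if (b >= 18) {         /* 18..21: copy 0..3 literals */
++           state  = b - 17;
++           length = state;
++   } else {
++           /* 0..17: fall through to the regular instruction
++            * decoder below (16 and 17 are invalid here since
++            * the dictionary is still empty) */
++   }
++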
++ Instruction encoding:
++
++ 0 0 0 0 X X X X (0..15)
++ Depends on the number of literals copied by the last instruction.
++ If the last instruction did not copy any literal (state == 0), this
++ encoding will be a copy of 4 or more literals, and must be interpreted
++ like this:
++
++ 0 0 0 0 L L L L (0..15) : copy long literal string
++ length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
++ state = 4 (no extra literals are copied)
++
++ If the last instruction copied between 1 and 3 literals (encoded in
++ the instruction's opcode or distance), the instruction is a copy of a
++ 2-byte block from the dictionary within a 1kB distance. It is worth
++ noting that this instruction provides little savings, since it uses 2
++ bytes to encode a copy of 2 other bytes, but it encodes the number of
++ following literals for free. It must be interpreted like this:
++
++ 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance
++ length = 2
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte: H H H H H H H H
++ distance = (H << 2) + D + 1
++
++ If the last instruction copied 4 or more literals (as detected by
++ state == 4), the instruction becomes a copy of a 3-byte block from the
++ dictionary at a 2..3kB distance, and must be interpreted like this:
++
++ 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance
++ length = 3
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte: H H H H H H H H
++ distance = (H << 2) + D + 2049
++
++ 0 0 0 1 H L L L (16..31)
++ Copy of a block within 16..48kB distance (preferably less than 10B)
++ length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16: D D D D D D D D : D D D D D D S S
++ distance = 16384 + (H << 14) + D
++ state = S (copy S literals after this block)
++ End of stream is reached if distance == 16384
++
++ 0 0 1 L L L L L (32..63)
++ Copy of small block within 16kB distance (preferably less than 34B)
++ length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16: D D D D D D D D : D D D D D D S S
++ distance = D + 1
++ state = S (copy S literals after this block)
++
++ 0 1 L D D D S S (64..127)
++ Copy 3-4 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 3 + L
++ Always followed by exactly one byte: H H H H H H H H
++ distance = (H << 3) + D + 1
++
++ 1 L L D D D S S (128..255)
++ Copy 5-8 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 5 + L
++ Always followed by exactly one byte: H H H H H H H H
++ distance = (H << 3) + D + 1
++
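++ Tying the ranges above together, a decoder's dispatch on each instruction
++ byte might look like this sketch (illustrative C only, not the kernel's
++ lzo1x_decompress_safe.c; the literal and match copies are omitted):
++
++   unsigned char op = *in++;
++
++   if (op < 16) {
++           /* 0000XXXX: meaning depends on <state>, see above */
++   } else if (op < 32) {
++           /* 0001HLLL: block within 16..48kB, LE16 follows;
++            * distance == 16384 marks the end of stream */
++   } else if (op < 64) {
++           /* 001LLLLL: small block within 16kB, LE16 follows */
++   } else if (op < 128) {
++           /* 01LDDDSS: copy 3-4 bytes within 2kB, one byte follows */
++   } else {
++           /* 1LLDDDSS: copy 5-8 bytes within 2kB, one byte follows */
++   }
++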
++Authors
++
++ This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
++ analysis of the decompression code available in Linux 3.16-rc5. The code is
++ tricky; it is possible that this document contains mistakes or that a few
++ corner cases were overlooked. In any case, please report any doubts, fixes,
++ or proposed updates to the author(s) so that the document can be updated.
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 290894176142..53838d9c6295 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -425,6 +425,20 @@ fault through the slow path.
+ Since only 19 bits are used to store generation-number on mmio spte, all
+ pages are zapped when there is an overflow.
+
++Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
++times, the last one happening when the generation number is retrieved and
++stored into the MMIO spte. Thus, the MMIO spte might be created based on
++out-of-date information, but with an up-to-date generation number.
++
++To avoid this, the generation number is incremented again after synchronize_srcu
++returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
++memslot update, while some SRCU readers might be using the old copy. We do not
++want to use MMIO sptes created with an odd generation number, and we can do
++this without losing a bit in the MMIO spte. The low bit of the generation is
++not stored in the MMIO spte and is presumed zero when it is extracted out of
++the spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1,
++the next access to the spte will always be a cache miss.
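++
++A minimal sketch of the resulting check (illustrative pseudo-C with invented
++helper names, not KVM's actual code; the packing of the generation into the
++spte bits is omitted):
++
++  static u64 spte_stored_generation(u64 gen)
++  {
++          return gen & ~1ULL;     /* the low bit is never stored */
++  }
++
++  static bool mmio_spte_is_fresh(u64 spte_gen, u64 current_gen)
++  {
++          /* an spte created while current_gen was odd stored an even
++           * value, so it mismatches both during the update (odd vs.
++           * even) and after it completes (the generation moved on) */
++          return spte_gen == current_gen;
++  }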
++
+
+ Further reading
+ ===============
+diff --git a/Makefile b/Makefile
+index 10eda74e4b54..a51d98fee407 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
+index d5bd65f74602..55bb7f39ffe4 100644
+--- a/arch/arm/boot/dts/at91sam9263.dtsi
++++ b/arch/arm/boot/dts/at91sam9263.dtsi
+@@ -506,6 +506,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff80000 0x600>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+@@ -515,6 +516,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff84000 0x600>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
+index 6b2630a92f71..0778e54f1573 100644
+--- a/arch/arm/mach-at91/clock.c
++++ b/arch/arm/mach-at91/clock.c
+@@ -963,6 +963,7 @@ static int __init at91_clock_reset(void)
+ }
+
+ at91_pmc_write(AT91_PMC_SCDR, scdr);
++ at91_pmc_write(AT91_PMC_PCDR, pcdr);
+ if (cpu_is_sama5d3())
+ at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
+
+diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
+index 899af807ef0f..c30a548cee56 100644
+--- a/arch/arm64/include/asm/compat.h
++++ b/arch/arm64/include/asm/compat.h
+@@ -33,8 +33,8 @@ typedef s32 compat_ssize_t;
+ typedef s32 compat_time_t;
+ typedef s32 compat_clock_t;
+ typedef s32 compat_pid_t;
+-typedef u32 __compat_uid_t;
+-typedef u32 __compat_gid_t;
++typedef u16 __compat_uid_t;
++typedef u16 __compat_gid_t;
+ typedef u16 __compat_uid16_t;
+ typedef u16 __compat_gid16_t;
+ typedef u32 __compat_uid32_t;
+diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
+index 2c7dde3c6430..2a5259fd23eb 100644
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -28,9 +28,11 @@
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr1,%4@(8)\n\t"
+@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
+ int hwreg_write( volatile void *regp, unsigned short val )
+ {
+ int ret;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr2,%4@(8)\n\t"
+@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors), "g" (val)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index 5f54a744dcc5..826be86c4248 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -28,8 +28,6 @@
+ #include <asm/synch.h>
+ #include <asm/ppc-opcode.h>
+
+-#define arch_spin_is_locked(x) ((x)->slock != 0)
+-
+ #ifdef CONFIG_PPC64
+ /* use 0x800000yy when locked, where yy == CPU number */
+ #ifdef __BIG_ENDIAN__
+@@ -54,6 +52,12 @@
+ #define SYNC_IO
+ #endif
+
++static inline int arch_spin_is_locked(arch_spinlock_t *lock)
++{
++ smp_mb();
++ return lock->slock != 0;
++}
++
+ /*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
+index 0c9c8d7d0734..170a0346f756 100644
+--- a/arch/powerpc/lib/locks.c
++++ b/arch/powerpc/lib/locks.c
+@@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)
+
+ void arch_spin_unlock_wait(arch_spinlock_t *lock)
+ {
++ smp_mb();
++
+ while (lock->slock) {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ }
+ HMT_medium();
++
++ smp_mb();
+ }
+
+ EXPORT_SYMBOL(arch_spin_unlock_wait);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 7f1f7ac5cf7f..2df491b03687 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -71,6 +71,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
+ return 1;
++ return 0;
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index d60f34dbae89..11068ae1cc09 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -65,6 +65,7 @@ config SPARC64
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_DEBUG_KMEMLEAK
++ select SPARSE_IRQ
+ select RTC_DRV_CMOS
+ select RTC_DRV_BQ4802
+ select RTC_DRV_SUN4V
+diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
+index ca121f0fa3ec..17be9d618335 100644
+--- a/arch/sparc/include/asm/hypervisor.h
++++ b/arch/sparc/include/asm/hypervisor.h
+@@ -2944,6 +2944,16 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+ #endif
+
++#define HV_FAST_T5_GET_PERFREG 0x1a8
++#define HV_FAST_T5_SET_PERFREG 0x1a9
++
++#ifndef __ASSEMBLY__
++unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
++ unsigned long *reg_val);
++unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
++ unsigned long reg_val);
++#endif
++
+ /* Function numbers for HV_CORE_TRAP. */
+ #define HV_CORE_SET_VER 0x00
+ #define HV_CORE_PUTCHAR 0x01
+@@ -2975,6 +2985,7 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ #define HV_GRP_VF_CPU 0x0205
+ #define HV_GRP_KT_CPU 0x0209
+ #define HV_GRP_VT_CPU 0x020c
++#define HV_GRP_T5_CPU 0x0211
+ #define HV_GRP_DIAG 0x0300
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
+index abf6afe82ca8..78c9f2d50991 100644
+--- a/arch/sparc/include/asm/irq_64.h
++++ b/arch/sparc/include/asm/irq_64.h
+@@ -37,7 +37,7 @@
+ *
+ * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
+ */
+-#define NR_IRQS 255
++#define NR_IRQS (2048)
+
+ extern void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+@@ -57,11 +57,8 @@ extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+ unsigned long iclr_base);
+ extern void sun4u_destroy_msi(unsigned int irq);
+
+-extern unsigned char irq_alloc(unsigned int dev_handle,
+- unsigned int dev_ino);
+-#ifdef CONFIG_PCI_MSI
++extern unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
+ extern void irq_free(unsigned int irq);
+-#endif
+
+ extern void __init init_IRQ(void);
+ extern void fixup_irqs(void);
+diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
+index bdb524a7b814..8732ed391aff 100644
+--- a/arch/sparc/include/asm/ldc.h
++++ b/arch/sparc/include/asm/ldc.h
+@@ -53,13 +53,14 @@ struct ldc_channel;
+ /* Allocate state for a channel. */
+ extern struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+- void *event_arg);
++ void *event_arg,
++ const char *name);
+
+ /* Shut down and free state for a channel. */
+ extern void ldc_free(struct ldc_channel *lp);
+
+ /* Register TX and RX queues of the link with the hypervisor. */
+-extern int ldc_bind(struct ldc_channel *lp, const char *name);
++extern int ldc_bind(struct ldc_channel *lp);
+
+ /* For non-RAW protocols we need to complete a handshake before
+ * communication can proceed. ldc_connect() does that, if the
+diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
+index 76092c4dd277..f668797ae234 100644
+--- a/arch/sparc/include/asm/mmu_64.h
++++ b/arch/sparc/include/asm/mmu_64.h
+@@ -93,7 +93,6 @@ typedef struct {
+ spinlock_t lock;
+ unsigned long sparc64_ctx_val;
+ unsigned long huge_pte_count;
+- struct page *pgtable_page;
+ struct tsb_config tsb_block[MM_NUM_TSBS];
+ struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
+ } mm_context_t;
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a12dbe3b7762..e48fdf4e16ff 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+ * preferably as early as possible. Pass it the romvec pointer.
+ */
+-extern void prom_init(void *cif_handler, void *cif_stack);
++extern void prom_init(void *cif_handler);
++extern void prom_init_report(void);
+
+ /* Boot argument acquisition, returns the boot command line string. */
+ extern char *prom_getbootargs(void);
+diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
+index e15538899f3d..b18e602fcac4 100644
+--- a/arch/sparc/include/asm/page_64.h
++++ b/arch/sparc/include/asm/page_64.h
+@@ -15,7 +15,10 @@
+ #define DCACHE_ALIASING_POSSIBLE
+ #endif
+
+-#define HPAGE_SHIFT 22
++#define HPAGE_SHIFT 23
++#define REAL_HPAGE_SHIFT 22
++
++#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+@@ -53,19 +56,22 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
+ /* These are used to make use of C type-checking.. */
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long iopte; } iopte_t;
+-typedef struct { unsigned int pmd; } pmd_t;
+-typedef struct { unsigned int pgd; } pgd_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+
+ #define pte_val(x) ((x).pte)
+ #define iopte_val(x) ((x).iopte)
+ #define pmd_val(x) ((x).pmd)
++#define pud_val(x) ((x).pud)
+ #define pgd_val(x) ((x).pgd)
+ #define pgprot_val(x) ((x).pgprot)
+
+ #define __pte(x) ((pte_t) { (x) } )
+ #define __iopte(x) ((iopte_t) { (x) } )
+ #define __pmd(x) ((pmd_t) { (x) } )
++#define __pud(x) ((pud_t) { (x) } )
+ #define __pgd(x) ((pgd_t) { (x) } )
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+
+@@ -73,19 +79,22 @@ typedef struct { unsigned long pgprot; } pgprot_t;
+ /* .. while these make it easier on the compiler */
+ typedef unsigned long pte_t;
+ typedef unsigned long iopte_t;
+-typedef unsigned int pmd_t;
+-typedef unsigned int pgd_t;
++typedef unsigned long pmd_t;
++typedef unsigned long pud_t;
++typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+
+ #define pte_val(x) (x)
+ #define iopte_val(x) (x)
+ #define pmd_val(x) (x)
++#define pud_val(x) (x)
+ #define pgd_val(x) (x)
+ #define pgprot_val(x) (x)
+
+ #define __pte(x) (x)
+ #define __iopte(x) (x)
+ #define __pmd(x) (x)
++#define __pud(x) (x)
+ #define __pgd(x) (x)
+ #define __pgprot(x) (x)
+
+@@ -93,18 +102,33 @@ typedef unsigned long pgprot_t;
+
+ typedef pte_t *pgtable_t;
+
++extern unsigned long sparc64_va_hole_top;
++extern unsigned long sparc64_va_hole_bottom;
++
++/* The next two defines specify the actual exclusion region we
++ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
++ */
++#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
++#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
++
+ #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
+- (_AC(0x0000000070000000,UL)) : \
+- (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
++ _AC(0x0000000070000000,UL) : \
++ VA_EXCLUDE_END)
+
+ #include <asm-generic/memory_model.h>
+
++extern unsigned long PAGE_OFFSET;
++
+ #endif /* !(__ASSEMBLY__) */
+
+-/* We used to stick this into a hard-coded global register (%g4)
+- * but that does not make sense anymore.
++/* The maximum number of physical memory address bits we support. The
++ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
++ * evaluates to.
+ */
+-#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)
++#define MAX_PHYS_ADDRESS_BITS 53
++
++#define ILOG2_4MB 22
++#define ILOG2_256MB 28
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index bcfe063bce23..2c8d41fb13a4 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -15,6 +15,13 @@
+
+ extern struct kmem_cache *pgtable_cache;
+
++static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
++{
++ pgd_set(pgd, pud);
++}
++
++#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
+@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ kmem_cache_free(pgtable_cache, pgd);
+ }
+
+-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
++{
++ pud_set(pud, pmd);
++}
++
++#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ return kmem_cache_alloc(pgtable_cache,
++ GFP_KERNEL|__GFP_REPEAT);
++}
++
++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
++{
++ kmem_cache_free(pgtable_cache, pud);
++}
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
+ #define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, false)
+
++#define __pud_free_tlb(tlb, pud, addr) \
++ pgtable_free_tlb(tlb, pud, false)
++
+ #endif /* _SPARC64_PGALLOC_H */
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 32aa0b8c49e2..e8dfabf156c7 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -20,8 +20,6 @@
+ #include <asm/page.h>
+ #include <asm/processor.h>
+
+-#include <asm-generic/pgtable-nopud.h>
+-
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+ * The page copy blockops can use 0x6000000 to 0x8000000.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+@@ -42,26 +40,35 @@
+ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
+ #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
+ #define VMALLOC_START _AC(0x0000000100000000,UL)
+-#define VMALLOC_END _AC(0x0000010000000000,UL)
+-#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
+-
+-#define vmemmap ((struct page *)VMEMMAP_BASE)
++#define VMEMMAP_BASE VMALLOC_END
+
+ /* PMD_SHIFT determines the size of the area a second-level page
+ * table can map
+ */
+-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4))
++#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
+ #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
+ #define PMD_MASK (~(PMD_SIZE-1))
+-#define PMD_BITS (PAGE_SHIFT - 2)
++#define PMD_BITS (PAGE_SHIFT - 3)
+
+-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
++/* PUD_SHIFT determines the size of the area a third-level page
++ * table can map
++ */
++#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PUD_BITS (PAGE_SHIFT - 3)
++
++/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
++#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
+ #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+-#define PGDIR_BITS (PAGE_SHIFT - 2)
++#define PGDIR_BITS (PAGE_SHIFT - 3)
+
+-#if (PGDIR_SHIFT + PGDIR_BITS) != 44
++#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
++#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
++#endif
++
++#if (PGDIR_SHIFT + PGDIR_BITS) != 53
+ #error Page table parameters do not cover virtual address space properly.
+ #endif
+
+@@ -69,36 +76,20 @@
+ #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
+ #endif
+
+-/* PMDs point to PTE tables which are 4K aligned. */
+-#define PMD_PADDR _AC(0xfffffffe,UL)
+-#define PMD_PADDR_SHIFT _AC(11,UL)
+-
+-#define PMD_ISHUGE _AC(0x00000001,UL)
++#ifndef __ASSEMBLY__
+
+-/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge
+- * pages, this frees up a bunch of bits in the layout that we can
+- * use for the protection settings and software metadata.
+- */
+-#define PMD_HUGE_PADDR _AC(0xfffff800,UL)
+-#define PMD_HUGE_PROTBITS _AC(0x000007ff,UL)
+-#define PMD_HUGE_PRESENT _AC(0x00000400,UL)
+-#define PMD_HUGE_WRITE _AC(0x00000200,UL)
+-#define PMD_HUGE_DIRTY _AC(0x00000100,UL)
+-#define PMD_HUGE_ACCESSED _AC(0x00000080,UL)
+-#define PMD_HUGE_EXEC _AC(0x00000040,UL)
+-#define PMD_HUGE_SPLITTING _AC(0x00000020,UL)
+-
+-/* PGDs point to PMD tables which are 8K aligned. */
+-#define PGD_PADDR _AC(0xfffffffc,UL)
+-#define PGD_PADDR_SHIFT _AC(11,UL)
++extern unsigned long VMALLOC_END;
+
+-#ifndef __ASSEMBLY__
++#define vmemmap ((struct page *)VMEMMAP_BASE)
+
+ #include <linux/sched.h>
+
++bool kern_addr_valid(unsigned long addr);
++
+ /* Entries per page directory level. */
+-#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-4))
++#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
+ #define PTRS_PER_PMD (1UL << PMD_BITS)
++#define PTRS_PER_PUD (1UL << PUD_BITS)
+ #define PTRS_PER_PGD (1UL << PGDIR_BITS)
+
+ /* Kernel has a separate 44bit address space. */
+@@ -107,6 +98,9 @@
+ #define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
++#define pud_ERROR(e) \
++ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
++ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
+ #define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
+@@ -117,6 +111,8 @@
+ #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
+ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+ #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */
++#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
++#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE
+
+ /* Advertise support for _PAGE_SPECIAL */
+ #define __HAVE_ARCH_PTE_SPECIAL
+@@ -130,6 +126,7 @@
+ #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
+ #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+ #define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */
++#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */
+ #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
+ #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
+ #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+@@ -160,6 +157,7 @@
+ #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
+ #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
+ #define _PAGE_SPECIAL_4V _AC(0x0200000000000000,UL) /* Special page */
++#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page */
+ #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
+ #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
+ #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
+@@ -185,6 +183,10 @@
+ #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
+ #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
+
++#if REAL_HPAGE_SHIFT != 22
++#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
++#endif
++
+ #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
+ #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
+
+@@ -244,16 +246,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
+-#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+-
+-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+-
+-static inline pmd_t pmd_mkhuge(pmd_t pmd)
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+- /* Do nothing, mk_pmd() does this part. */
+- return pmd;
++ pte_t pte = pfn_pte(page_nr, pgprot);
++
++ return __pmd(pte_val(pte));
+ }
++#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+ #endif
+
+ /* This one can be done with two shifts. */
+@@ -282,8 +281,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+ {
+ unsigned long mask, tmp;
+
+- /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
+- * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
++ /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
++ * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
+ *
+ * Even if we use negation tricks the result is still a 6
+ * instruction sequence, so don't try to play fancy and just
+@@ -313,15 +312,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+ " .previous\n"
+ : "=r" (mask), "=r" (tmp)
+ : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
+- _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+- _PAGE_SPECIAL),
++ _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
++ _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
+ "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+- _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+- _PAGE_SPECIAL));
++ _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
++ _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
+
+ return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
+ }
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
++{
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_modify(pte, newprot);
++
++ return __pmd(pte_val(pte));
++}
++#endif
++
+ static inline pte_t pgoff_to_pte(unsigned long off)
+ {
+ off <<= PAGE_SHIFT;
+@@ -362,7 +372,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
+ */
+ #define pgprot_noncached pgprot_noncached
+
+-#ifdef CONFIG_HUGETLB_PAGE
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ static inline pte_t pte_mkhuge(pte_t pte)
+ {
+ unsigned long mask;
+@@ -380,6 +390,17 @@ static inline pte_t pte_mkhuge(pte_t pte)
+
+ return __pte(pte_val(pte) | mask);
+ }
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t pmd_mkhuge(pmd_t pmd)
++{
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_mkhuge(pte);
++ pte_val(pte) |= _PAGE_PMD_HUGE;
++
++ return __pmd(pte_val(pte));
++}
++#endif
+ #endif
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -631,95 +652,136 @@ static inline unsigned long pte_special(pte_t pte)
+ return pte_val(pte) & _PAGE_SPECIAL;
+ }
+
+-static inline int pmd_large(pmd_t pmd)
++static inline unsigned long pmd_large(pmd_t pmd)
+ {
+- return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+- (PMD_ISHUGE | PMD_HUGE_PRESENT);
++ pte_t pte = __pte(pmd_val(pmd));
++
++ return pte_val(pte) & _PAGE_PMD_HUGE;
+ }
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline int pmd_young(pmd_t pmd)
++static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+- return pmd_val(pmd) & PMD_HUGE_ACCESSED;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ return pte_pfn(pte);
+ }
+
+-static inline int pmd_write(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+- return pmd_val(pmd) & PMD_HUGE_WRITE;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ return pte_young(pte);
+ }
+
+-static inline unsigned long pmd_pfn(pmd_t pmd)
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+- unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;
++ pte_t pte = __pte(pmd_val(pmd));
+
+- return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
++ return pte_write(pte);
+ }
+
+-static inline int pmd_trans_splitting(pmd_t pmd)
++static inline unsigned long pmd_trans_huge(pmd_t pmd)
+ {
+- return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
+- (PMD_ISHUGE|PMD_HUGE_SPLITTING);
++ pte_t pte = __pte(pmd_val(pmd));
++
++ return pte_val(pte) & _PAGE_PMD_HUGE;
+ }
+
+-static inline int pmd_trans_huge(pmd_t pmd)
++static inline unsigned long pmd_trans_splitting(pmd_t pmd)
+ {
+- return pmd_val(pmd) & PMD_ISHUGE;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ return pmd_trans_huge(pmd) && pte_special(pte);
+ }
+
+ #define has_transparent_hugepage() 1
+
+ static inline pmd_t pmd_mkold(pmd_t pmd)
+ {
+- pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
+- return pmd;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_mkold(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+ static inline pmd_t pmd_wrprotect(pmd_t pmd)
+ {
+- pmd_val(pmd) &= ~PMD_HUGE_WRITE;
+- return pmd;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_wrprotect(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+ static inline pmd_t pmd_mkdirty(pmd_t pmd)
+ {
+- pmd_val(pmd) |= PMD_HUGE_DIRTY;
+- return pmd;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_mkdirty(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+- pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+- return pmd;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_mkyoung(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+ static inline pmd_t pmd_mkwrite(pmd_t pmd)
+ {
+- pmd_val(pmd) |= PMD_HUGE_WRITE;
+- return pmd;
+-}
++ pte_t pte = __pte(pmd_val(pmd));
+
+-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+-{
+- pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
+- return pmd;
++ pte = pte_mkwrite(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+ static inline pmd_t pmd_mksplitting(pmd_t pmd)
+ {
+- pmd_val(pmd) |= PMD_HUGE_SPLITTING;
+- return pmd;
++ pte_t pte = __pte(pmd_val(pmd));
++
++ pte = pte_mkspecial(pte);
++
++ return __pmd(pte_val(pte));
+ }
+
+-extern pgprot_t pmd_pgprot(pmd_t entry);
++static inline pgprot_t pmd_pgprot(pmd_t entry)
++{
++ unsigned long val = pmd_val(entry);
++
++ return __pgprot(val);
++}
+ #endif
+
+ static inline int pmd_present(pmd_t pmd)
+ {
+- return pmd_val(pmd) != 0U;
++ return pmd_val(pmd) != 0UL;
+ }
+
+ #define pmd_none(pmd) (!pmd_val(pmd))
+
++/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
++ * very simple, it's just the physical address. PTE tables are of
++ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
++ * the top bits outside of the range of any physical address size we
++ * support are clear as well. We also validate the physical itself.
++ */
++#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
++
++#define pud_none(pud) (!pud_val(pud))
++
++#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
++
++#define pgd_none(pgd) (!pgd_val(pgd))
++
++#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+@@ -733,37 +795,54 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+
+ static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+ {
+- unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;
++ unsigned long val = __pa((unsigned long) (ptep));
+
+ pmd_val(*pmdp) = val;
+ }
+
+ #define pud_set(pudp, pmdp) \
+- (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
++ (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
+ static inline unsigned long __pmd_page(pmd_t pmd)
+ {
+- unsigned long paddr = (unsigned long) pmd_val(pmd);
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- if (pmd_val(pmd) & PMD_ISHUGE)
+- paddr &= PMD_HUGE_PADDR;
+-#endif
+- paddr <<= PMD_PADDR_SHIFT;
+- return ((unsigned long) __va(paddr));
++ pte_t pte = __pte(pmd_val(pmd));
++ unsigned long pfn;
++
++ pfn = pte_pfn(pte);
++
++ return ((unsigned long) __va(pfn << PAGE_SHIFT));
+ }
+ #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
+ #define pud_page_vaddr(pud) \
+- ((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
++ ((unsigned long) __va(pud_val(pud)))
+ #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
+-#define pmd_bad(pmd) (0)
+-#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
+-#define pud_none(pud) (!pud_val(pud))
+-#define pud_bad(pud) (0)
++#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
+ #define pud_present(pud) (pud_val(pud) != 0U)
+-#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
++#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
++#define pgd_page_vaddr(pgd) \
++ ((unsigned long) __va(pgd_val(pgd)))
++#define pgd_present(pgd) (pgd_val(pgd) != 0U)
++#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)
++
++static inline unsigned long pud_large(pud_t pud)
++{
++ pte_t pte = __pte(pud_val(pud));
++
++ return pte_val(pte) & _PAGE_PMD_HUGE;
++}
++
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ pte_t pte = __pte(pud_val(pud));
++
++ return pte_pfn(pte);
++}
+
+ /* Same in both SUN4V and SUN4U. */
+ #define pte_none(pte) (!pte_val(pte))
+
++#define pgd_set(pgdp, pudp) \
++ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
++
+ /* to find an entry in a page-table-directory. */
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+ #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+@@ -771,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
+ /* to find an entry in a kernel page-table-directory */
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
++/* Find an entry in the third-level page table.. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
++#define pud_offset(pgdp, address) \
++ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
++
+ /* Find an entry in the second-level page table.. */
+ #define pmd_offset(pudp, address) \
+ ((pmd_t *) pud_page_vaddr(*(pudp)) + \
+@@ -794,7 +878,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ pmd_t *pmdp)
+ {
+ pmd_t pmd = *pmdp;
+- set_pmd_at(mm, addr, pmdp, __pmd(0U));
++ set_pmd_at(mm, addr, pmdp, __pmd(0UL));
+ return pmd;
+ }
+
+@@ -842,8 +926,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ })
+ #endif
+
+-extern pgd_t swapper_pg_dir[2048];
+-extern pmd_t swapper_low_pmd_dir[2048];
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+ extern void paging_init(void);
+ extern unsigned long find_ecache_flush_span(unsigned long size);
+@@ -857,6 +940,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+ extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd);
+
++#define __HAVE_ARCH_PMDP_INVALIDATE
++extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
++ pmd_t *pmdp);
++
+ #define __HAVE_ARCH_PGTABLE_DEPOSIT
+ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+@@ -883,18 +970,6 @@ extern unsigned long pte_file(pte_t);
+ extern pte_t pgoff_to_pte(unsigned long);
+ #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
+
+-extern unsigned long sparc64_valid_addr_bitmap[];
+-
+-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+-static inline bool kern_addr_valid(unsigned long addr)
+-{
+- unsigned long paddr = __pa(addr);
+-
+- if ((paddr >> 41UL) != 0UL)
+- return false;
+- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+-}
+-
+ extern int page_in_phys_avail(unsigned long paddr);
+
+ /*
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 5e35e0517318..acd614668ec1 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -24,6 +24,10 @@ static inline int con_is_present(void)
+ }
+ #endif
+
++#ifdef CONFIG_SPARC64
++extern void __init start_early_boot(void);
++#endif
++
+ extern void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/include/asm/sparsemem.h b/arch/sparc/include/asm/sparsemem.h
+index b99d4e4b6d28..e5e1752d5d78 100644
+--- a/arch/sparc/include/asm/sparsemem.h
++++ b/arch/sparc/include/asm/sparsemem.h
+@@ -3,9 +3,11 @@
+
+ #ifdef __KERNEL__
+
++#include <asm/page.h>
++
+ #define SECTION_SIZE_BITS 30
+-#define MAX_PHYSADDR_BITS 42
+-#define MAX_PHYSMEM_BITS 42
++#define MAX_PHYSADDR_BITS MAX_PHYS_ADDRESS_BITS
++#define MAX_PHYSMEM_BITS MAX_PHYS_ADDRESS_BITS
+
+ #endif /* !(__KERNEL__) */
+
+diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
+index 6b67e50fb9b4..69424d48cbb7 100644
+--- a/arch/sparc/include/asm/spitfire.h
++++ b/arch/sparc/include/asm/spitfire.h
+@@ -45,6 +45,8 @@
+ #define SUN4V_CHIP_NIAGARA3 0x03
+ #define SUN4V_CHIP_NIAGARA4 0x04
+ #define SUN4V_CHIP_NIAGARA5 0x05
++#define SUN4V_CHIP_SPARC_M6 0x06
++#define SUN4V_CHIP_SPARC_M7 0x07
+ #define SUN4V_CHIP_SPARC64X 0x8a
+ #define SUN4V_CHIP_UNKNOWN 0xff
+
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index d5e504251079..6cda09d02367 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,7 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
+- unsigned long fpregs[0] __attribute__ ((aligned(64)));
++ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
++ __attribute__ ((aligned(64)));
+ };
+
+ #endif /* !(__ASSEMBLY__) */
+@@ -102,6 +103,7 @@ struct thread_info {
+ #define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
+ #define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
+ #define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
++#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */
+
+ #if PAGE_SHIFT == 13
+ #define THREAD_SIZE (2*PAGE_SIZE)
+diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
+index e696432b950d..ecb49cfa3be9 100644
+--- a/arch/sparc/include/asm/tsb.h
++++ b/arch/sparc/include/asm/tsb.h
+@@ -133,107 +133,89 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ sub TSB, 0x8, TSB; \
+ TSB_STORE(TSB, TAG);
+
+- /* Do a kernel page table walk. Leaves physical PTE pointer in
+- * REG1. Jumps to FAIL_LABEL on early page table walk termination.
+- * VADDR will not be clobbered, but REG2 will.
++ /* Do a kernel page table walk. Leaves valid PTE value in
++ * REG1. Jumps to FAIL_LABEL on early page table walk
++ * termination. VADDR will not be clobbered, but REG2 will.
++ *
++ * There are two masks we must apply to propagate bits from
++ * the virtual address into the PTE physical address field
++ * when dealing with huge pages. This is because the page
++ * table boundaries do not match the huge page size(s) the
++ * hardware supports.
++ *
++ * In these cases we propagate the bits that are below the
++ * page table level where we saw the huge page mapping, but
++ * are still within the relevant physical bits for the huge
++ * page size in question. So for PMD mappings (which fall on
++ * bit 23, for 8MB per PMD) we must propagate bit 22 for a
++ * 4MB huge page. For huge PUDs (which fall on bit 33, for
++ * 8GB per PUD), we have to accomodate 256MB and 2GB huge
++ * pages. So for those we propagate bits 32 to 28.
+ */
+ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
+ sethi %hi(swapper_pg_dir), REG1; \
+ or REG1, %lo(swapper_pg_dir), REG1; \
+ sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+- andn REG2, 0x3, REG2; \
+- lduw [REG1 + REG2], REG1; \
++ andn REG2, 0x7, REG2; \
++ ldx [REG1 + REG2], REG1; \
+ brz,pn REG1, FAIL_LABEL; \
+- sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+- sllx REG1, PGD_PADDR_SHIFT, REG1; \
+- andn REG2, 0x3, REG2; \
+- lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
+- sllx VADDR, 64 - PMD_SHIFT, REG2; \
+- srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+- sllx REG1, PMD_PADDR_SHIFT, REG1; \
++ sethi %uhi(_PAGE_PUD_HUGE), REG2; \
++ brz,pn REG1, FAIL_LABEL; \
++ sllx REG2, 32, REG2; \
++ andcc REG1, REG2, %g0; \
++ sethi %hi(0xf8000000), REG2; \
++ bne,pt %xcc, 697f; \
++ sllx REG2, 1, REG2; \
++ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+- add REG1, REG2, REG1;
+-
+- /* These macros exists only to make the PMD translator below
+- * easier to read. It hides the ELF section switch for the
+- * sun4v code patching.
+- */
+-#define OR_PTE_BIT_1INSN(REG, NAME) \
+-661: or REG, _PAGE_##NAME##_4U, REG; \
+- .section .sun4v_1insn_patch, "ax"; \
+- .word 661b; \
+- or REG, _PAGE_##NAME##_4V, REG; \
+- .previous;
+-
+-#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \
+-661: sethi %hi(_PAGE_##NAME##_4U), TMP; \
+- or REG, TMP, REG; \
+- .section .sun4v_2insn_patch, "ax"; \
+- .word 661b; \
+- mov -1, TMP; \
+- or REG, _PAGE_##NAME##_4V, REG; \
+- .previous;
+-
+- /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */
+-#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \
+-661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \
+- .section .sun4v_1insn_patch, "ax"; \
+- .word 661b; \
+- sethi %uhi(_PAGE_VALID), REG; \
+- .previous; \
+- sllx REG, 32, REG; \
+-661: or REG, _PAGE_CP_4U|_PAGE_CV_4U, REG; \
+- .section .sun4v_1insn_patch, "ax"; \
+- .word 661b; \
+- or REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \
+- .previous;
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ sethi %uhi(_PAGE_PMD_HUGE), REG2; \
++ brz,pn REG1, FAIL_LABEL; \
++ sllx REG2, 32, REG2; \
++ andcc REG1, REG2, %g0; \
++ be,pn %xcc, 698f; \
++ sethi %hi(0x400000), REG2; \
++697: brgez,pn REG1, FAIL_LABEL; \
++ andn REG1, REG2, REG1; \
++ and VADDR, REG2, REG2; \
++ ba,pt %xcc, 699f; \
++ or REG1, REG2, REG1; \
++698: sllx VADDR, 64 - PMD_SHIFT, REG2; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ brgez,pn REG1, FAIL_LABEL; \
++ nop; \
++699:
+
+ /* PMD has been loaded into REG1, interpret the value, seeing
+ * if it is a HUGE PMD or a normal one. If it is not valid
+ * then jump to FAIL_LABEL. If it is a HUGE PMD, and it
+ * translates to a valid PTE, branch to PTE_LABEL.
+ *
+- * We translate the PMD by hand, one bit at a time,
+- * constructing the huge PTE.
+- *
+- * So we construct the PTE in REG2 as follows:
+- *
+- * 1) Extract the PMD PFN from REG1 and place it into REG2.
+- *
+- * 2) Translate PMD protection bits in REG1 into REG2, one bit
+- * at a time using andcc tests on REG1 and OR's into REG2.
+- *
+- * Only two bits to be concerned with here, EXEC and WRITE.
+- * Now REG1 is freed up and we can use it as a temporary.
+- *
+- * 3) Construct the VALID, CACHE, and page size PTE bits in
+- * REG1, OR with REG2 to form final PTE.
++ * We have to propagate the 4MB bit of the virtual address
++ * because we are fabricating 8MB pages using 4MB hw pages.
+ */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+- brz,pn REG1, FAIL_LABEL; \
+- andcc REG1, PMD_ISHUGE, %g0; \
+- be,pt %xcc, 700f; \
+- and REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \
+- cmp REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \
+- bne,pn %xcc, FAIL_LABEL; \
+- andn REG1, PMD_HUGE_PROTBITS, REG2; \
+- sllx REG2, PMD_PADDR_SHIFT, REG2; \
+- /* REG2 now holds PFN << PAGE_SHIFT */ \
+- andcc REG1, PMD_HUGE_WRITE, %g0; \
+- bne,a,pt %xcc, 1f; \
+- OR_PTE_BIT_1INSN(REG2, W); \
+-1: andcc REG1, PMD_HUGE_EXEC, %g0; \
+- be,pt %xcc, 1f; \
+- nop; \
+- OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \
+- /* REG1 can now be clobbered, build final PTE */ \
+-1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \
+- ba,pt %xcc, PTE_LABEL; \
+- or REG1, REG2, REG1; \
++ brz,pn REG1, FAIL_LABEL; \
++ sethi %uhi(_PAGE_PMD_HUGE), REG2; \
++ sllx REG2, 32, REG2; \
++ andcc REG1, REG2, %g0; \
++ be,pt %xcc, 700f; \
++ sethi %hi(4 * 1024 * 1024), REG2; \
++ brgez,pn REG1, FAIL_LABEL; \
++ andn REG1, REG2, REG1; \
++ and VADDR, REG2, REG2; \
++ brlz,pt REG1, PTE_LABEL; \
++ or REG1, REG2, REG1; \
+ 700:
+ #else
+ #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
+@@ -253,18 +235,21 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
+ sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+- andn REG2, 0x3, REG2; \
+- lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
++ andn REG2, 0x7, REG2; \
++ ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
++ brz,pn REG1, FAIL_LABEL; \
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
+ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+- sllx REG1, PGD_PADDR_SHIFT, REG1; \
+- andn REG2, 0x3, REG2; \
+- lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \
+ sllx VADDR, 64 - PMD_SHIFT, REG2; \
+- srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \
+- sllx REG1, PMD_PADDR_SHIFT, REG1; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ add REG1, REG2, REG1; \
+ ldxa [REG1] ASI_PHYS_USE_EC, REG1; \
+@@ -306,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ (KERNEL_TSB_SIZE_BYTES / 16)
+ #define KERNEL_TSB4M_NENTRIES 4096
+
+-#define KTSB_PHYS_SHIFT 15
+-
+ /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
+ * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
+ * and the found TTE will be left in REG1. REG3 and REG4 must
+@@ -316,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ * VADDR and TAG will be preserved and not clobbered by this macro.
+ */
+ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661: sethi %hi(swapper_tsb), REG1; \
+- or REG1, %lo(swapper_tsb), REG1; \
++661: sethi %uhi(swapper_tsb), REG1; \
++ sethi %hi(swapper_tsb), REG2; \
++ or REG1, %ulo(swapper_tsb), REG1; \
++ or REG2, %lo(swapper_tsb), REG2; \
+ .section .swapper_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+-661: nop; \
+- .section .tsb_ldquad_phys_patch, "ax"; \
+- .word 661b; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- .previous; \
++ sllx REG1, 32, REG1; \
++ or REG1, REG2, REG1; \
+ srlx VADDR, PAGE_SHIFT, REG2; \
+ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
+ sllx REG2, 4, REG2; \
+@@ -341,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ * we can make use of that for the index computation.
+ */
+ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661: sethi %hi(swapper_4m_tsb), REG1; \
+- or REG1, %lo(swapper_4m_tsb), REG1; \
++661: sethi %uhi(swapper_4m_tsb), REG1; \
++ sethi %hi(swapper_4m_tsb), REG2; \
++ or REG1, %ulo(swapper_4m_tsb), REG1; \
++ or REG2, %lo(swapper_4m_tsb), REG2; \
+ .section .swapper_4m_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+-661: nop; \
+- .section .tsb_ldquad_phys_patch, "ax"; \
+- .word 661b; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- .previous; \
++ sllx REG1, 32, REG1; \
++ or REG1, REG2, REG1; \
+ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
+ sllx REG2, 4, REG2; \
+ add REG1, REG2, REG2; \
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index e562d3caee57..ad7e178337f1 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
+ extern __must_check long strlen_user(const char __user *str);
+ extern __must_check long strnlen_user(const char __user *str, long n);
+
+-#define __copy_to_user_inatomic ___copy_to_user
+-#define __copy_from_user_inatomic ___copy_from_user
++#define __copy_to_user_inatomic __copy_to_user
++#define __copy_from_user_inatomic __copy_from_user
+
+ struct pt_regs;
+ extern unsigned long compute_effective_address(struct pt_regs *,
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 39ca301920db..11fdf0ef50bb 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -39,6 +39,14 @@
+ 297: wr %o5, FPRS_FEF, %fprs; \
+ 298:
+
++#define VISEntryHalfFast(fail_label) \
++ rd %fprs, %o5; \
++ andcc %o5, FPRS_FEF, %g0; \
++ be,pt %icc, 297f; \
++ nop; \
++ ba,a,pt %xcc, fail_label; \
++297: wr %o5, FPRS_FEF, %fprs;
++
+ #define VISExitHalf \
+ wr %o5, 0, %fprs;
+
+diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
+index 5c5125895db8..52e10defedc4 100644
+--- a/arch/sparc/kernel/cpu.c
++++ b/arch/sparc/kernel/cpu.c
+@@ -493,6 +493,18 @@ static void __init sun4v_cpu_probe(void)
+ sparc_pmu_type = "niagara5";
+ break;
+
++ case SUN4V_CHIP_SPARC_M6:
++ sparc_cpu_type = "SPARC-M6";
++ sparc_fpu_type = "SPARC-M6 integrated FPU";
++ sparc_pmu_type = "sparc-m6";
++ break;
++
++ case SUN4V_CHIP_SPARC_M7:
++ sparc_cpu_type = "SPARC-M7";
++ sparc_fpu_type = "SPARC-M7 integrated FPU";
++ sparc_pmu_type = "sparc-m7";
++ break;
++
+ case SUN4V_CHIP_SPARC64X:
+ sparc_cpu_type = "SPARC64-X";
+ sparc_fpu_type = "SPARC64-X integrated FPU";
+diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
+index cb5d272d658a..b031c9c08bca 100644
+--- a/arch/sparc/kernel/cpumap.c
++++ b/arch/sparc/kernel/cpumap.c
+@@ -327,6 +327,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+ case SUN4V_CHIP_NIAGARA3:
+ case SUN4V_CHIP_NIAGARA4:
+ case SUN4V_CHIP_NIAGARA5:
++ case SUN4V_CHIP_SPARC_M6:
++ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC64X:
+ rover_inc_table = niagara_iterate_method;
+ break;
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index dff60abbea01..f87a55d77094 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ ds_cfg.tx_irq = vdev->tx_irq;
+ ds_cfg.rx_irq = vdev->rx_irq;
+
+- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
++ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out_free_ds_states;
+ }
+ dp->lp = lp;
+
+- err = ldc_bind(lp, "DS");
++ err = ldc_bind(lp);
+ if (err)
+ goto out_free_ldc;
+
+diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
+index b2c2c5be281c..d668ca149e64 100644
+--- a/arch/sparc/kernel/dtlb_prot.S
++++ b/arch/sparc/kernel/dtlb_prot.S
+@@ -24,11 +24,11 @@
+ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
+
+ /* PROT ** ICACHE line 2: More real fault processing */
++ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+ bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
+- ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+- ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
+ mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+- nop
++ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
++ nop
+ nop
+ nop
+ nop
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index 9c179fbfb219..3ad726c9789c 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -66,13 +66,10 @@ struct pause_patch_entry {
+ extern struct pause_patch_entry __pause_3insn_patch,
+ __pause_3insn_patch_end;
+
+-extern void __init per_cpu_patch(void);
+ extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ struct sun4v_1insn_patch_entry *);
+ extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ struct sun4v_2insn_patch_entry *);
+-extern void __init sun4v_patch(void);
+-extern void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 26b706a1867d..3d61fcae7ee3 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -282,8 +282,8 @@ sun4v_chip_type:
+ stx %l2, [%l4 + 0x0]
+ ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
+ /* 4MB align */
+- srlx %l3, 22, %l3
+- sllx %l3, 22, %l3
++ srlx %l3, ILOG2_4MB, %l3
++ sllx %l3, ILOG2_4MB, %l3
+ stx %l3, [%l4 + 0x8]
+
+ /* Leave service as-is, "call-method" */
+@@ -427,6 +427,12 @@ sun4v_chip_type:
+ cmp %g2, '5'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA5, %g4
++ cmp %g2, '6'
++ be,pt %xcc, 5f
++ mov SUN4V_CHIP_SPARC_M6, %g4
++ cmp %g2, '7'
++ be,pt %xcc, 5f
++ mov SUN4V_CHIP_SPARC_M7, %g4
+ ba,pt %xcc, 49f
+ nop
+
+@@ -585,6 +591,12 @@ niagara_tlb_fixup:
+ cmp %g1, SUN4V_CHIP_NIAGARA5
+ be,pt %xcc, niagara4_patch
+ nop
++ cmp %g1, SUN4V_CHIP_SPARC_M6
++ be,pt %xcc, niagara4_patch
++ nop
++ cmp %g1, SUN4V_CHIP_SPARC_M7
++ be,pt %xcc, niagara4_patch
++ nop
+
+ call generic_patch_copyops
+ nop
+@@ -660,14 +672,12 @@ tlb_fixup_done:
+ sethi %hi(init_thread_union), %g6
+ or %g6, %lo(init_thread_union), %g6
+ ldx [%g6 + TI_TASK], %g4
+- mov %sp, %l6
+
+ wr %g0, ASI_P, %asi
+ mov 1, %g1
+ sllx %g1, THREAD_SHIFT, %g1
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ add %g6, %g1, %sp
+- mov 0, %fp
+
+ /* Set per-cpu pointer initially to zero, this makes
+ * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -694,44 +704,14 @@ tlb_fixup_done:
+ nop
+ #endif
+
+- mov %l6, %o1 ! OpenPROM stack
+ call prom_init
+ mov %l7, %o0 ! OpenPROM cif handler
+
+- /* Initialize current_thread_info()->cpu as early as possible.
+- * In order to do that accurately we have to patch up the get_cpuid()
+- * assembler sequences. And that, in turn, requires that we know
+- * if we are on a Starfire box or not. While we're here, patch up
+- * the sun4v sequences as well.
++ /* To create a one-register-window buffer between the kernel's
++ * initial stack and the last stack frame we use from the firmware,
++ * do the rest of the boot from a C helper function.
+ */
+- call check_if_starfire
+- nop
+- call per_cpu_patch
+- nop
+- call sun4v_patch
+- nop
+-
+-#ifdef CONFIG_SMP
+- call hard_smp_processor_id
+- nop
+- cmp %o0, NR_CPUS
+- blu,pt %xcc, 1f
+- nop
+- call boot_cpu_id_too_large
+- nop
+- /* Not reached... */
+-
+-1:
+-#else
+- mov 0, %o0
+-#endif
+- sth %o0, [%g6 + TI_CPU]
+-
+- call prom_init_report
+- nop
+-
+- /* Off we go.... */
+- call start_kernel
++ call start_early_boot
+ nop
+ /* Not reached... */
+
+diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
+index c0a2de0fd624..5c55145bfbf0 100644
+--- a/arch/sparc/kernel/hvapi.c
++++ b/arch/sparc/kernel/hvapi.c
+@@ -46,6 +46,7 @@ static struct api_info api_table[] = {
+ { .group = HV_GRP_VF_CPU, },
+ { .group = HV_GRP_KT_CPU, },
+ { .group = HV_GRP_VT_CPU, },
++ { .group = HV_GRP_T5_CPU, },
+ { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ };
+
+diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
+index f3ab509b76a8..caedf8320416 100644
+--- a/arch/sparc/kernel/hvcalls.S
++++ b/arch/sparc/kernel/hvcalls.S
+@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg)
+ retl
+ nop
+ ENDPROC(sun4v_vt_set_perfreg)
++
++ENTRY(sun4v_t5_get_perfreg)
++ mov %o1, %o4
++ mov HV_FAST_T5_GET_PERFREG, %o5
++ ta HV_FAST_TRAP
++ stx %o1, [%o4]
++ retl
++ nop
++ENDPROC(sun4v_t5_get_perfreg)
++
++ENTRY(sun4v_t5_set_perfreg)
++ mov HV_FAST_T5_SET_PERFREG, %o5
++ ta HV_FAST_TRAP
++ retl
++ nop
++ENDPROC(sun4v_t5_set_perfreg)
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index 4eb1a5a1d544..4ad81387f5a9 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -110,7 +110,6 @@ hv_cpu_startup:
+ sllx %g5, THREAD_SHIFT, %g5
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ add %g6, %g5, %sp
+- mov 0, %fp
+
+ call init_irqwork_curcpu
+ nop
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 2096468de9b2..6cacf2d2d475 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ }
+
+ order = get_order(len_total);
+- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
++ va = __get_free_pages(gfp, order);
++ if (va == 0)
+ goto err_nopages;
+
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
+@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ }
+
+ order = get_order(len_total);
+- va = (void *) __get_free_pages(GFP_KERNEL, order);
++ va = (void *) __get_free_pages(gfp, order);
+ if (va == NULL) {
+ printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+ goto err_nopages;
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index d4840cec2c55..7c22f1cfd2a1 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -47,8 +47,6 @@
+ #include "cpumap.h"
+ #include "kstack.h"
+
+-#define NUM_IVECS (IMAP_INR + 1)
+-
+ struct ino_bucket *ivector_table;
+ unsigned long ivector_table_pa;
+
+@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
+
+ #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
+
+-static struct {
+- unsigned int dev_handle;
+- unsigned int dev_ino;
+- unsigned int in_use;
+-} irq_table[NR_IRQS];
+-static DEFINE_SPINLOCK(irq_alloc_lock);
++static unsigned long hvirq_major __initdata;
++static int __init early_hvirq_major(char *p)
++{
++ int rc = kstrtoul(p, 10, &hvirq_major);
++
++ return rc;
++}
++early_param("hvirq", early_hvirq_major);
+
+-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++static int hv_irq_version;
++
++/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
++ * based interfaces, but:
++ *
++ * 1) Several OSs, Solaris and Linux included, use them even when only
++ * negotiating version 1.0 (or failing to negotiate at all). So the
++ * hypervisor has a workaround that provides the VIRQ interfaces even
++ * when only version 1.0 of the API is in use.
++ *
++ * 2) More importantly, with major version 2.0 these VIRQ
++ * interfaces were only actually hooked up for LDC interrupts, even
++ * though the Hypervisor specification clearly stated:
++ *
++ * The new interrupt API functions will be available to a guest
++ * when it negotiates version 2.0 in the interrupt API group 0x2. When
++ * a guest negotiates version 2.0, all interrupt sources will only
++ * support using the cookie interface, and any attempt to use the
++ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
++ * ENOTSUPPORTED error being returned.
++ *
++ * with an emphasis on "all interrupt sources".
++ *
++ * To correct this, major version 3.0 was created which does actually
++ * support VIRQs for all interrupt sources (not just LDC devices). So
++ * if we want to move completely over to the cookie based VIRQs we must
++ * negotiate major version 3.0 or later of HV_GRP_INTR.
++ */
++static bool sun4v_cookie_only_virqs(void)
+ {
+- unsigned long flags;
+- unsigned char ent;
++ if (hv_irq_version >= 3)
++ return true;
++ return false;
++}
+
+- BUILD_BUG_ON(NR_IRQS >= 256);
++static void __init irq_init_hv(void)
++{
++ unsigned long hv_error, major, minor = 0;
++
++ if (tlb_type != hypervisor)
++ return;
++
++ if (hvirq_major)
++ major = hvirq_major;
++ else
++ major = 3;
+
+- spin_lock_irqsave(&irq_alloc_lock, flags);
++ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
++ if (!hv_error)
++ hv_irq_version = major;
++ else
++ hv_irq_version = 1;
+
+- for (ent = 1; ent < NR_IRQS; ent++) {
+- if (!irq_table[ent].in_use)
++ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
++ hv_irq_version,
++ sun4v_cookie_only_virqs() ? "enabled" : "disabled");
++}
++
++/* This function is for the timer interrupt. */
++int __init arch_probe_nr_irqs(void)
++{
++ return 1;
++}
++
++#define DEFAULT_NUM_IVECS (0xfffU)
++static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
++#define NUM_IVECS (nr_ivec)
++
++static unsigned int __init size_nr_ivec(void)
++{
++ if (tlb_type == hypervisor) {
++ switch (sun4v_chip_type) {
++ /* Athena's devhandle|devino is large. */
++ case SUN4V_CHIP_SPARC64X:
++ nr_ivec = 0xffff;
+ break;
++ }
+ }
+- if (ent >= NR_IRQS) {
+- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+- ent = 0;
+- } else {
+- irq_table[ent].dev_handle = dev_handle;
+- irq_table[ent].dev_ino = dev_ino;
+- irq_table[ent].in_use = 1;
+- }
++ return nr_ivec;
++}
++
++struct irq_handler_data {
++ union {
++ struct {
++ unsigned int dev_handle;
++ unsigned int dev_ino;
++ };
++ unsigned long sysino;
++ };
++ struct ino_bucket bucket;
++ unsigned long iclr;
++ unsigned long imap;
++};
++
++static inline unsigned int irq_data_to_handle(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
++
++ return ihd->dev_handle;
++}
++
++static inline unsigned int irq_data_to_ino(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
++
++ return ihd->dev_ino;
++}
+
+- spin_unlock_irqrestore(&irq_alloc_lock, flags);
++static inline unsigned long irq_data_to_sysino(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
+
+- return ent;
++ return ihd->sysino;
+ }
+
+-#ifdef CONFIG_PCI_MSI
+ void irq_free(unsigned int irq)
+ {
+- unsigned long flags;
++ void *data = irq_get_handler_data(irq);
+
+- if (irq >= NR_IRQS)
+- return;
++ kfree(data);
++ irq_set_handler_data(irq, NULL);
++ irq_free_descs(irq, 1);
++}
+
+- spin_lock_irqsave(&irq_alloc_lock, flags);
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++{
++ int irq;
+
+- irq_table[irq].in_use = 0;
++ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
++ if (irq <= 0)
++ goto out;
+
+- spin_unlock_irqrestore(&irq_alloc_lock, flags);
++ return irq;
++out:
++ return 0;
++}
++
++static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
++{
++ unsigned long hv_err, cookie;
++ struct ino_bucket *bucket;
++ unsigned int irq = 0U;
++
++ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
++ if (hv_err) {
++ pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
++ goto out;
++ }
++
++ if (cookie & ((1UL << 63UL))) {
++ cookie = ~cookie;
++ bucket = (struct ino_bucket *) __va(cookie);
++ irq = bucket->__irq;
++ }
++out:
++ return irq;
++}
++
++static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
++{
++ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++ struct ino_bucket *bucket;
++ unsigned int irq;
++
++ bucket = &ivector_table[sysino];
++ irq = bucket_get_irq(__pa(bucket));
++
++ return irq;
++}
++
++void ack_bad_irq(unsigned int irq)
++{
++ pr_crit("BAD IRQ ack %d\n", irq);
++}
++
++void irq_install_pre_handler(int irq,
++ void (*func)(unsigned int, void *, void *),
++ void *arg1, void *arg2)
++{
++ pr_warn("IRQ pre handler NOT supported.\n");
+ }
+-#endif
+
+ /*
+ * /proc/interrupts printing:
+@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
+ return tid;
+ }
+
+-struct irq_handler_data {
+- unsigned long iclr;
+- unsigned long imap;
+-
+- void (*pre_handler)(unsigned int, void *, void *);
+- void *arg1;
+- void *arg2;
+-};
+-
+ #ifdef CONFIG_SMP
+ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
+ {
+@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
+
+ static void sun4v_irq_enable(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_settarget(ino, cpuid);
+@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
+ static int sun4v_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask);
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_settarget(ino, cpuid);
+@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
+
+ static void sun4v_irq_disable(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
+
+ static void sun4v_irq_eoi(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
+
+ static void sun4v_virq_enable(struct irq_data *data)
+ {
+- unsigned long cpuid, dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
++ unsigned long cpuid;
+ int err;
+
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
+ static int sun4v_virt_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+ {
+- unsigned long cpuid, dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
++ unsigned long cpuid;
+ int err;
+
+ cpuid = irq_choose_cpu(data->irq, mask);
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
+
+ static void sun4v_virq_disable(struct irq_data *data)
+ {
+- unsigned long dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
+ int err;
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+ HV_INTR_DISABLED);
+@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
+
+ static void sun4v_virq_eoi(struct irq_data *data)
+ {
+- unsigned long dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
+ int err;
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
+ .flags = IRQCHIP_EOI_IF_HANDLED,
+ };
+
+-static void pre_flow_handler(struct irq_data *d)
+-{
+- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+- unsigned int ino = irq_table[d->irq].dev_ino;
+-
+- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
+-}
+-
+-void irq_install_pre_handler(int irq,
+- void (*func)(unsigned int, void *, void *),
+- void *arg1, void *arg2)
+-{
+- struct irq_handler_data *handler_data = irq_get_handler_data(irq);
+-
+- handler_data->pre_handler = func;
+- handler_data->arg1 = arg1;
+- handler_data->arg2 = arg2;
+-
+- __irq_set_preflow_handler(irq, pre_flow_handler);
+-}
+-
+ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+ {
+- struct ino_bucket *bucket;
+ struct irq_handler_data *handler_data;
++ struct ino_bucket *bucket;
+ unsigned int irq;
+ int ino;
+
+@@ -537,119 +641,166 @@ out:
+ return irq;
+ }
+
+-static unsigned int sun4v_build_common(unsigned long sysino,
+- struct irq_chip *chip)
++static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
++ void (*handler_data_init)(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino),
++ struct irq_chip *chip)
+ {
+- struct ino_bucket *bucket;
+- struct irq_handler_data *handler_data;
++ struct irq_handler_data *data;
+ unsigned int irq;
+
+- BUG_ON(tlb_type != hypervisor);
++ irq = irq_alloc(devhandle, devino);
++ if (!irq)
++ goto out;
+
+- bucket = &ivector_table[sysino];
+- irq = bucket_get_irq(__pa(bucket));
+- if (!irq) {
+- irq = irq_alloc(0, sysino);
+- bucket_set_irq(__pa(bucket), irq);
+- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
+- "IVEC");
++ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
++ if (unlikely(!data)) {
++ pr_err("IRQ handler data allocation failed.\n");
++ irq_free(irq);
++ irq = 0;
++ goto out;
+ }
+
+- handler_data = irq_get_handler_data(irq);
+- if (unlikely(handler_data))
+- goto out;
++ irq_set_handler_data(irq, data);
++ handler_data_init(data, devhandle, devino);
++ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
++ data->imap = ~0UL;
++ data->iclr = ~0UL;
++out:
++ return irq;
++}
+
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+- if (unlikely(!handler_data)) {
+- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
+- prom_halt();
+- }
+- irq_set_handler_data(irq, handler_data);
++static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
++ unsigned int devino)
++{
++ struct irq_handler_data *ihd = irq_get_handler_data(irq);
++ unsigned long hv_error, cookie;
+
+- /* Catch accidental accesses to these things. IMAP/ICLR handling
+- * is done by hypervisor calls on sun4v platforms, not by direct
+- * register accesses.
++ /* handler_irq needs to find the irq. cookie is seen signed in
++ * sun4v_dev_mondo and treated as a non ivector_table delivery.
+ */
+- handler_data->imap = ~0UL;
+- handler_data->iclr = ~0UL;
++ ihd->bucket.__irq = irq;
++ cookie = ~__pa(&ihd->bucket);
+
+-out:
+- return irq;
++ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
++ if (hv_error)
++ pr_err("HV vintr set cookie failed = %ld\n", hv_error);
++
++ return hv_error;
+ }
+
+-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
++static void cookie_handler_data(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino)
+ {
+- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++ data->dev_handle = devhandle;
++ data->dev_ino = devino;
++}
++
++static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
++ struct irq_chip *chip)
++{
++ unsigned long hv_error;
++ unsigned int irq;
++
++ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
++
++ hv_error = cookie_assign(irq, devhandle, devino);
++ if (hv_error) {
++ irq_free(irq);
++ irq = 0;
++ }
+
+- return sun4v_build_common(sysino, &sun4v_irq);
++ return irq;
+ }
+
+-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
+ {
+- struct irq_handler_data *handler_data;
+- unsigned long hv_err, cookie;
+- struct ino_bucket *bucket;
+ unsigned int irq;
+
+- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
+- if (unlikely(!bucket))
+- return 0;
++ irq = cookie_exists(devhandle, devino);
++ if (irq)
++ goto out;
+
+- /* The only reference we store to the IRQ bucket is
+- * by physical address which kmemleak can't see, tell
+- * it that this object explicitly is not a leak and
+- * should be scanned.
+- */
+- kmemleak_not_leak(bucket);
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+
+- __flush_dcache_range((unsigned long) bucket,
+- ((unsigned long) bucket +
+- sizeof(struct ino_bucket)));
++out:
++ return irq;
++}
+
+- irq = irq_alloc(devhandle, devino);
++static void sysino_set_bucket(unsigned int irq)
++{
++ struct irq_handler_data *ihd = irq_get_handler_data(irq);
++ struct ino_bucket *bucket;
++ unsigned long sysino;
++
++ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
++ BUG_ON(sysino >= nr_ivec);
++ bucket = &ivector_table[sysino];
+ bucket_set_irq(__pa(bucket), irq);
++}
+
+- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
+- "IVEC");
++static void sysino_handler_data(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino)
++{
++ unsigned long sysino;
+
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+- if (unlikely(!handler_data))
+- return 0;
++ sysino = sun4v_devino_to_sysino(devhandle, devino);
++ data->sysino = sysino;
++}
+
+- /* In order to make the LDC channel startup sequence easier,
+- * especially wrt. locking, we do not let request_irq() enable
+- * the interrupt.
+- */
+- irq_set_status_flags(irq, IRQ_NOAUTOEN);
+- irq_set_handler_data(irq, handler_data);
++static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
++ struct irq_chip *chip)
++{
++ unsigned int irq;
+
+- /* Catch accidental accesses to these things. IMAP/ICLR handling
+- * is done by hypervisor calls on sun4v platforms, not by direct
+- * register accesses.
+- */
+- handler_data->imap = ~0UL;
+- handler_data->iclr = ~0UL;
++ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
++ if (!irq)
++ goto out;
+
+- cookie = ~__pa(bucket);
+- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+- if (hv_err) {
+- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+- "err=%lu\n", devhandle, devino, hv_err);
+- prom_halt();
+- }
++ sysino_set_bucket(irq);
++out:
++ return irq;
++}
+
++static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
++{
++ int irq;
++
++ irq = sysino_exists(devhandle, devino);
++ if (irq)
++ goto out;
++
++ irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
++out:
+ return irq;
+ }
+
+-void ack_bad_irq(unsigned int irq)
++unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+ {
+- unsigned int ino = irq_table[irq].dev_ino;
++ unsigned int irq;
+
+- if (!ino)
+- ino = 0xdeadbeef;
++ if (sun4v_cookie_only_virqs())
++ irq = sun4v_build_cookie(devhandle, devino);
++ else
++ irq = sun4v_build_sysino(devhandle, devino);
+
+- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+- ino, irq);
++ return irq;
++}
++
++unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++{
++ int irq;
++
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
++ if (!irq)
++ goto out;
++
++ /* This is borrowed from the original function.
++ */
++ irq_set_status_flags(irq, IRQ_NOAUTOEN);
++
++out:
++ return irq;
+ }
+
+ void *hardirq_stack[NR_CPUS];
+@@ -731,9 +882,12 @@ void fixup_irqs(void)
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+- struct irq_data *data = irq_desc_get_irq_data(desc);
++ struct irq_data *data;
+ unsigned long flags;
+
++ if (!desc)
++ continue;
++ data = irq_desc_get_irq_data(desc);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->action && !irqd_is_per_cpu(data)) {
+ if (data->chip->irq_set_affinity)
+@@ -933,16 +1087,22 @@ static struct irqaction timer_irq_action = {
+ .name = "timer",
+ };
+
+-/* Only invoked on boot processor. */
+-void __init init_IRQ(void)
++static void __init irq_ivector_init(void)
+ {
+- unsigned long size;
++ unsigned long size, order;
++ unsigned int ivecs;
+
+- map_prom_timers();
+- kill_prom_timer();
++ /* If we are doing cookie only VIRQs then we do not need the ivector
++ * table to process interrupts.
++ */
++ if (sun4v_cookie_only_virqs())
++ return;
+
+- size = sizeof(struct ino_bucket) * NUM_IVECS;
+- ivector_table = kzalloc(size, GFP_KERNEL);
++ ivecs = size_nr_ivec();
++ size = sizeof(struct ino_bucket) * ivecs;
++ order = get_order(size);
++ ivector_table = (struct ino_bucket *)
++ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!ivector_table) {
+ prom_printf("Fatal error, cannot allocate ivector_table\n");
+ prom_halt();
+@@ -951,6 +1111,15 @@ void __init init_IRQ(void)
+ ((unsigned long) ivector_table) + size);
+
+ ivector_table_pa = __pa(ivector_table);
++}
++
++/* Only invoked on boot processor. */
++void __init init_IRQ(void)
++{
++ irq_init_hv();
++ irq_ivector_init();
++ map_prom_timers();
++ kill_prom_timer();
+
+ if (tlb_type == hypervisor)
+ sun4v_init_mondo_queues();
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index fde5a419cf27..ef0d8e9e1210 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
+
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+- /* Load and check PTE. */
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5
+- mov 1, %g7
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7
+- brgez,a,pn %g5, kvmap_itlb_longpath
+- TSB_STORE(%g1, %g7)
+-
+ TSB_WRITE(%g1, %g5, %g6)
+
+ /* fallthrough to TLB load */
+@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
+ ba,pt %xcc, kvmap_dtlb_load
+ nop
+
++kvmap_linear_early:
++ sethi %hi(kern_linear_pte_xor), %g7
++ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
++ ba,pt %xcc, kvmap_dtlb_tsb4m_load
++ xor %g2, %g4, %g5
++
+ .align 32
+ kvmap_dtlb_tsb4m_load:
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+@@ -146,85 +144,17 @@ kvmap_dtlb_4v:
+ /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
+ KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+ #endif
+- /* TSB entry address left in %g1, lookup linear PTE.
+- * Must preserve %g1 and %g6 (TAG).
+- */
+-kvmap_dtlb_tsb4m_miss:
+- /* Clear the PAGE_OFFSET top virtual bits, shift
+- * down to get PFN, and make sure PFN is in range.
+- */
+- sllx %g4, 21, %g5
+-
+- /* Check to see if we know about valid memory at the 4MB
+- * chunk this physical address will reside within.
+- */
+- srlx %g5, 21 + 41, %g2
+- brnz,pn %g2, kvmap_dtlb_longpath
+- nop
+-
+- /* This unconditional branch and delay-slot nop gets patched
+- * by the sethi sequence once the bitmap is properly setup.
++ /* Linear mapping TSB lookup failed. Fallthrough to kernel
++ * page table based lookup.
+ */
+- .globl valid_addr_bitmap_insn
+-valid_addr_bitmap_insn:
+- ba,pt %xcc, 2f
+- nop
+- .subsection 2
+- .globl valid_addr_bitmap_patch
+-valid_addr_bitmap_patch:
+- sethi %hi(sparc64_valid_addr_bitmap), %g7
+- or %g7, %lo(sparc64_valid_addr_bitmap), %g7
+- .previous
+-
+- srlx %g5, 21 + 22, %g2
+- srlx %g2, 6, %g5
+- and %g2, 63, %g2
+- sllx %g5, 3, %g5
+- ldx [%g7 + %g5], %g5
+- mov 1, %g7
+- sllx %g7, %g2, %g7
+- andcc %g5, %g7, %g0
+- be,pn %xcc, kvmap_dtlb_longpath
+-
+-2: sethi %hi(kpte_linear_bitmap), %g2
+-
+- /* Get the 256MB physical address index. */
+- sllx %g4, 21, %g5
+- or %g2, %lo(kpte_linear_bitmap), %g2
+- srlx %g5, 21 + 28, %g5
+- and %g5, (32 - 1), %g7
+-
+- /* Divide by 32 to get the offset into the bitmask. */
+- srlx %g5, 5, %g5
+- add %g7, %g7, %g7
+- sllx %g5, 3, %g5
+-
+- /* kern_linear_pte_xor[(mask >> shift) & 3)] */
+- ldx [%g2 + %g5], %g2
+- srlx %g2, %g7, %g7
+- sethi %hi(kern_linear_pte_xor), %g5
+- and %g7, 3, %g7
+- or %g5, %lo(kern_linear_pte_xor), %g5
+- sllx %g7, 3, %g7
+- ldx [%g5 + %g7], %g2
+-
+ .globl kvmap_linear_patch
+ kvmap_linear_patch:
+- ba,pt %xcc, kvmap_dtlb_tsb4m_load
+- xor %g2, %g4, %g5
++ ba,a,pt %xcc, kvmap_linear_early
+
+ kvmap_dtlb_vmalloc_addr:
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+- /* Load and check PTE. */
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5
+- mov 1, %g7
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7
+- brgez,a,pn %g5, kvmap_dtlb_longpath
+- TSB_STORE(%g1, %g7)
+-
+ TSB_WRITE(%g1, %g5, %g6)
+
+ /* fallthrough to TLB load */
+@@ -256,13 +186,8 @@ kvmap_dtlb_load:
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ kvmap_vmemmap:
+- sub %g4, %g5, %g5
+- srlx %g5, 22, %g5
+- sethi %hi(vmemmap_table), %g1
+- sllx %g5, 3, %g5
+- or %g1, %lo(vmemmap_table), %g1
+- ba,pt %xcc, kvmap_dtlb_load
+- ldx [%g1 + %g5], %g5
++ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
++ ba,a,pt %xcc, kvmap_dtlb_load
+ #endif
+
+ kvmap_dtlb_nonlinear:
+@@ -274,8 +199,8 @@ kvmap_dtlb_nonlinear:
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Do not use the TSB for vmemmap. */
+- mov (VMEMMAP_BASE >> 40), %g5
+- sllx %g5, 40, %g5
++ sethi %hi(VMEMMAP_BASE), %g5
++ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
+ cmp %g4,%g5
+ bgeu,pn %xcc, kvmap_vmemmap
+ nop
+@@ -287,8 +212,8 @@ kvmap_dtlb_tsbmiss:
+ sethi %hi(MODULES_VADDR), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_dtlb_longpath
+- mov (VMALLOC_END >> 40), %g5
+- sllx %g5, 40, %g5
++ sethi %hi(VMALLOC_END), %g5
++ ldx [%g5 + %lo(VMALLOC_END)], %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, kvmap_dtlb_longpath
+ nop
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 66dacd56bb10..27bb55485472 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp)
+
+ struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+- void *event_arg)
++ void *event_arg,
++ const char *name)
+ {
+ struct ldc_channel *lp;
+ const struct ldc_mode_ops *mops;
+@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+ err = -EINVAL;
+ if (!cfgp)
+ goto out_err;
++ if (!name)
++ goto out_err;
+
+ switch (cfgp->mode) {
+ case LDC_MODE_RAW:
+@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+
+ INIT_HLIST_HEAD(&lp->mh_list);
+
++ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
++ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
++
++ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
++ lp->rx_irq_name, lp);
++ if (err)
++ goto out_free_txq;
++
++ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
++ lp->tx_irq_name, lp);
++ if (err) {
++ free_irq(lp->cfg.rx_irq, lp);
++ goto out_free_txq;
++ }
++
+ return lp;
+
+ out_free_txq:
+@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free);
+ * state. This does not initiate a handshake, ldc_connect() does
+ * that.
+ */
+-int ldc_bind(struct ldc_channel *lp, const char *name)
++int ldc_bind(struct ldc_channel *lp)
+ {
+ unsigned long hv_err, flags;
+ int err = -EINVAL;
+
+- if (!name ||
+- (lp->state != LDC_STATE_INIT))
++ if (lp->state != LDC_STATE_INIT)
+ return -EINVAL;
+
+- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+-
+- err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
+- lp->rx_irq_name, lp);
+- if (err)
+- return err;
+-
+- err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
+- lp->tx_irq_name, lp);
+- if (err) {
+- free_irq(lp->cfg.rx_irq, lp);
+- return err;
+- }
+-
+-
+ spin_lock_irqsave(&lp->lock, flags);
+
+ enable_irq(lp->cfg.rx_irq);
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 6479256fd5a4..fce8ab17bcbb 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -141,7 +141,6 @@ static inline unsigned int get_nmi_count(int cpu)
+
+ static __init void nmi_cpu_busy(void *data)
+ {
+- local_irq_enable_in_hardirq();
+ while (endflag == 0)
+ mb();
+ }
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index bc4d3f5d2e5d..cb021453de2a 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -398,8 +398,8 @@ static void apb_fake_ranges(struct pci_dev *dev,
+ apb_calc_first_last(map, &first, &last);
+ res = bus->resource[1];
+ res->flags = IORESOURCE_MEM;
+- region.start = (first << 21);
+- region.end = (last << 21) + ((1 << 21) - 1);
++ region.start = (first << 29);
++ region.end = (last << 29) + ((1 << 29) - 1);
+ pcibios_bus_to_resource(dev, res, &region);
+ }
+
+diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
+index 269af58497aa..7e967c8018c8 100644
+--- a/arch/sparc/kernel/pcr.c
++++ b/arch/sparc/kernel/pcr.c
+@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = {
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+ };
+
++static u64 n5_pcr_read(unsigned long reg_num)
++{
++ unsigned long val;
++
++ (void) sun4v_t5_get_perfreg(reg_num, &val);
++
++ return val;
++}
++
++static void n5_pcr_write(unsigned long reg_num, u64 val)
++{
++ (void) sun4v_t5_set_perfreg(reg_num, val);
++}
++
++static const struct pcr_ops n5_pcr_ops = {
++ .read_pcr = n5_pcr_read,
++ .write_pcr = n5_pcr_write,
++ .read_pic = n4_pic_read,
++ .write_pic = n4_pic_write,
++ .nmi_picl_value = n4_picl_value,
++ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
++ PCR_N4_UTRACE | PCR_N4_TOE |
++ (26 << PCR_N4_SL_SHIFT)),
++ .pcr_nmi_disable = PCR_N4_PICNPT,
++};
++
++
+ static unsigned long perf_hsvc_group;
+ static unsigned long perf_hsvc_major;
+ static unsigned long perf_hsvc_minor;
+
+ static int __init register_perf_hsvc(void)
+ {
++ unsigned long hverror;
++
+ if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void)
+ perf_hsvc_group = HV_GRP_VT_CPU;
+ break;
+
++ case SUN4V_CHIP_NIAGARA5:
++ perf_hsvc_group = HV_GRP_T5_CPU;
++ break;
++
+ default:
+ return -ENODEV;
+ }
+@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void)
+
+ perf_hsvc_major = 1;
+ perf_hsvc_minor = 0;
+- if (sun4v_hvapi_register(perf_hsvc_group,
+- perf_hsvc_major,
+- &perf_hsvc_minor)) {
+- printk("perfmon: Could not register hvapi.\n");
++ hverror = sun4v_hvapi_register(perf_hsvc_group,
++ perf_hsvc_major,
++ &perf_hsvc_minor);
++ if (hverror) {
++ pr_err("perfmon: Could not register hvapi(0x%lx).\n",
++ hverror);
+ return -ENODEV;
+ }
+ }
+@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void)
+ pcr_ops = &n4_pcr_ops;
+ break;
+
++ case SUN4V_CHIP_NIAGARA5:
++ pcr_ops = &n5_pcr_ops;
++ break;
++
+ default:
+ ret = -ENODEV;
+ break;
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index b5c38faa4ead..617b9fe33771 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -1662,7 +1662,8 @@ static bool __init supported_pmu(void)
+ sparc_pmu = &niagara2_pmu;
+ return true;
+ }
+- if (!strcmp(sparc_pmu_type, "niagara4")) {
++ if (!strcmp(sparc_pmu_type, "niagara4") ||
++ !strcmp(sparc_pmu_type, "niagara5")) {
+ sparc_pmu = &niagara4_pmu;
+ return true;
+ }
+@@ -1671,9 +1672,12 @@ static bool __init supported_pmu(void)
+
+ int __init init_hw_perf_events(void)
+ {
++ int err;
++
+ pr_info("Performance events: ");
+
+- if (!supported_pmu()) {
++ err = pcr_arch_init();
++ if (err || !supported_pmu()) {
+ pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
+ return 0;
+ }
+@@ -1685,7 +1689,7 @@ int __init init_hw_perf_events(void)
+
+ return 0;
+ }
+-early_initcall(init_hw_perf_events);
++pure_initcall(init_hw_perf_events);
+
+ void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index b9cc9763faf4..fa49b80d8ab6 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -305,6 +305,9 @@ static void __global_pmu_self(int this_cpu)
+ struct global_pmu_snapshot *pp;
+ int i, num;
+
++ if (!pcr_ops)
++ return;
++
+ pp = &global_cpu_snapshot[this_cpu].pmu;
+
+ num = 1;
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3fdb455e3318..61a519808cb7 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+
+ #include <asm/io.h>
+ #include <asm/processor.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ struct cpuid_patch_entry *p;
+ unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ }
+ }
+
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ extern void sun4v_hvapi_init(void);
+
+@@ -335,14 +336,25 @@ static void __init pause_patch(void)
+ }
+ }
+
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+- cpu, NR_CPUS);
+- prom_halt();
++ int cpu;
++
++ check_if_starfire();
++ per_cpu_patch();
++ sun4v_patch();
++
++ cpu = hard_smp_processor_id();
++ if (cpu >= NR_CPUS) {
++ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++ cpu, NR_CPUS);
++ prom_halt();
++ }
++ current_thread_info()->cpu = cpu;
++
++ prom_init_report();
++ start_kernel();
+ }
+-#endif
+
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+@@ -500,12 +512,16 @@ static void __init init_sparc64_elf_hwcap(void)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= HWCAP_SPARC_BLKINIT;
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= HWCAP_SPARC_N2;
+ }
+@@ -533,6 +549,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
+ AV_SPARC_ASI_BLK_INIT |
+@@ -540,6 +558,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+ AV_SPARC_FMAF);
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 643bf38ed619..2b4e03e9cd4b 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1394,7 +1394,6 @@ void __cpu_die(unsigned int cpu)
+
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+- pcr_arch_init();
+ }
+
+ void smp_send_reschedule(int cpu)
+@@ -1474,6 +1473,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
+ pud_t *pud;
+ pmd_t *pmd;
+
++ if (pgd_none(*pgd)) {
++ pud_t *new;
++
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++ pgd_populate(&init_mm, pgd, new);
++ }
++
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
+index bde867fd71e8..6179e19bc9b9 100644
+--- a/arch/sparc/kernel/sun4v_tlb_miss.S
++++ b/arch/sparc/kernel/sun4v_tlb_miss.S
+@@ -182,7 +182,7 @@ sun4v_tsb_miss_common:
+ cmp %g5, -1
+ be,pt %xcc, 80f
+ nop
+- COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
++ COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
+
+ /* That clobbered %g2, reload it. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+@@ -195,6 +195,11 @@ sun4v_tsb_miss_common:
+ ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
+
+ sun4v_itlb_error:
++ rdpr %tl, %g1
++ cmp %g1, 1
++ ble,pt %icc, sun4v_bad_ra
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
++
+ sethi %hi(sun4v_err_itlb_vaddr), %g1
+ stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
+ sethi %hi(sun4v_err_itlb_ctx), %g1
+@@ -206,15 +211,10 @@ sun4v_itlb_error:
+ sethi %hi(sun4v_err_itlb_error), %g1
+ stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
+
++ sethi %hi(1f), %g7
+ rdpr %tl, %g4
+- cmp %g4, 1
+- ble,pt %icc, 1f
+- sethi %hi(2f), %g7
+ ba,pt %xcc, etraptl1
+- or %g7, %lo(2f), %g7
+-
+-1: ba,pt %xcc, etrap
+-2: or %g7, %lo(2b), %g7
++1: or %g7, %lo(1f), %g7
+ mov %l4, %o1
+ call sun4v_itlb_error_report
+ add %sp, PTREGS_OFF, %o0
+@@ -222,6 +222,11 @@ sun4v_itlb_error:
+ /* NOTREACHED */
+
+ sun4v_dtlb_error:
++ rdpr %tl, %g1
++ cmp %g1, 1
++ ble,pt %icc, sun4v_bad_ra
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
++
+ sethi %hi(sun4v_err_dtlb_vaddr), %g1
+ stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
+ sethi %hi(sun4v_err_dtlb_ctx), %g1
+@@ -233,21 +238,23 @@ sun4v_dtlb_error:
+ sethi %hi(sun4v_err_dtlb_error), %g1
+ stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
+
++ sethi %hi(1f), %g7
+ rdpr %tl, %g4
+- cmp %g4, 1
+- ble,pt %icc, 1f
+- sethi %hi(2f), %g7
+ ba,pt %xcc, etraptl1
+- or %g7, %lo(2f), %g7
+-
+-1: ba,pt %xcc, etrap
+-2: or %g7, %lo(2b), %g7
++1: or %g7, %lo(1f), %g7
+ mov %l4, %o1
+ call sun4v_dtlb_error_report
+ add %sp, PTREGS_OFF, %o0
+
+ /* NOTREACHED */
+
++sun4v_bad_ra:
++ or %g0, %g4, %g5
++ ba,pt %xcc, sparc64_realfault_common
++ or %g1, %g0, %g4
++
++ /* NOTREACHED */
++
+ /* Instruction Access Exception, tl0. */
+ sun4v_iacc:
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 51561b8b15ba..d05eb9c1d846 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -39,9 +39,6 @@ asmlinkage unsigned long sys_getpagesize(void)
+ return PAGE_SIZE;
+ }
+
+-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+-#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
+-
+ /* Does addr --> addr+len fall within 4GB of the VA-space hole or
+ * overflow past the end of the 64-bit address space?
+ */
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index ad4bde3bb61e..092a39d506d6 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -110,10 +110,13 @@ startup_continue:
+ brnz,pn %g1, 1b
+ nop
+
+- sethi %hi(p1275buf), %g2
+- or %g2, %lo(p1275buf), %g2
+- ldx [%g2 + 0x10], %l2
+- add %l2, -(192 + 128), %sp
++ /* Get onto temporary stack which will be in the locked
++ * kernel image.
++ */
++ sethi %hi(tramp_stack), %g1
++ or %g1, %lo(tramp_stack), %g1
++ add %g1, TRAMP_STACK_SIZE, %g1
++ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ flushw
+
+ /* Setup the loop variables:
+@@ -395,7 +398,6 @@ after_lock_tlb:
+ sllx %g5, THREAD_SHIFT, %g5
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ add %g6, %g5, %sp
+- mov 0, %fp
+
+ rdpr %pstate, %o1
+ or %o1, PSTATE_IE, %o1
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index b3f833ab90eb..1a338509edb5 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2092,6 +2092,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ atomic_inc(&sun4v_nonresum_oflow_cnt);
+ }
+
++static void sun4v_tlb_error(struct pt_regs *regs)
++{
++ die_if_kernel("TLB/TSB error", regs);
++}
++
+ unsigned long sun4v_err_itlb_vaddr;
+ unsigned long sun4v_err_itlb_ctx;
+ unsigned long sun4v_err_itlb_pte;
+@@ -2099,8 +2104,7 @@ unsigned long sun4v_err_itlb_error;
+
+ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ {
+- if (tl > 1)
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+@@ -2113,7 +2117,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+ sun4v_err_itlb_pte, sun4v_err_itlb_error);
+
+- prom_halt();
++ sun4v_tlb_error(regs);
+ }
+
+ unsigned long sun4v_err_dtlb_vaddr;
+@@ -2123,8 +2127,7 @@ unsigned long sun4v_err_dtlb_error;
+
+ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ {
+- if (tl > 1)
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+@@ -2137,7 +2140,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+ sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+
+- prom_halt();
++ sun4v_tlb_error(regs);
+ }
+
+ void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index a313e4a9399b..be98685c14c6 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -75,7 +75,7 @@ tsb_miss_page_table_walk:
+ mov 512, %g7
+ andn %g5, 0x7, %g5
+ sllx %g7, %g6, %g7
+- srlx %g4, HPAGE_SHIFT, %g6
++ srlx %g4, REAL_HPAGE_SHIFT, %g6
+ sub %g7, 1, %g7
+ and %g6, %g7, %g6
+ sllx %g6, 4, %g6
+@@ -162,10 +162,10 @@ tsb_miss_page_table_walk_sun4v_fastpath:
+ nop
+ .previous
+
+- rdpr %tl, %g3
+- cmp %g3, 1
++ rdpr %tl, %g7
++ cmp %g7, 1
+ bne,pn %xcc, winfix_trampoline
+- nop
++ mov %g3, %g4
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call hugetlb_setup
+diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
+index f8e7dd53e1c7..9c5fbd0b8a04 100644
+--- a/arch/sparc/kernel/viohs.c
++++ b/arch/sparc/kernel/viohs.c
+@@ -714,7 +714,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
+ cfg.tx_irq = vio->vdev->tx_irq;
+ cfg.rx_irq = vio->vdev->rx_irq;
+
+- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
++ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
+ if (IS_ERR(lp))
+ return PTR_ERR(lp);
+
+@@ -746,7 +746,7 @@ void vio_port_up(struct vio_driver_state *vio)
+
+ err = 0;
+ if (state == LDC_STATE_INIT) {
+- err = ldc_bind(vio->lp, vio->name);
++ err = ldc_bind(vio->lp);
+ if (err)
+ printk(KERN_WARNING "%s: Port %lu bind failed, "
+ "err=%d\n",
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index 0bacceb19150..09243057cb0b 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -35,8 +35,9 @@ jiffies = jiffies_64;
+
+ SECTIONS
+ {
+- /* swapper_low_pmd_dir is sparc64 only */
+- swapper_low_pmd_dir = 0x0000000000402000;
++#ifdef CONFIG_SPARC64
++ swapper_pg_dir = 0x0000000000402000;
++#endif
+ . = INITIAL_ADDRESS;
+ .text TEXTSTART :
+ {
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 9cf2ee01cee3..140527a20e7d 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -41,6 +41,10 @@
+ #endif
+ #endif
+
++#if !defined(EX_LD) && !defined(EX_ST)
++#define NON_USER_COPY
++#endif
++
+ #ifndef EX_LD
+ #define EX_LD(x) x
+ #endif
+@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
+ mov EX_RETVAL(%o3), %o0
+
+ .Llarge_src_unaligned:
++#ifdef NON_USER_COPY
++ VISEntryHalfFast(.Lmedium_vis_entry_fail)
++#else
++ VISEntryHalf
++#endif
+ andn %o2, 0x3f, %o4
+ sub %o2, %o4, %o2
+- VISEntryHalf
+ alignaddr %o1, %g0, %g1
+ add %o1, %o4, %o1
+ EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
+@@ -240,6 +248,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
+ nop
+ ba,a,pt %icc, .Lmedium_unaligned
+
++#ifdef NON_USER_COPY
++.Lmedium_vis_entry_fail:
++ or %o0, %o1, %g2
++#endif
+ .Lmedium:
+ LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
+ andcc %g2, 0x7, %g0
+diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
+index 77e531f6c2a7..46272dfc26e8 100644
+--- a/arch/sparc/lib/clear_page.S
++++ b/arch/sparc/lib/clear_page.S
+@@ -37,10 +37,10 @@ _clear_page: /* %o0=dest */
+ .globl clear_user_page
+ clear_user_page: /* %o0=dest, %o1=vaddr */
+ lduw [%g6 + TI_PRE_COUNT], %o2
+- sethi %uhi(PAGE_OFFSET), %g2
++ sethi %hi(PAGE_OFFSET), %g2
+ sethi %hi(PAGE_SIZE), %o4
+
+- sllx %g2, 32, %g2
++ ldx [%g2 + %lo(PAGE_OFFSET)], %g2
+ sethi %hi(PAGE_KERNEL_LOCKED), %g3
+
+ ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
+index 4d2df328e514..dd16c61f3263 100644
+--- a/arch/sparc/lib/copy_page.S
++++ b/arch/sparc/lib/copy_page.S
+@@ -46,10 +46,10 @@
+ .type copy_user_page,#function
+ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
+ lduw [%g6 + TI_PRE_COUNT], %o4
+- sethi %uhi(PAGE_OFFSET), %g2
++ sethi %hi(PAGE_OFFSET), %g2
+ sethi %hi(PAGE_SIZE), %o3
+
+- sllx %g2, 32, %g2
++ ldx [%g2 + %lo(PAGE_OFFSET)], %g2
+ sethi %hi(PAGE_KERNEL_LOCKED), %g3
+
+ ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index 99c017be8719..f75e6906df14 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -3,8 +3,9 @@
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+- * Returns 0, if ok, and number of bytes not yet set if exception
+- * occurs and we were called as clear_user.
++ * Calls to memset return the initial %o0. Calls to bzero return 0 if ok,
++ * and the number of bytes not yet set if an exception occurs and we were
++ * called as clear_user.
+ */
+
+ #include <asm/ptrace.h>
+@@ -65,6 +66,8 @@ __bzero_begin:
+ .globl __memset_start, __memset_end
+ __memset_start:
+ memset:
++ mov %o0, %g1
++ mov 1, %g4
+ and %o1, 0xff, %g3
+ sll %g3, 8, %g2
+ or %g3, %g2, %g3
+@@ -89,6 +92,7 @@ memset:
+ sub %o0, %o2, %o0
+
+ __bzero:
++ clr %g4
+ mov %g0, %g3
+ 1:
+ cmp %o1, 7
+@@ -151,8 +155,8 @@ __bzero:
+ bne,a 8f
+ EX(stb %g3, [%o0], and %o1, 1)
+ 8:
+- retl
+- clr %o0
++ b 0f
++ nop
+ 7:
+ be 13b
+ orcc %o1, 0, %g0
+@@ -164,6 +168,12 @@ __bzero:
+ bne 8b
+ EX(stb %g3, [%o0 - 1], add %o1, 1)
+ 0:
++ andcc %g4, 1, %g0
++ be 5f
++ nop
++ retl
++ mov %g1, %o0
++5:
+ retl
+ clr %o0
+ __memset_end:
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 3841a081beb3..603e462a210e 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -346,6 +346,9 @@ retry:
+ down_read(&mm->mmap_sem);
+ }
+
++ if (fault_code & FAULT_CODE_BAD_RA)
++ goto do_sigbus;
++
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 01ee23dd724d..ae6ce383d4df 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ int *nr)
+ {
+ struct page *head, *page, *tail;
+- u32 mask;
+ int refs;
+
+- mask = PMD_HUGE_PRESENT;
+- if (write)
+- mask |= PMD_HUGE_WRITE;
+- if ((pmd_val(pmd) & mask) != mask)
++ if (!(pmd_val(pmd) & _PAGE_VALID))
++ return 0;
++
++ if (write && !pmd_write(pmd))
+ return 0;
+
+ refs = 0;
+@@ -161,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ return 1;
+ }
+
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++ struct page **pages)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long addr, len, end;
++ unsigned long next, flags;
++ pgd_t *pgdp;
++ int nr = 0;
++
++ start &= PAGE_MASK;
++ addr = start;
++ len = (unsigned long) nr_pages << PAGE_SHIFT;
++ end = start + len;
++
++ local_irq_save(flags);
++ pgdp = pgd_offset(mm, addr);
++ do {
++ pgd_t pgd = *pgdp;
++
++ next = pgd_addr_end(addr, end);
++ if (pgd_none(pgd))
++ break;
++ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++ break;
++ } while (pgdp++, addr = next, addr != end);
++ local_irq_restore(flags);
++
++ return nr;
++}
++
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+ {
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d2b59441ebdd..8545f62fa62c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -21,8 +21,6 @@
+ /* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
+-#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+-#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
+
+ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index b26015f49c0d..4438e94822a2 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -73,7 +73,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
+ * 'cpu' properties, but we need to have this table setup before the
+ * MDESC is initialized.
+ */
+-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+
+ #ifndef CONFIG_DEBUG_PAGEALLOC
+ /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
+@@ -82,10 +81,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ */
+ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+ #endif
++extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+ static unsigned long cpu_pgsz_mask;
+
+-#define MAX_BANKS 32
++#define MAX_BANKS 1024
+
+ static struct linux_prom64_registers pavail[MAX_BANKS];
+ static int pavail_ents;
+@@ -163,10 +163,6 @@ static void __init read_obp_memory(const char *property,
+ cmp_p64, NULL);
+ }
+
+-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
+- sizeof(unsigned long)];
+-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+-
+ /* Kernel physical address base and size in bytes. */
+ unsigned long kern_base __read_mostly;
+ unsigned long kern_size __read_mostly;
+@@ -358,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+- __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
++ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ address, pte_val(pte));
+ else
+ #endif
+@@ -592,7 +588,7 @@ static void __init remap_kernel(void)
+ int i, tlb_ent = sparc64_highest_locked_tlbent();
+
+ tte_vaddr = (unsigned long) KERNBASE;
+- phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
++ phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+ tte_data = kern_large_tte(phys_page);
+
+ kern_locked_tte_data = tte_data;
+@@ -838,7 +834,10 @@ static int find_node(unsigned long addr)
+ if ((addr & p->mask) == p->val)
+ return i;
+ }
+- return -1;
++ /* The following condition has been observed on LDOM guests. */
++ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
++ " rule. Some physical memory will be owned by node 0.");
++ return 0;
+ }
+
+ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+@@ -1359,9 +1358,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
+ static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+ static int pall_ents __initdata;
+
+-#ifdef CONFIG_DEBUG_PAGEALLOC
++static unsigned long max_phys_bits = 40;
++
++bool kern_addr_valid(unsigned long addr)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if ((long)addr < 0L) {
++ unsigned long pa = __pa(addr);
++
++ if ((addr >> max_phys_bits) != 0UL)
++ return false;
++
++ return pfn_valid(pa >> PAGE_SHIFT);
++ }
++
++ if (addr >= (unsigned long) KERNBASE &&
++ addr < (unsigned long)&_end)
++ return true;
++
++ pgd = pgd_offset_k(addr);
++ if (pgd_none(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, addr);
++ if (pud_none(*pud))
++ return 0;
++
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
++ pmd = pmd_offset(pud, addr);
++ if (pmd_none(*pmd))
++ return 0;
++
++ if (pmd_large(*pmd))
++ return pfn_valid(pmd_pfn(*pmd));
++
++ pte = pte_offset_kernel(pmd, addr);
++ if (pte_none(*pte))
++ return 0;
++
++ return pfn_valid(pte_pfn(*pte));
++}
++EXPORT_SYMBOL(kern_addr_valid);
++
++static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
++ unsigned long vend,
++ pud_t *pud)
++{
++ const unsigned long mask16gb = (1UL << 34) - 1UL;
++ u64 pte_val = vstart;
++
++ /* Each PUD is 8GB */
++ if ((vstart & mask16gb) ||
++ (vend - vstart <= mask16gb)) {
++ pte_val ^= kern_linear_pte_xor[2];
++ pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
++
++ return vstart + PUD_SIZE;
++ }
++
++ pte_val ^= kern_linear_pte_xor[3];
++ pte_val |= _PAGE_PUD_HUGE;
++
++ vend = vstart + mask16gb + 1UL;
++ while (vstart < vend) {
++ pud_val(*pud) = pte_val;
++
++ pte_val += PUD_SIZE;
++ vstart += PUD_SIZE;
++ pud++;
++ }
++ return vstart;
++}
++
++static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
++ bool guard)
++{
++ if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
++ return true;
++
++ return false;
++}
++
++static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
++ unsigned long vend,
++ pmd_t *pmd)
++{
++ const unsigned long mask256mb = (1UL << 28) - 1UL;
++ const unsigned long mask2gb = (1UL << 31) - 1UL;
++ u64 pte_val = vstart;
++
++ /* Each PMD is 8MB */
++ if ((vstart & mask256mb) ||
++ (vend - vstart <= mask256mb)) {
++ pte_val ^= kern_linear_pte_xor[0];
++ pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
++
++ return vstart + PMD_SIZE;
++ }
++
++ if ((vstart & mask2gb) ||
++ (vend - vstart <= mask2gb)) {
++ pte_val ^= kern_linear_pte_xor[1];
++ pte_val |= _PAGE_PMD_HUGE;
++ vend = vstart + mask256mb + 1UL;
++ } else {
++ pte_val ^= kern_linear_pte_xor[2];
++ pte_val |= _PAGE_PMD_HUGE;
++ vend = vstart + mask2gb + 1UL;
++ }
++
++ while (vstart < vend) {
++ pmd_val(*pmd) = pte_val;
++
++ pte_val += PMD_SIZE;
++ vstart += PMD_SIZE;
++ pmd++;
++ }
++
++ return vstart;
++}
++
++static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
++ bool guard)
++{
++ if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
++ return true;
++
++ return false;
++}
++
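
kernel_map_hugepmd() above picks the largest linear-mapping page size whose
alignment and remaining length both fit: 2GB where possible, else 256MB, else
plain 4MB-backed PMDs (kernel_map_hugepud() applies the same rule one level up
at 16GB granularity). A minimal userspace sketch of the selection rule; the
helper name and sample ranges are invented, not part of the patch:

    #include <stdio.h>

    /* Mirrors the alignment/length tests in kernel_map_hugepmd(). */
    static const char *pmd_chunk_size(unsigned long long vstart,
                                      unsigned long long len)
    {
            const unsigned long long mask256mb = (1ULL << 28) - 1ULL;
            const unsigned long long mask2gb = (1ULL << 31) - 1ULL;

            if ((vstart & mask256mb) || len <= mask256mb)
                    return "4MB";   /* one 8MB PMD built from 4MB pages */
            if ((vstart & mask2gb) || len <= mask2gb)
                    return "256MB"; /* map the next 256MB with 256MB pages */
            return "2GB";           /* map the next 2GB with 2GB pages */
    }

    int main(void)
    {
            printf("%s\n", pmd_chunk_size(0x00000000ULL, 1ULL << 32)); /* 2GB */
            printf("%s\n", pmd_chunk_size(0x10000000ULL, 1ULL << 30)); /* 256MB */
            printf("%s\n", pmd_chunk_size(0x00400000ULL, 1ULL << 30)); /* 4MB */
            return 0;
    }
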
+ static unsigned long __ref kernel_map_range(unsigned long pstart,
+- unsigned long pend, pgprot_t prot)
++ unsigned long pend, pgprot_t prot,
++ bool use_huge)
+ {
+ unsigned long vstart = PAGE_OFFSET + pstart;
+ unsigned long vend = PAGE_OFFSET + pend;
+@@ -1380,19 +1514,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ pmd_t *pmd;
+ pte_t *pte;
+
++ if (pgd_none(*pgd)) {
++ pud_t *new;
++
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++ alloc_bytes += PAGE_SIZE;
++ pgd_populate(&init_mm, pgd, new);
++ }
+ pud = pud_offset(pgd, vstart);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
++ if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
++ vstart = kernel_map_hugepud(vstart, vend, pud);
++ continue;
++ }
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, vstart);
+- if (!pmd_present(*pmd)) {
++ if (pmd_none(*pmd)) {
+ pte_t *new;
+
++ if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
++ vstart = kernel_map_hugepmd(vstart, vend, pmd);
++ continue;
++ }
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pmd_populate_kernel(&init_mm, pmd, new);
+@@ -1415,100 +1564,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ return alloc_bytes;
+ }
+
+-extern unsigned int kvmap_linear_patch[1];
+-#endif /* CONFIG_DEBUG_PAGEALLOC */
+-
+-static void __init kpte_set_val(unsigned long index, unsigned long val)
++static void __init flush_all_kernel_tsbs(void)
+ {
+- unsigned long *ptr = kpte_linear_bitmap;
+-
+- val <<= ((index % (BITS_PER_LONG / 2)) * 2);
+- ptr += (index / (BITS_PER_LONG / 2));
+-
+- *ptr |= val;
+-}
+-
+-static const unsigned long kpte_shift_min = 28; /* 256MB */
+-static const unsigned long kpte_shift_max = 34; /* 16GB */
+-static const unsigned long kpte_shift_incr = 3;
+-
+-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
+- unsigned long shift)
+-{
+- unsigned long size = (1UL << shift);
+- unsigned long mask = (size - 1UL);
+- unsigned long remains = end - start;
+- unsigned long val;
+-
+- if (remains < size || (start & mask))
+- return start;
+-
+- /* VAL maps:
+- *
+- * shift 28 --> kern_linear_pte_xor index 1
+- * shift 31 --> kern_linear_pte_xor index 2
+- * shift 34 --> kern_linear_pte_xor index 3
+- */
+- val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
+-
+- remains &= ~mask;
+- if (shift != kpte_shift_max)
+- remains = size;
+-
+- while (remains) {
+- unsigned long index = start >> kpte_shift_min;
++ int i;
+
+- kpte_set_val(index, val);
++ for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
++ struct tsb *ent = &swapper_tsb[i];
+
+- start += 1UL << kpte_shift_min;
+- remains -= 1UL << kpte_shift_min;
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
++#ifndef CONFIG_DEBUG_PAGEALLOC
++ for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
++ struct tsb *ent = &swapper_4m_tsb[i];
+
+- return start;
+-}
+-
+-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+-{
+- unsigned long smallest_size, smallest_mask;
+- unsigned long s;
+-
+- smallest_size = (1UL << kpte_shift_min);
+- smallest_mask = (smallest_size - 1UL);
+-
+- while (start < end) {
+- unsigned long orig_start = start;
+-
+- for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
+- start = kpte_mark_using_shift(start, end, s);
+-
+- if (start != orig_start)
+- break;
+- }
+-
+- if (start == orig_start)
+- start = (start + smallest_size) & ~smallest_mask;
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
++#endif
+ }
+
+-static void __init init_kpte_bitmap(void)
+-{
+- unsigned long i;
+-
+- for (i = 0; i < pall_ents; i++) {
+- unsigned long phys_start, phys_end;
+-
+- phys_start = pall[i].phys_addr;
+- phys_end = phys_start + pall[i].reg_size;
+-
+- mark_kpte_bitmap(phys_start, phys_end);
+- }
+-}
++extern unsigned int kvmap_linear_patch[1];
+
+ static void __init kernel_physical_mapping_init(void)
+ {
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ unsigned long i, mem_alloced = 0UL;
++ bool use_huge = true;
+
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ use_huge = false;
++#endif
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+@@ -1516,7 +1599,7 @@ static void __init kernel_physical_mapping_init(void)
+ phys_end = phys_start + pall[i].reg_size;
+
+ mem_alloced += kernel_map_range(phys_start, phys_end,
+- PAGE_KERNEL);
++ PAGE_KERNEL, use_huge);
+ }
+
+ printk("Allocated %ld bytes for kernel page tables.\n",
+@@ -1525,8 +1608,9 @@ static void __init kernel_physical_mapping_init(void)
+ kvmap_linear_patch[0] = 0x01000000; /* nop */
+ flushi(&kvmap_linear_patch[0]);
+
++ flush_all_kernel_tsbs();
++
+ __flush_tlb_all();
+-#endif
+ }
+
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+@@ -1536,7 +1620,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
+ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+ kernel_map_range(phys_start, phys_end,
+- (enable ? PAGE_KERNEL : __pgprot(0)));
++ (enable ? PAGE_KERNEL : __pgprot(0)), false);
+
+ flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
+ PAGE_OFFSET + phys_end);
+@@ -1561,6 +1645,80 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
+ return ~0UL;
+ }
+
++unsigned long PAGE_OFFSET;
++EXPORT_SYMBOL(PAGE_OFFSET);
++
++unsigned long VMALLOC_END = 0x0000010000000000UL;
++EXPORT_SYMBOL(VMALLOC_END);
++
++unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
++unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
++
++static void __init setup_page_offset(void)
++{
++ if (tlb_type == cheetah || tlb_type == cheetah_plus) {
++ /* Cheetah/Panther support a full 64-bit virtual
++ * address, so we can use all that our page tables
++ * support.
++ */
++ sparc64_va_hole_top = 0xfff0000000000000UL;
++ sparc64_va_hole_bottom = 0x0010000000000000UL;
++
++ max_phys_bits = 42;
++ } else if (tlb_type == hypervisor) {
++ switch (sun4v_chip_type) {
++ case SUN4V_CHIP_NIAGARA1:
++ case SUN4V_CHIP_NIAGARA2:
++ /* T1 and T2 support 48-bit virtual addresses. */
++ sparc64_va_hole_top = 0xffff800000000000UL;
++ sparc64_va_hole_bottom = 0x0000800000000000UL;
++
++ max_phys_bits = 39;
++ break;
++ case SUN4V_CHIP_NIAGARA3:
++ /* T3 supports 48-bit virtual addresses. */
++ sparc64_va_hole_top = 0xffff800000000000UL;
++ sparc64_va_hole_bottom = 0x0000800000000000UL;
++
++ max_phys_bits = 43;
++ break;
++ case SUN4V_CHIP_NIAGARA4:
++ case SUN4V_CHIP_NIAGARA5:
++ case SUN4V_CHIP_SPARC64X:
++ case SUN4V_CHIP_SPARC_M6:
++ /* T4 and later support 52-bit virtual addresses. */
++ sparc64_va_hole_top = 0xfff8000000000000UL;
++ sparc64_va_hole_bottom = 0x0008000000000000UL;
++ max_phys_bits = 47;
++ break;
++ case SUN4V_CHIP_SPARC_M7:
++ default:
++ /* M7 and later support 52-bit virtual addresses. */
++ sparc64_va_hole_top = 0xfff8000000000000UL;
++ sparc64_va_hole_bottom = 0x0008000000000000UL;
++ max_phys_bits = 49;
++ break;
++ }
++ }
++
++ if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
++ prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
++ max_phys_bits);
++ prom_halt();
++ }
++
++ PAGE_OFFSET = sparc64_va_hole_top;
++ VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
++ (sparc64_va_hole_bottom >> 2));
++
++ pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
++ PAGE_OFFSET, max_phys_bits);
++ pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
++ VMALLOC_START, VMALLOC_END);
++ pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
++ VMEMMAP_BASE, VMEMMAP_BASE << 1);
++}
++
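
setup_page_offset() derives PAGE_OFFSET from the top of the chip's VA hole and
places VMALLOC_END three quarters of the way down to the hole's bottom. A
worked example using the T4 constants from the switch above (a sketch; the
expected output is computed by hand):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long hole_top = 0xfff8000000000000ULL; /* T4+ */
            unsigned long long hole_bottom = 0x0008000000000000ULL;
            unsigned long long page_offset = hole_top;
            unsigned long long vmalloc_end = (hole_bottom >> 1) +
                                             (hole_bottom >> 2);

            printf("PAGE_OFFSET = %#llx\n", page_offset); /* 0xfff8000000000000 */
            printf("VMALLOC_END = %#llx\n", vmalloc_end); /* 0x6000000000000 */
            return 0;
    }
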
+ static void __init tsb_phys_patch(void)
+ {
+ struct tsb_ldquad_phys_patch_entry *pquad;
+@@ -1603,21 +1761,42 @@ static void __init tsb_phys_patch(void)
+ #define NUM_KTSB_DESCR 1
+ #endif
+ static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
+-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
++
++/* The swapper TSBs are loaded with a base sequence of:
++ *
++ * sethi %uhi(SYMBOL), REG1
++ * sethi %hi(SYMBOL), REG2
++ * or REG1, %ulo(SYMBOL), REG1
++ * or REG2, %lo(SYMBOL), REG2
++ * sllx REG1, 32, REG1
++ * or REG1, REG2, REG1
++ *
++ * When we use physical addressing for the TSB accesses, we patch the
++ * first four instructions in the above sequence.
++ */
+
+ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+ {
+- pa >>= KTSB_PHYS_SHIFT;
++ unsigned long high_bits, low_bits;
++
++ high_bits = (pa >> 32) & 0xffffffff;
++ low_bits = (pa >> 0) & 0xffffffff;
+
+ while (start < end) {
+ unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+- ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
++ ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
+ __asm__ __volatile__("flush %0" : : "r" (ia));
+
+- ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
++ ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
+ __asm__ __volatile__("flush %0" : : "r" (ia + 1));
+
++ ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
++ __asm__ __volatile__("flush %0" : : "r" (ia + 2));
++
++ ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
++ __asm__ __volatile__("flush %0" : : "r" (ia + 3));
++
+ start++;
+ }
+ }
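
On sparc64, sethi places a 22-bit immediate into bits 31..10 of a register and
the following or fills bits 9..0, so the four instructions patched above carry
a 64-bit physical address as two 32-bit halves. A userspace sketch showing
that the four immediates round-trip the address; the variable names and the
sample address are invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pa = 0x000000abcdef2000ULL; /* sample TSB physical address */
            uint32_t high = pa >> 32, low = pa & 0xffffffff;

            /* imm22 fields of the two sethi instructions (bits 31..10) */
            uint32_t sethi_hi = (high >> 10) & 0x3fffff;
            uint32_t sethi_lo = (low >> 10) & 0x3fffff;
            /* low 10 bits carried by the two or instructions */
            uint32_t or_hi = high & 0x3ff;
            uint32_t or_lo = low & 0x3ff;

            /* REG1 = high half shifted up by sllx; REG2 = low half */
            uint64_t rebuilt = (((uint64_t)((sethi_hi << 10) | or_hi)) << 32) |
                               ((sethi_lo << 10) | or_lo);

            printf("pa = %#llx, rebuilt = %#llx\n",
                   (unsigned long long)pa, (unsigned long long)rebuilt);
            return 0;
    }
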
+@@ -1726,7 +1905,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ #ifndef CONFIG_DEBUG_PAGEALLOC
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
+ kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
+- 0xfffff80000000000UL;
++ PAGE_OFFSET;
+ kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+ } else {
+@@ -1735,7 +1914,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
+ kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
+- 0xfffff80000000000UL;
++ PAGE_OFFSET;
+ kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+ } else {
+@@ -1744,7 +1923,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+
+ if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
+ kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
+- 0xfffff80000000000UL;
++ PAGE_OFFSET;
+ kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+ } else {
+@@ -1756,7 +1935,6 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ /* paging_init() sets up the page tables */
+
+ static unsigned long last_valid_pfn;
+-pgd_t swapper_pg_dir[2048];
+
+ static void sun4u_pgprot_init(void);
+ static void sun4v_pgprot_init(void);
+@@ -1767,6 +1945,8 @@ void __init paging_init(void)
+ unsigned long real_end, i;
+ int node;
+
++ setup_page_offset();
++
+ /* These build time checks make sure that the dcache_dirty_cpu()
+ * page->flags usage will work.
+ *
+@@ -1792,7 +1972,7 @@ void __init paging_init(void)
+
+ BUILD_BUG_ON(NR_CPUS > 4096);
+
+- kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
++ kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+ kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
+ /* Invalidate both kernel TSBs. */
+@@ -1848,7 +2028,7 @@ void __init paging_init(void)
+ shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
+ real_end = (unsigned long)_end;
+- num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
++ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
+ printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+ num_kernel_image_mappings);
+
+@@ -1857,16 +2037,10 @@ void __init paging_init(void)
+ */
+ init_mm.pgd += ((shift) / (sizeof(pgd_t)));
+
+- memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
++ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+- /* Now can init the kernel/bad page tables. */
+- pud_set(pud_offset(&swapper_pg_dir[0], 0),
+- swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
+-
+ inherit_prom_mappings();
+
+- init_kpte_bitmap();
+-
+ /* Ok, we can use our TLB miss and window trap handlers safely. */
+ setup_tba();
+
+@@ -1973,70 +2147,6 @@ int page_in_phys_avail(unsigned long paddr)
+ return 0;
+ }
+
+-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+-static int pavail_rescan_ents __initdata;
+-
+-/* Certain OBP calls, such as fetching "available" properties, can
+- * claim physical memory. So, along with initializing the valid
+- * address bitmap, what we do here is refetch the physical available
+- * memory list again, and make sure it provides at least as much
+- * memory as 'pavail' does.
+- */
+-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
+-{
+- int i;
+-
+- read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
+-
+- for (i = 0; i < pavail_ents; i++) {
+- unsigned long old_start, old_end;
+-
+- old_start = pavail[i].phys_addr;
+- old_end = old_start + pavail[i].reg_size;
+- while (old_start < old_end) {
+- int n;
+-
+- for (n = 0; n < pavail_rescan_ents; n++) {
+- unsigned long new_start, new_end;
+-
+- new_start = pavail_rescan[n].phys_addr;
+- new_end = new_start +
+- pavail_rescan[n].reg_size;
+-
+- if (new_start <= old_start &&
+- new_end >= (old_start + PAGE_SIZE)) {
+- set_bit(old_start >> 22, bitmap);
+- goto do_next_page;
+- }
+- }
+-
+- prom_printf("mem_init: Lost memory in pavail\n");
+- prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
+- pavail[i].phys_addr,
+- pavail[i].reg_size);
+- prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
+- pavail_rescan[i].phys_addr,
+- pavail_rescan[i].reg_size);
+- prom_printf("mem_init: Cannot continue, aborting.\n");
+- prom_halt();
+-
+- do_next_page:
+- old_start += PAGE_SIZE;
+- }
+- }
+-}
+-
+-static void __init patch_tlb_miss_handler_bitmap(void)
+-{
+- extern unsigned int valid_addr_bitmap_insn[];
+- extern unsigned int valid_addr_bitmap_patch[];
+-
+- valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
+- mb();
+- valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
+- flushi(&valid_addr_bitmap_insn[0]);
+-}
+-
+ static void __init register_page_bootmem_info(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+@@ -2049,18 +2159,6 @@ static void __init register_page_bootmem_info(void)
+ }
+ void __init mem_init(void)
+ {
+- unsigned long addr, last;
+-
+- addr = PAGE_OFFSET + kern_base;
+- last = PAGE_ALIGN(kern_size) + addr;
+- while (addr < last) {
+- set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+- addr += PAGE_SIZE;
+- }
+-
+- setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
+- patch_tlb_miss_handler_bitmap();
+-
+ high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+
+ register_page_bootmem_info();
+@@ -2150,18 +2248,9 @@ unsigned long _PAGE_CACHE __read_mostly;
+ EXPORT_SYMBOL(_PAGE_CACHE);
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+-unsigned long vmemmap_table[VMEMMAP_SIZE];
+-
+-static long __meminitdata addr_start, addr_end;
+-static int __meminitdata node_start;
+-
+ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ int node)
+ {
+- unsigned long phys_start = (vstart - VMEMMAP_BASE);
+- unsigned long phys_end = (vend - VMEMMAP_BASE);
+- unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+- unsigned long end = VMEMMAP_ALIGN(phys_end);
+ unsigned long pte_base;
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+@@ -2172,47 +2261,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+- for (; addr < end; addr += VMEMMAP_CHUNK) {
+- unsigned long *vmem_pp =
+- vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+- void *block;
++ pte_base |= _PAGE_PMD_HUGE;
+
+- if (!(*vmem_pp & _PAGE_VALID)) {
+- block = vmemmap_alloc_block(1UL << 22, node);
+- if (!block)
++ vstart = vstart & PMD_MASK;
++ vend = ALIGN(vend, PMD_SIZE);
++ for (; vstart < vend; vstart += PMD_SIZE) {
++ pgd_t *pgd = pgd_offset_k(vstart);
++ unsigned long pte;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (pgd_none(*pgd)) {
++ pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
++
++ if (!new)
+ return -ENOMEM;
++ pgd_populate(&init_mm, pgd, new);
++ }
+
+- *vmem_pp = pte_base | __pa(block);
++ pud = pud_offset(pgd, vstart);
++ if (pud_none(*pud)) {
++ pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+
+- /* check to see if we have contiguous blocks */
+- if (addr_end != addr || node_start != node) {
+- if (addr_start)
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+- addr_start, addr_end-1, node_start);
+- addr_start = addr;
+- node_start = node;
+- }
+- addr_end = addr + VMEMMAP_CHUNK;
++ if (!new)
++ return -ENOMEM;
++ pud_populate(&init_mm, pud, new);
+ }
+- }
+- return 0;
+-}
+
+-void __meminit vmemmap_populate_print_last(void)
+-{
+- if (addr_start) {
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+- addr_start, addr_end-1, node_start);
+- addr_start = 0;
+- addr_end = 0;
+- node_start = 0;
++ pmd = pmd_offset(pud, vstart);
++
++ pte = pmd_val(*pmd);
++ if (!(pte & _PAGE_VALID)) {
++ void *block = vmemmap_alloc_block(PMD_SIZE, node);
++
++ if (!block)
++ return -ENOMEM;
++
++ pmd_val(*pmd) = pte_base | __pa(block);
++ }
+ }
++
++ return 0;
+ }
+
+ void vmemmap_free(unsigned long start, unsigned long end)
+ {
+ }
+-
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ static void prot_init_common(unsigned long page_none,
+@@ -2265,10 +2359,10 @@ static void __init sun4u_pgprot_init(void)
+ __ACCESS_BITS_4U | _PAGE_E_4U);
+
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+- kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
++ kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+ #else
+ kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+- 0xfffff80000000000UL;
++ PAGE_OFFSET;
+ #endif
+ kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+@@ -2312,10 +2406,10 @@ static void __init sun4v_pgprot_init(void)
+ _PAGE_CACHE = _PAGE_CACHE_4V;
+
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+- kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
++ kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+ #else
+ kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+- 0xfffff80000000000UL;
++ PAGE_OFFSET;
+ #endif
+ kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+@@ -2459,53 +2553,13 @@ void __flush_tlb_all(void)
+ : : "r" (pstate));
+ }
+
+-static pte_t *get_from_cache(struct mm_struct *mm)
+-{
+- struct page *page;
+- pte_t *ret;
+-
+- spin_lock(&mm->page_table_lock);
+- page = mm->context.pgtable_page;
+- ret = NULL;
+- if (page) {
+- void *p = page_address(page);
+-
+- mm->context.pgtable_page = NULL;
+-
+- ret = (pte_t *) (p + (PAGE_SIZE / 2));
+- }
+- spin_unlock(&mm->page_table_lock);
+-
+- return ret;
+-}
+-
+-static struct page *__alloc_for_cache(struct mm_struct *mm)
+-{
+- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+- __GFP_REPEAT | __GFP_ZERO);
+-
+- if (page) {
+- spin_lock(&mm->page_table_lock);
+- if (!mm->context.pgtable_page) {
+- atomic_set(&page->_count, 2);
+- mm->context.pgtable_page = page;
+- }
+- spin_unlock(&mm->page_table_lock);
+- }
+- return page;
+-}
+-
+ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+ {
+- struct page *page;
+- pte_t *pte;
+-
+- pte = get_from_cache(mm);
+- if (pte)
+- return pte;
++ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
++ __GFP_REPEAT | __GFP_ZERO);
++ pte_t *pte = NULL;
+
+- page = __alloc_for_cache(mm);
+ if (page)
+ pte = (pte_t *) page_address(page);
+
+@@ -2515,14 +2569,10 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+ {
+- struct page *page;
+- pte_t *pte;
+-
+- pte = get_from_cache(mm);
+- if (pte)
+- return pte;
++ struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
++ __GFP_REPEAT | __GFP_ZERO);
++ pte_t *pte = NULL;
+
+- page = __alloc_for_cache(mm);
+ if (page) {
+ pgtable_page_ctor(page);
+ pte = (pte_t *) page_address(page);
+@@ -2533,18 +2583,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm,
+
+ void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+ {
+- struct page *page = virt_to_page(pte);
+- if (put_page_testzero(page))
+- free_hot_cold_page(page, 0);
++ free_page((unsigned long)pte);
+ }
+
+ static void __pte_free(pgtable_t pte)
+ {
+ struct page *page = virt_to_page(pte);
+- if (put_page_testzero(page)) {
+- pgtable_page_dtor(page);
+- free_hot_cold_page(page, 0);
+- }
++
++ pgtable_page_dtor(page);
++ __free_page(page);
+ }
+
+ void pte_free(struct mm_struct *mm, pgtable_t pte)
+@@ -2561,124 +2608,27 @@ void pgtable_free(void *table, bool is_page)
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
+-{
+- if (pgprot_val(pgprot) & _PAGE_VALID)
+- pmd_val(pmd) |= PMD_HUGE_PRESENT;
+- if (tlb_type == hypervisor) {
+- if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
+- pmd_val(pmd) |= PMD_HUGE_WRITE;
+- if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
+- pmd_val(pmd) |= PMD_HUGE_EXEC;
+-
+- if (!for_modify) {
+- if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
+- pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+- if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
+- pmd_val(pmd) |= PMD_HUGE_DIRTY;
+- }
+- } else {
+- if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
+- pmd_val(pmd) |= PMD_HUGE_WRITE;
+- if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
+- pmd_val(pmd) |= PMD_HUGE_EXEC;
+-
+- if (!for_modify) {
+- if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
+- pmd_val(pmd) |= PMD_HUGE_ACCESSED;
+- if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
+- pmd_val(pmd) |= PMD_HUGE_DIRTY;
+- }
+- }
+-
+- return pmd;
+-}
+-
+-pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+-{
+- pmd_t pmd;
+-
+- pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT)));
+- pmd_val(pmd) |= PMD_ISHUGE;
+- pmd = pmd_set_protbits(pmd, pgprot, false);
+- return pmd;
+-}
+-
+-pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+-{
+- pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
+- PMD_HUGE_WRITE |
+- PMD_HUGE_EXEC);
+- pmd = pmd_set_protbits(pmd, newprot, true);
+- return pmd;
+-}
+-
+-pgprot_t pmd_pgprot(pmd_t entry)
+-{
+- unsigned long pte = 0;
+-
+- if (pmd_val(entry) & PMD_HUGE_PRESENT)
+- pte |= _PAGE_VALID;
+-
+- if (tlb_type == hypervisor) {
+- if (pmd_val(entry) & PMD_HUGE_PRESENT)
+- pte |= _PAGE_PRESENT_4V;
+- if (pmd_val(entry) & PMD_HUGE_EXEC)
+- pte |= _PAGE_EXEC_4V;
+- if (pmd_val(entry) & PMD_HUGE_WRITE)
+- pte |= _PAGE_W_4V;
+- if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+- pte |= _PAGE_ACCESSED_4V;
+- if (pmd_val(entry) & PMD_HUGE_DIRTY)
+- pte |= _PAGE_MODIFIED_4V;
+- pte |= _PAGE_CP_4V|_PAGE_CV_4V;
+- } else {
+- if (pmd_val(entry) & PMD_HUGE_PRESENT)
+- pte |= _PAGE_PRESENT_4U;
+- if (pmd_val(entry) & PMD_HUGE_EXEC)
+- pte |= _PAGE_EXEC_4U;
+- if (pmd_val(entry) & PMD_HUGE_WRITE)
+- pte |= _PAGE_W_4U;
+- if (pmd_val(entry) & PMD_HUGE_ACCESSED)
+- pte |= _PAGE_ACCESSED_4U;
+- if (pmd_val(entry) & PMD_HUGE_DIRTY)
+- pte |= _PAGE_MODIFIED_4U;
+- pte |= _PAGE_CP_4U|_PAGE_CV_4U;
+- }
+-
+- return __pgprot(pte);
+-}
+-
+ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd)
+ {
+ unsigned long pte, flags;
+ struct mm_struct *mm;
+ pmd_t entry = *pmd;
+- pgprot_t prot;
+
+ if (!pmd_large(entry) || !pmd_young(entry))
+ return;
+
+- pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
+- pte <<= PMD_PADDR_SHIFT;
+- pte |= _PAGE_VALID;
+-
+- prot = pmd_pgprot(entry);
+-
+- if (tlb_type == hypervisor)
+- pgprot_val(prot) |= _PAGE_SZHUGE_4V;
+- else
+- pgprot_val(prot) |= _PAGE_SZHUGE_4U;
++ pte = pmd_val(entry);
+
+- pte |= pgprot_val(prot);
++ /* We are fabricating 8MB pages using 4MB real hw pages. */
++ pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+
+ mm = vma->vm_mm;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+- __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
++ __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+ addr, pte);
+
+ spin_unlock_irqrestore(&mm->context.lock, flags);
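
The REAL_HPAGE_SHIFT changes follow from sparc64 fabricating an 8MB software
huge page out of two 4MB hardware pages: or-ing address bit REAL_HPAGE_SHIFT
(bit 22 for 4MB pages) into the pte selects the matching 4MB half before the
TSB insert. A userspace sketch with invented sample values:

    #include <stdint.h>
    #include <stdio.h>

    #define REAL_HPAGE_SHIFT 22 /* 4MB hardware page */
    #define HPAGE_SHIFT 23      /* 8MB software huge page */

    int main(void)
    {
            uint64_t pte = 0x80000000ULL; /* pretend pmd: pa of first 4MB half */
            uint64_t vaddr;

            for (vaddr = 0xf0000000ULL;
                 vaddr < 0xf0000000ULL + (1ULL << HPAGE_SHIFT);
                 vaddr += 1ULL << REAL_HPAGE_SHIFT) {
                    /* what update_mmu_cache_pmd() inserts into the huge TSB */
                    uint64_t tsb_pte = pte | (vaddr & (1ULL << REAL_HPAGE_SHIFT));

                    printf("vaddr %#llx -> pte %#llx (%s half)\n",
                           (unsigned long long)vaddr,
                           (unsigned long long)tsb_pte,
                           (vaddr & (1ULL << REAL_HPAGE_SHIFT)) ?
                                   "second" : "first");
            }
            return 0;
    }
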
+@@ -2765,8 +2715,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ }
+ if (end > HI_OBP_ADDRESS) {
+- flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+- do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++ flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
++ do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
+ }
+ } else {
+ flush_tsb_kernel_range(start, end);
+diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
+index 0661aa606dec..ac491193cb54 100644
+--- a/arch/sparc/mm/init_64.h
++++ b/arch/sparc/mm/init_64.h
+@@ -1,20 +1,15 @@
+ #ifndef _SPARC64_MM_INIT_H
+ #define _SPARC64_MM_INIT_H
+
++#include <asm/page.h>
++
+ /* Most of the symbols in this file are defined in init.c and
+ * marked non-static so that assembler code can get at them.
+ */
+
+-#define MAX_PHYS_ADDRESS (1UL << 41UL)
+-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
+-#define KPTE_BITMAP_BYTES \
+- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
+-#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
+-#define VALID_ADDR_BITMAP_BYTES \
+- ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
++#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
+
+ extern unsigned long kern_linear_pte_xor[4];
+-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ extern unsigned int sparc64_highest_unlocked_tlb_ent;
+ extern unsigned long sparc64_kern_pri_context;
+ extern unsigned long sparc64_kern_pri_nuc_bits;
+@@ -36,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
+
+ extern void prom_world(int enter);
+
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+-#define VMEMMAP_CHUNK_SHIFT 22
+-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
+-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
+-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+-
+-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+- sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
+-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
+-#endif
+-
+ #endif /* _SPARC64_MM_INIT_H */
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 7a91f288c708..c24d0aa2b615 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -135,7 +135,7 @@ no_cache_flush:
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+- pmd_t pmd, bool exec)
++ pmd_t pmd)
+ {
+ unsigned long end;
+ pte_t *pte;
+@@ -143,8 +143,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+ pte = pte_offset_map(&pmd, vaddr);
+ end = vaddr + HPAGE_SIZE;
+ while (vaddr < end) {
+- if (pte_val(*pte) & _PAGE_VALID)
++ if (pte_val(*pte) & _PAGE_VALID) {
++ bool exec = pte_exec(*pte);
++
+ tlb_batch_add_one(mm, vaddr, exec);
++ }
+ pte++;
+ vaddr += PAGE_SIZE;
+ }
+@@ -161,8 +164,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ if (mm == &init_mm)
+ return;
+
+- if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+- if (pmd_val(pmd) & PMD_ISHUGE)
++ if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
++ if (pmd_val(pmd) & _PAGE_PMD_HUGE)
+ mm->context.huge_pte_count++;
+ else
+ mm->context.huge_pte_count--;
+@@ -178,16 +181,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ }
+
+ if (!pmd_none(orig)) {
+- bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
+-
+ addr &= HPAGE_MASK;
+- if (pmd_val(orig) & PMD_ISHUGE)
++ if (pmd_trans_huge(orig)) {
++ pte_t orig_pte = __pte(pmd_val(orig));
++ bool exec = pte_exec(orig_pte);
++
+ tlb_batch_add_one(mm, addr, exec);
+- else
+- tlb_batch_pmd_scan(mm, addr, orig, exec);
++ tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
++ } else {
++ tlb_batch_pmd_scan(mm, addr, orig);
++ }
+ }
+ }
+
++void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
++ pmd_t *pmdp)
++{
++ pmd_t entry = *pmdp;
++
++ pmd_val(entry) &= ~_PAGE_VALID;
++
++ set_pmd_at(vma->vm_mm, address, pmdp, entry);
++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
++}
++
+ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+ {
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 71d99a6c75a7..10a69f47745a 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb)
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+- __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
++ __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+ }
+ #endif
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+@@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+- __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
++ __flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+ }
+ #endif
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+@@ -484,8 +484,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ mm->context.huge_pte_count = 0;
+ #endif
+
+- mm->context.pgtable_page = NULL;
+-
+ /* copy_mm() copies over the parent's mm_struct before calling
+ * us, so we need to zero out the TSB pointer or else tsb_grow()
+ * will be confused and think there is an older TSB to free up.
+@@ -524,17 +522,10 @@ static void tsb_destroy_one(struct tsb_config *tp)
+ void destroy_context(struct mm_struct *mm)
+ {
+ unsigned long flags, i;
+- struct page *page;
+
+ for (i = 0; i < MM_NUM_TSBS; i++)
+ tsb_destroy_one(&mm->context.tsb_block[i]);
+
+- page = mm->context.pgtable_page;
+- if (page && put_page_testzero(page)) {
+- pgtable_page_dtor(page);
+- free_hot_cold_page(page, 0);
+- }
+-
+ spin_lock_irqsave(&ctx_alloc_lock, flags);
+
+ if (CTX_VALID(mm->context)) {
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 432aa0cb1b38..b4f4733abc6e 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -153,10 +153,10 @@ __spitfire_flush_tlb_mm_slow:
+ .globl __flush_icache_page
+ __flush_icache_page: /* %o0 = phys_page */
+ srlx %o0, PAGE_SHIFT, %o0
+- sethi %uhi(PAGE_OFFSET), %g1
++ sethi %hi(PAGE_OFFSET), %g1
+ sllx %o0, PAGE_SHIFT, %o0
+ sethi %hi(PAGE_SIZE), %g2
+- sllx %g1, 32, %g1
++ ldx [%g1 + %lo(PAGE_OFFSET)], %g1
+ add %o0, %g1, %o0
+ 1: subcc %g2, 32, %g2
+ bne,pt %icc, 1b
+@@ -178,8 +178,8 @@ __flush_icache_page: /* %o0 = phys_page */
+ .align 64
+ .globl __flush_dcache_page
+ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
+- sethi %uhi(PAGE_OFFSET), %g1
+- sllx %g1, 32, %g1
++ sethi %hi(PAGE_OFFSET), %g1
++ ldx [%g1 + %lo(PAGE_OFFSET)], %g1
+ sub %o0, %g1, %o0 ! physical address
+ srlx %o0, 11, %o0 ! make D-cache TAG
+ sethi %hi(1 << 14), %o2 ! D-cache size
+@@ -287,8 +287,8 @@ __cheetah_flush_tlb_pending: /* 27 insns */
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+ __cheetah_flush_dcache_page: /* 11 insns */
+- sethi %uhi(PAGE_OFFSET), %g1
+- sllx %g1, 32, %g1
++ sethi %hi(PAGE_OFFSET), %g1
++ ldx [%g1 + %lo(PAGE_OFFSET)], %g1
+ sub %o0, %g1, %o0
+ sethi %hi(PAGE_SIZE), %o4
+ 1: subcc %o4, (1 << 5), %o4
+diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S
+index 79942166df84..d7d9017dcb15 100644
+--- a/arch/sparc/power/hibernate_asm.S
++++ b/arch/sparc/power/hibernate_asm.S
+@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume)
+ nop
+
+ /* Write PAGE_OFFSET to %g7 */
+- sethi %uhi(PAGE_OFFSET), %g7
+- sllx %g7, 32, %g7
++ sethi %hi(PAGE_OFFSET), %g7
++ ldx [%g7 + %lo(PAGE_OFFSET)], %g7
+
+ setuw (PAGE_SIZE-8), %g3
+
+diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
+index ab9ccc63b388..7149e77714a4 100644
+--- a/arch/sparc/prom/bootstr_64.c
++++ b/arch/sparc/prom/bootstr_64.c
+@@ -14,7 +14,10 @@
+ * the .bss section or it will break things.
+ */
+
+-#define BARG_LEN 256
++/* We limit BARG_LEN to 1024 because this is the size of the
++ * 'barg_out' command line buffer in the SILO bootloader.
++ */
++#define BARG_LEN 1024
+ struct {
+ int bootstr_len;
+ int bootstr_valid;
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b7d429..8050f381f518 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ .text
+ .globl prom_cif_direct
+ prom_cif_direct:
++ save %sp, -192, %sp
+ sethi %hi(p1275buf), %o1
+ or %o1, %lo(p1275buf), %o1
+- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
+- save %o2, -192, %sp
+- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
++ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
+ mov %g4, %l0
+ mov %g5, %l1
+ mov %g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db755828f..110b0d78b864 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+ * It gets passed the pointer to the PROM vector.
+ */
+
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ phandle node;
+
+- prom_cif_init(cif_handler, cif_stack);
++ prom_cif_init(cif_handler);
+
+ prom_chosen_node = prom_finddevice(prom_chosen_path);
+ if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index 04a4540509dd..fda23e6e1d93 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -10,6 +10,7 @@
+ #include <linux/smp.h>
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
++#include <linux/irqflags.h>
+
+ #include <asm/openprom.h>
+ #include <asm/oplib.h>
+@@ -20,7 +21,6 @@
+ struct {
+ long prom_callback; /* 0x00 */
+ void (*prom_cif_handler)(long *); /* 0x08 */
+- unsigned long prom_cif_stack; /* 0x10 */
+ } p1275buf;
+
+ extern void prom_world(int);
+@@ -37,8 +37,8 @@ void p1275_cmd_direct(unsigned long *args)
+ {
+ unsigned long flags;
+
+- raw_local_save_flags(flags);
+- raw_local_irq_restore((unsigned long)PIL_NMI);
++ local_save_flags(flags);
++ local_irq_restore((unsigned long)PIL_NMI);
+ raw_spin_lock(&prom_entry_lock);
+
+ prom_world(1);
+@@ -46,11 +46,10 @@ void p1275_cmd_direct(unsigned long *args)
+ prom_world(0);
+
+ raw_spin_unlock(&prom_entry_lock);
+- raw_local_irq_restore(flags);
++ local_irq_restore(flags);
+ }
+
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ec6c0395b512..847b165b9f9e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -473,6 +473,7 @@ struct kvm_vcpu_arch {
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
++ u64 mmio_gen;
+
+ struct kvm_pmu pmu;
+
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 87c0be59970a..1d8152b764a7 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -154,6 +154,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_ERMS);
+ }
+ }
++
++ /*
++ * Intel Quark Core DevMan_001.pdf section 6.4.11
++ * "The operating system also is required to invalidate (i.e., flush)
++ * the TLB when any changes are made to any of the page table entries.
++ * The operating system must reload CR3 to cause the TLB to be flushed"
++ *
++ * As a result, cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
++ * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
++ * to be modified
++ */
++ if (c->x86 == 5 && c->x86_model == 9) {
++ pr_info("Disabling PGE capability bit\n");
++ setup_clear_cpu_cap(X86_FEATURE_PGE);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index aa4b5c132c66..959bbf204dae 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2413,6 +2413,9 @@ __init int intel_pmu_init(void)
+ case 62: /* IvyBridge EP */
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
++ /* dTLB-load-misses is encoded differently on IVB than on SNB */
++ hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
++
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 74dd12952ea8..073b39d13696 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -198,16 +198,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+ /*
+- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
+- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
+- * number.
++ * The low bit of the generation number is always presumed to be zero.
++ * This disables mmio caching during memslot updates. The concept is
++ * similar to a seqcount but instead of retrying the access we just punt
++ * and ignore the cache.
++ *
++ * spte bits 3-11 are used as bits 1-9 of the generation number, and
++ * bits 52-61 are used as bits 10-19 of the generation number.
+ */
+-#define MMIO_SPTE_GEN_LOW_SHIFT 3
++#define MMIO_SPTE_GEN_LOW_SHIFT 2
+ #define MMIO_SPTE_GEN_HIGH_SHIFT 52
+
+-#define MMIO_GEN_SHIFT 19
+-#define MMIO_GEN_LOW_SHIFT 9
+-#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 1)
++#define MMIO_GEN_SHIFT 20
++#define MMIO_GEN_LOW_SHIFT 10
++#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
+ #define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
+ #define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
+
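
With these macros a 20-bit generation lands in spte bits 3-11 (generation bits
1-9) and bits 52-61 (generation bits 10-19); generation bit 0 is dropped, so
the odd generation that exists transiently during a memslot update can never
match a cached spte. A userspace sketch of the packing with invented helper
names (the kernel's own packing helpers live elsewhere in mmu.c):

    #include <stdint.h>
    #include <stdio.h>

    #define GEN_LOW_SHIFT 10
    #define GEN_LOW_MASK ((1u << GEN_LOW_SHIFT) - 2) /* gen bits 1-9 */
    #define GEN_MASK ((1u << 20) - 1)

    static uint64_t pack_gen(unsigned int gen)
    {
            uint64_t mask;

            mask = (uint64_t)(gen & GEN_LOW_MASK) << 2;     /* spte bits 3-11 */
            mask |= ((uint64_t)gen >> GEN_LOW_SHIFT) << 52; /* spte bits 52-61 */
            return mask;
    }

    static unsigned int unpack_gen(uint64_t spte)
    {
            unsigned int gen;

            gen = (spte >> 2) & GEN_LOW_MASK; /* bit 0 always reads back as 0 */
            gen |= (unsigned int)((spte >> 52) & 0x3ff) << GEN_LOW_SHIFT;
            return gen & GEN_MASK;
    }

    int main(void)
    {
            /* The odd generation loses its low bit on the round trip, so
             * a cached spte never matches a memslot update in progress. */
            printf("%#x -> %#x\n", 0x12345, unpack_gen(pack_gen(0x12345)));
            printf("%#x -> %#x\n", 0x12344, unpack_gen(pack_gen(0x12344)));
            return 0;
    }
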
+@@ -3161,7 +3165,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+
+- vcpu_clear_mmio_info(vcpu, ~0ul);
++ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+ hpa_t root = vcpu->arch.mmu.root_hpa;
+@@ -4424,7 +4428,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+ * The very rare case: if the generation-number is round,
+ * zap all shadow pages.
+ */
+- if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
++ if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+ printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 3186542f2fa3..7626d3efa064 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
++ vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++}
++
++static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
+ }
+
+ /*
+- * Clear the mmio cache info for the given gva,
+- * specially, if gva is ~0ul, we clear all mmio cache info.
++ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
++ * clear all mmio cache info.
+ */
++#define MMIO_GVA_ANY (~(gva_t)0)
++
+ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+- if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
++ if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ return;
+
+ vcpu->arch.mmio_gva = 0;
+@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+
+ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ {
+- if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
++ vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ return true;
+
+ return false;
+@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+
+ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+- if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
++ vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ return true;
+
+ return false;
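
vcpu_match_mmio_gen() implements generation-tagged caching: an entry filled
under an older memslot generation simply stops matching, so memslot changes
need no per-vcpu cache flush. A minimal self-contained sketch of the pattern,
with invented names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t slots_generation; /* bumped on every memslot update */

    struct mmio_cache {
            uint64_t gva;
            uint64_t gfn;
            uint64_t gen; /* generation captured at fill time */
    };

    static void cache_fill(struct mmio_cache *c, uint64_t gva, uint64_t gfn)
    {
            c->gva = gva;
            c->gfn = gfn;
            c->gen = slots_generation;
    }

    static bool cache_match(const struct mmio_cache *c, uint64_t gva)
    {
            /* A stale generation invalidates the entry implicitly. */
            return c->gen == slots_generation && c->gva == gva;
    }

    int main(void)
    {
            struct mmio_cache c;

            cache_fill(&c, 0x1000, 42);
            printf("before update: %d\n", cache_match(&c, 0x1000)); /* 1 */
            slots_generation++; /* a memslot changed somewhere */
            printf("after update:  %d\n", cache_match(&c, 0x1000)); /* 0 */
            return 0;
    }
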
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 701212ba38b7..ec85b816fd5a 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -1063,6 +1063,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ if (!firmware_p)
+ return -EINVAL;
+
++ if (!name || name[0] == '\0')
++ return -EINVAL;
++
+ ret = _request_firmware_prepare(&fw, name, device);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index de11ecaf3833..b18c7da77067 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -464,16 +464,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ {
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
++ const char *devname = "dummy";
+
+ INIT_LIST_HEAD(&map->debugfs_off_cache);
+ mutex_init(&map->cache_lock);
+
++ if (map->dev)
++ devname = dev_name(map->dev);
++
+ if (name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+- dev_name(map->dev), name);
++ devname, name);
+ name = map->debugfs_name;
+ } else {
+- name = dev_name(map->dev);
++ name = devname;
+ }
+
+ map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 18ea82c9146c..7a58be457eb5 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1281,7 +1281,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
+ }
+
+ #ifdef LOG_DEVICE
+- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x <= %x\n", reg, val);
+ #endif
+
+@@ -1403,6 +1403,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ if (val_bytes == 1) {
+ wval = (void *)val;
+ } else {
++ if (!val_count)
++ return -EINVAL;
++
+ wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ if (!wval) {
+ ret = -ENOMEM;
+@@ -1557,7 +1560,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
+ ret = map->reg_read(context, reg, val);
+ if (ret == 0) {
+ #ifdef LOG_DEVICE
+- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x => %x\n", reg, *val);
+ #endif
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6e6740b9521b..238dea6f6c5f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -306,6 +306,9 @@ static void btusb_intr_complete(struct urb *urb)
+ BT_ERR("%s corrupted event packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++ /* Avoid failing suspend when usb_kill_urb() cancels the URB */
++ return;
+ }
+
+ if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+@@ -394,6 +397,9 @@ static void btusb_bulk_complete(struct urb *urb)
+ BT_ERR("%s corrupted ACL packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++ /* Avoid failing suspend when usb_kill_urb() cancels the URB */
++ return;
+ }
+
+ if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+@@ -488,6 +494,9 @@ static void btusb_isoc_complete(struct urb *urb)
+ hdev->stat.err_rx++;
+ }
+ }
++ } else if (urb->status == -ENOENT) {
++ /* Avoid failing suspend when usb_kill_urb() cancels the URB */
++ return;
+ }
+
+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index db0be2fb05fe..db35c542eb20 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ break;
+
+ to_remove--;
+- seq = (seq - 1) % 8;
++ seq = (seq - 1) & 0x07;
+ }
+
+ if (seq != h5->rx_ack)
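
The switch to & 0x07 matters because seq holds a 3-bit sequence number in an
unsigned byte: when seq is 0, integer promotion makes (seq - 1) % 8 evaluate
to -1, which stores back as 255, while (seq - 1) & 0x07 wraps to 7 as
intended. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned char seq = 0;
            unsigned char mod = (seq - 1) % 8;     /* 255: (-1 % 8) is -1 in C */
            unsigned char mask = (seq - 1) & 0x07; /* 7, the intended wrap */

            printf("mod=%u mask=%u\n", mod, mask);
            return 0;
    }
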
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index af1b17a0db66..2b25d65b7a0e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
++ drm_mode_config_cleanup(dev);
+ return -ENXIO;
+ }
+
+@@ -178,33 +179,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto fail_free_priv;
++ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_iounmap;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_put_clk;
+ }
+
+ #ifdef CONFIG_CPU_FREQ
+@@ -214,7 +219,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+- goto fail;
++ goto fail_put_disp_clk;
+ }
+ #endif
+
+@@ -259,13 +264,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+- goto fail;
++ goto fail_cpufreq_unregister;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+- goto fail;
++ goto fail_mode_config_cleanup;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+@@ -273,7 +278,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+- goto fail;
++ goto fail_vblank_cleanup;
+ }
+
+ platform_set_drvdata(pdev, dev);
+@@ -289,13 +294,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ priv->fbdev = drm_fbdev_cma_init(dev, bpp,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
++ if (IS_ERR(priv->fbdev)) {
++ ret = PTR_ERR(priv->fbdev);
++ goto fail_irq_uninstall;
++ }
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+-fail:
+- tilcdc_unload(dev);
++fail_irq_uninstall:
++ pm_runtime_get_sync(dev->dev);
++ drm_irq_uninstall(dev);
++ pm_runtime_put_sync(dev->dev);
++
++fail_vblank_cleanup:
++ drm_vblank_cleanup(dev);
++
++fail_mode_config_cleanup:
++ drm_mode_config_cleanup(dev);
++
++fail_cpufreq_unregister:
++ pm_runtime_disable(dev->dev);
++#ifdef CONFIG_CPU_FREQ
++ cpufreq_unregister_notifier(&priv->freq_transition,
++ CPUFREQ_TRANSITION_NOTIFIER);
++fail_put_disp_clk:
++ clk_put(priv->disp_clk);
++#endif
++
++fail_put_clk:
++ clk_put(priv->clk);
++
++fail_iounmap:
++ iounmap(priv->mmio);
++
++fail_free_wq:
++ flush_workqueue(priv->wq);
++ destroy_workqueue(priv->wq);
++
++fail_free_priv:
++ dev->dev_private = NULL;
++ kfree(priv);
+ return ret;
+ }
+
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 6de6c98ce6eb..dea661331351 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -208,8 +208,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ ret = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel));
+
+- if (ret != 0)
++ if (ret != 0) {
++ err = ret;
+ goto error1;
++ }
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+@@ -404,7 +406,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 next_gpadl_handle;
+ unsigned long flags;
+ int ret = 0;
+- int t;
+
+ next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ atomic_inc(&vmbus_connection.next_gpadl_handle);
+@@ -451,9 +452,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+
+ }
+ }
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- BUG_ON(t == 0);
+-
++ wait_for_completion(&msginfo->waitevent);
+
+ /* At this point, we received the gpadl created msg */
+ *gpadl_handle = gpadlmsg->gpadl;
+@@ -476,7 +475,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ struct vmbus_channel_gpadl_teardown *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+- int ret, t;
++ int ret;
+
+ info = kmalloc(sizeof(*info) +
+ sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+@@ -498,11 +497,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_gpadl_teardown));
+
+- BUG_ON(ret != 0);
+- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+- BUG_ON(t == 0);
++ if (ret)
++ goto post_msg_err;
++
++ wait_for_completion(&info->waitevent);
+
+- /* Received a torndown response */
++post_msg_err:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -512,7 +512,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ }
+ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+
+-static void vmbus_close_internal(struct vmbus_channel *channel)
++static int vmbus_close_internal(struct vmbus_channel *channel)
+ {
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+@@ -534,11 +534,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+
+- BUG_ON(ret != 0);
++ if (ret) {
++ pr_err("Close failed: close post msg return is %d\n", ret);
++ /*
++ * If we failed to post the close msg,
++ * it is perhaps better to leak memory.
++ */
++ return ret;
++ }
++
+ /* Tear down the gpadl for the channel's ring buffer */
+- if (channel->ringbuffer_gpadlhandle)
+- vmbus_teardown_gpadl(channel,
+- channel->ringbuffer_gpadlhandle);
++ if (channel->ringbuffer_gpadlhandle) {
++ ret = vmbus_teardown_gpadl(channel,
++ channel->ringbuffer_gpadlhandle);
++ if (ret) {
++ pr_err("Close failed: teardown gpadl return %d\n", ret);
++ /*
++ * If we failed to teardown gpadl,
++ * it is perhaps better to leak memory.
++ */
++ return ret;
++ }
++ }
+
+ /* Cleanup the ring buffers for this channel */
+ hv_ringbuffer_cleanup(&channel->outbound);
+@@ -547,7 +564,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+ free_pages((unsigned long)channel->ringbuffer_pages,
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+-
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 59ef4e7afdd7..30688f6a0161 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -409,10 +409,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ * insufficient resources. Retry the operation a couple of
+ * times before giving up.
+ */
+- while (retries < 3) {
+- ret = hv_post_message(conn_id, 1, buffer, buflen);
+- if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
++ while (retries < 10) {
++ ret = hv_post_message(conn_id, 1, buffer, buflen);
++
++ switch (ret) {
++ case HV_STATUS_INSUFFICIENT_BUFFERS:
++ ret = -ENOMEM;
++ case -ENOMEM:
++ break;
++ case HV_STATUS_SUCCESS:
+ return ret;
++ default:
++ pr_err("hv_post_msg() failed; error code:%d\n", ret);
++ return -EINVAL;
++ }
++
+ retries++;
+ msleep(100);
+ }
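
The switch above folds HV_STATUS_INSUFFICIENT_BUFFERS into -ENOMEM by
deliberately falling through, so both a fresh hypervisor out-of-buffers status
and a prior -ENOMEM take the retry path. A userspace sketch of that control
flow; the status values and message poster are stand-ins, not the real
hypervisor interface:

    #include <stdio.h>

    #define HV_STATUS_SUCCESS 0
    #define HV_STATUS_INSUFFICIENT_BUFFERS 19 /* stand-in value */
    #define ERR_NOMEM (-12)

    static int attempts;

    static int fake_post_message(void) /* succeeds on the third try */
    {
            return ++attempts < 3 ? HV_STATUS_INSUFFICIENT_BUFFERS
                                  : HV_STATUS_SUCCESS;
    }

    int main(void)
    {
            int retries = 0, ret = -1;

            while (retries < 10) {
                    ret = fake_post_message();
                    switch (ret) {
                    case HV_STATUS_INSUFFICIENT_BUFFERS:
                            ret = ERR_NOMEM;
                            /* fall through: treat as a retryable -ENOMEM */
                    case ERR_NOMEM:
                            break;
                    case HV_STATUS_SUCCESS:
                            printf("posted after %d retries\n", retries);
                            return 0;
                    default:
                            return -1;
                    }
                    retries++; /* the real code also sleeps 100ms here */
            }
            return ret;
    }
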
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 0f12382aa35d..7552207a479b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -663,9 +663,13 @@ struct gc_stat {
+ * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
+ * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
+ * flushing dirty data).
++ *
++ * CACHE_SET_RUNNING means all cache devices have been registered and journal
++ * replay is complete.
+ */
+ #define CACHE_SET_UNREGISTERING 0
+ #define CACHE_SET_STOPPING 1
++#define CACHE_SET_RUNNING 2
+
+ struct cache_set {
+ struct closure cl;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 547c4c57b052..f5004c5c4b96 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1235,6 +1235,9 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+ if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ return -EINTR;
+
++ if (!test_bit(CACHE_SET_RUNNING, &c->flags))
++ return -EPERM;
++
+ u = uuid_find_empty(c);
+ if (!u) {
+ pr_err("Can't create volume, no room for UUID");
+@@ -1300,8 +1303,11 @@ static void cache_set_free(struct closure *cl)
+ bch_journal_free(c);
+
+ for_each_cache(ca, c, i)
+- if (ca)
++ if (ca) {
++ ca->set = NULL;
++ c->cache[ca->sb.nr_this_dev] = NULL;
+ kobject_put(&ca->kobj);
++ }
+
+ free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+ free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
+@@ -1637,6 +1643,7 @@ static void run_cache_set(struct cache_set *c)
+
+ flash_devs_run(c);
+
++ set_bit(CACHE_SET_RUNNING, &c->flags);
+ return;
+ err_unlock_gc:
+ closure_set_stopped(&c->gc.cl);
+@@ -1722,8 +1729,10 @@ void bch_cache_release(struct kobject *kobj)
+ {
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+
+- if (ca->set)
++ if (ca->set) {
++ BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
+ ca->set->cache[ca->sb.nr_this_dev] = NULL;
++ }
+
+ bch_cache_allocator_exit(ca);
+
+@@ -1794,7 +1803,7 @@ err:
+ }
+
+ static void register_cache(struct cache_sb *sb, struct page *sb_page,
+- struct block_device *bdev, struct cache *ca)
++ struct block_device *bdev, struct cache *ca)
+ {
+ char name[BDEVNAME_SIZE];
+ const char *err = "cannot allocate memory";
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index ea345c6896f4..4dd1e2c6edca 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -417,8 +417,8 @@ do { \
+ average_frequency, frequency_units); \
+ __print_time_stat(stats, name, \
+ average_duration, duration_units); \
+- __print_time_stat(stats, name, \
+- max_duration, duration_units); \
++ sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
++ div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
+ \
+ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
+ ? div_s64(local_clock() - (stats)->last, \
+diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
+index 5653e505f91f..424f51d1e2ce 100644
+--- a/drivers/message/fusion/mptspi.c
++++ b/drivers/message/fusion/mptspi.c
+@@ -1422,6 +1422,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto out_mptspi_probe;
+ }
+
++ /* VMware emulation doesn't properly implement WRITE_SAME */
++ if (pdev->subsystem_vendor == 0x15AD)
++ sh->no_write_same = 1;
++
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index cd2033cd7120..72b682379611 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
+
+ dev_dbg(dev, "Device probe\n");
+
+- strncpy(id.name, dev_name(dev), sizeof(id.name));
++ strlcpy(id.name, dev_name(dev), sizeof(id.name));
+
+ return driver->probe(device, &id);
+ }
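The strncpy() to strlcpy() swap above matters because strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer, so driver->probe() could later read past the end of id.name. A minimal userspace sketch of the difference (strlcpy() is a kernel/BSD function, not standard C, so an equivalent is defined locally here purely for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for the kernel's strlcpy(): copies at most size-1
     * bytes and always NUL-terminates when size > 0. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len; /* full source length, so callers can detect truncation */
    }

    int main(void)
    {
        char a[8], b[8];

        strncpy(a, "0123456789", sizeof(a));    /* fills all 8 bytes, no NUL */
        my_strlcpy(b, "0123456789", sizeof(b)); /* copies 7 bytes plus NUL */

        /* Printing a with %s would read past the array; b is safe. */
        printf("b = \"%s\"\n", b); /* prints "0123456" */
        return 0;
    }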
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 26108a1a29fa..968c1287a598 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -272,6 +272,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+@@ -312,6 +314,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index e3eb95292a7f..bc82d55d77c0 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -2041,7 +2041,7 @@ struct mac_iveiv_entry {
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
+ */
+-#define BBP1_TX_POWER_CTRL FIELD8(0x07)
++#define BBP1_TX_POWER_CTRL FIELD8(0x03)
+ #define BBP1_TX_ANTENNA FIELD8(0x18)
+
+ /*
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index fdd81f24a9cf..1324c3b93ee5 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -747,7 +747,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ rangesz = pna + na + ns;
+ nranges = rlen / sizeof(__be32) / rangesz;
+
+- for (i = 0; i < nranges; i++) {
++ for (i = 0; i < nranges; i++, range += rangesz) {
+ u32 flags = of_read_number(range, 1);
+ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+@@ -757,14 +757,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ rtype = IORESOURCE_IO;
+ else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ rtype = IORESOURCE_MEM;
++ else
++ continue;
+
+ if (slot == PCI_SLOT(devfn) && type == rtype) {
+ *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ return 0;
+ }
+-
+- range += rangesz;
+ }
+
+ return -ENOENT;
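The loop rework above is an instance of a classic bug pattern: the advance (range += rangesz) sat at the bottom of the loop body, so the newly added continue for unrecognized flag types would have skipped it and re-read the same range entry forever. Folding the step into the for header makes continue safe. A contrived sketch of the same shape:

    #include <stdio.h>

    int main(void)
    {
        const int data[] = { 1, -2, 3, -4, 5 };
        const int *p = data;
        int i, sum = 0;

        /* Buggy shape, for comparison:
         *
         *     for (i = 0; i < 5; i++) {
         *             if (*p < 0)
         *                     continue;   // skips the p++ below!
         *             sum += *p;
         *             p++;
         *     }
         *
         * Fixed shape, as in the hunk above: the pointer advance lives in
         * the step expression, so continue cannot skip it. */
        for (i = 0; i < 5; i++, p++) {
            if (*p < 0)
                continue;
            sum += *p;
        }
        printf("sum = %d\n", sum); /* 9 */
        return 0;
    }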
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 7128cfdd64aa..7919b7f10daf 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -175,7 +175,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
++ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 3af18b94d0d3..a7b7eeaf35e8 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -28,6 +28,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/ktime.h>
++#include <linux/mm.h>
+ #include <asm/dma.h> /* isa_dma_bridge_buggy */
+ #include "pci.h"
+
+@@ -291,6 +292,25 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+
++/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
++static void quirk_extend_bar_to_page(struct pci_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++ struct resource *r = &dev->resource[i];
++
++ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
++ r->end = PAGE_SIZE - 1;
++ r->start = 0;
++ r->flags |= IORESOURCE_UNSET;
++ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
++ i, r);
++ }
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
++
+ /*
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
+diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
+index fa9a2171cc13..b264d8fe1908 100644
+--- a/drivers/platform/x86/dell-wmi.c
++++ b/drivers/platform/x86/dell-wmi.c
+@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
+ const struct key_entry *key;
+ int reported_key;
+ u16 *buffer_entry = (u16 *)obj->buffer.pointer;
++ int buffer_size = obj->buffer.length/2;
+
+- if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
++ if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
+ pr_info("Received unknown WMI event (0x%x)\n",
+ buffer_entry[1]);
+ kfree(obj);
+ return;
+ }
+
+- if (dell_new_hk_type || buffer_entry[1] == 0x0)
++ if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
+ reported_key = (int)buffer_entry[2];
+- else
++ else if (buffer_size >= 2)
+ reported_key = (int)buffer_entry[1] & 0xffff;
++ else {
++ pr_info("Received unknown WMI event\n");
++ kfree(obj);
++ return;
++ }
+
+ key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ reported_key);
+diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
+index 245a9595a93a..ef0a78b0d730 100644
+--- a/drivers/scsi/be2iscsi/be_mgmt.c
++++ b/drivers/scsi/be2iscsi/be_mgmt.c
+@@ -812,17 +812,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
+
+ if (ip_action == IP_ACTION_ADD) {
+ memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+- ip_param->len);
++ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ if (subnet_param)
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+- subnet_param->value, subnet_param->len);
++ subnet_param->value,
++ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ } else {
+ memcpy(req->ip_params.ip_record.ip_addr.addr,
+- if_info->ip_addr.addr, ip_param->len);
++ if_info->ip_addr.addr,
++ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+- if_info->ip_addr.subnet_mask, ip_param->len);
++ if_info->ip_addr.subnet_mask,
++ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+@@ -850,7 +853,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ req->action = gtway_action;
+ req->ip_addr.ip_type = BE2_IPV4;
+
+- memcpy(req->ip_addr.addr, gt_addr, param_len);
++ memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ }
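The be2iscsi hunk replaces caller-supplied lengths (ip_param->len, subnet_param->len) with sizeof() of the destination fields, so a malformed parameter length can no longer overflow the fixed-size records. The general rule: when copying into a fixed-size buffer, the bound must come from the destination, not from untrusted input. A small sketch (the clamp shown here is slightly more defensive than the hunk, which simply copies exactly sizeof(dest) bytes):

    #include <stdio.h>
    #include <string.h>

    struct ip_record {
        unsigned char addr[16]; /* fixed-size field, like the firmware record */
    };

    /* len models an externally supplied, untrusted length. */
    static void fill_record(struct ip_record *rec,
                            const unsigned char *src, size_t len)
    {
        /* Wrong: memcpy(rec->addr, src, len) overflows when len > 16. */

        /* Right: the destination size is the hard upper bound. */
        size_t n = len < sizeof(rec->addr) ? len : sizeof(rec->addr);

        memcpy(rec->addr, src, n);
    }

    int main(void)
    {
        unsigned char big[64] = "way more than sixteen bytes";
        struct ip_record rec = { { 0 } };

        fill_record(&rec, big, sizeof(big)); /* clamped to 16, no overflow */
        printf("first byte: %c\n", rec.addr[0]);
        return 0;
    }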
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e4fa6fb7e72a..30788321ac2b 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1361,12 +1361,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+ {
+- struct qla_hw_data *ha = vha->hw;
+- device_reg_t __iomem *reg = ha->iobase;
+ uint32_t cnt;
+
+ if (vha->req->cnt < (req_cnt + 2)) {
+- cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
++ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index b9f0192758d6..0791c92e8c50 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -89,7 +89,13 @@ err_exit:
+
+ static void mid_spi_dma_exit(struct dw_spi *dws)
+ {
++ if (!dws->dma_inited)
++ return;
++
++ dmaengine_terminate_all(dws->txchan);
+ dma_release_channel(dws->txchan);
++
++ dmaengine_terminate_all(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+ }
+
+@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ txconf.dst_addr_width = dws->dma_width;
+ txconf.device_fc = false;
+
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ rxconf.src_addr_width = dws->dma_width;
+ rxconf.device_fc = false;
+
+ rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 816d1a23f9d0..e2b09fdf79b3 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -240,8 +240,16 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+ {
+ unsigned int n13 = port->uartclk / (13 * baud);
+ unsigned int n16 = port->uartclk / (16 * baud);
+- int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+- int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
++ int baudAbsDiff13;
++ int baudAbsDiff16;
++
++ if (n13 == 0)
++ n13 = 1;
++ if (n16 == 0)
++ n16 = 1;
++
++ baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
++ baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ if(baudAbsDiff13 < 0)
+ baudAbsDiff13 = -baudAbsDiff13;
+ if(baudAbsDiff16 < 0)
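The omap-serial guard above exists because an integer divisor truncates to zero whenever the requested baud rate exceeds uartclk/13 (or uartclk/16), and the follow-up division then faults. Clamping the divisor to at least 1 is the whole fix. A sketch of the arithmetic with illustrative values (the helper name and numbers are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int baud_abs_diff(unsigned int clk, unsigned int baud,
                                      unsigned int oversample)
    {
        unsigned int n = clk / (oversample * baud);

        if (n == 0) /* baud too high for this clock: avoid divide-by-zero */
            n = 1;

        return abs((int)baud - (int)(clk / (oversample * n)));
    }

    int main(void)
    {
        unsigned int clk = 48000000;

        /* A sane rate, and one high enough that the unclamped divisor
         * would be zero and the second division would crash. */
        printf("115200:  diff13=%u diff16=%u\n",
               baud_abs_diff(clk, 115200, 13), baud_abs_diff(clk, 115200, 16));
        printf("5000000: diff13=%u diff16=%u\n",
               baud_abs_diff(clk, 5000000, 13), baud_abs_diff(clk, 5000000, 16));
        return 0;
    }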
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d990898ed4dc..48d3eed8e250 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1171,7 +1171,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ /* Tell khubd to disconnect the device or
+ * check for a new connection
+ */
+- if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
++ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
++ (portstatus & USB_PORT_STAT_OVERCURRENT))
+ set_bit(port1, hub->change_bits);
+
+ } else if (portstatus & USB_PORT_STAT_ENABLE) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 347dce4aa76e..280ff96a3945 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -161,6 +161,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* USB3503 */
+ { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* ASUS Base Station(T100) */
++ { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ { } /* terminating entry must be last */
+ };
+
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 48cddf3cd6b8..53c95e9f97d6 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -424,7 +424,7 @@ config USB_GOKU
+ gadget drivers to also be dynamically linked.
+
+ config USB_EG20T
+- tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
++ tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
+ depends on PCI
+ help
+ This is a USB device driver for EG20T PCH.
+@@ -445,6 +445,7 @@ config USB_EG20T
+ ML7213/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7831 is completely compatible for Intel EG20T PCH.
+
++ This driver can be used with Intel's Quark X1000 SOC platform.
+ #
+ # LAST -- dummy/emulated controller
+ #
+diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
+index 24174e1d1564..ea00b277d5d4 100644
+--- a/drivers/usb/gadget/pch_udc.c
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -343,6 +343,7 @@ struct pch_vbus_gpio_data {
+ * @setup_data: Received setup data
+ * @phys_addr: of device memory
+ * @base_addr: for mapped device memory
++ * @bar: Indicates which PCI BAR for USB regs
+ * @irq: IRQ line for the device
+ * @cfg_data: current cfg, intf, and alt in use
+ * @vbus_gpio: GPIO information for detecting VBUS
+@@ -370,14 +371,17 @@ struct pch_udc_dev {
+ struct usb_ctrlrequest setup_data;
+ unsigned long phys_addr;
+ void __iomem *base_addr;
++ unsigned bar;
+ unsigned irq;
+ struct pch_udc_cfg_data cfg_data;
+ struct pch_vbus_gpio_data vbus_gpio;
+ };
+ #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
+
++#define PCH_UDC_PCI_BAR_QUARK_X1000 0
+ #define PCH_UDC_PCI_BAR 1
+ #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
+ #define PCI_VENDOR_ID_ROHM 0x10DB
+ #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
+ #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
+@@ -3076,7 +3080,7 @@ static void pch_udc_remove(struct pci_dev *pdev)
+ iounmap(dev->base_addr);
+ if (dev->mem_region)
+ release_mem_region(dev->phys_addr,
+- pci_resource_len(pdev, PCH_UDC_PCI_BAR));
++ pci_resource_len(pdev, dev->bar));
+ if (dev->active)
+ pci_disable_device(pdev);
+ kfree(dev);
+@@ -3145,9 +3149,15 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ dev->active = 1;
+ pci_set_drvdata(pdev, dev);
+
++ /* Determine BAR based on PCI ID */
++ if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
++ dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
++ else
++ dev->bar = PCH_UDC_PCI_BAR;
++
+ /* PCI resource allocation */
+- resource = pci_resource_start(pdev, 1);
+- len = pci_resource_len(pdev, 1);
++ resource = pci_resource_start(pdev, dev->bar);
++ len = pci_resource_len(pdev, dev->bar);
+
+ if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
+ dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
+@@ -3213,6 +3223,12 @@ finished:
+
+ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
+ {
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
++ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
++ .class_mask = 0xffffffff,
++ },
++ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index a9984c700d2c..5d7966b8fe98 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -229,6 +229,9 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
+ phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
+ if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
+ pr_err("unable to find transceiver\n");
++ if (!IS_ERR(phy))
++ phy = ERR_PTR(-ENODEV);
++
+ goto err0;
+ }
+
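The phy.c fix closes a contract hole: usb_get_phy_dev() promises either a valid pointer or an ERR_PTR-encoded errno, but the try_module_get() failure path reached err0 still holding a valid-looking phy pointer that callers would treat as success. Re-encoding it as ERR_PTR(-ENODEV) restores the contract. A userspace sketch of the idiom, with simplified local stand-ins for the macros from <linux/err.h>:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int busy = 1;
    static int resource = 42;

    static void *get_resource(void)
    {
        void *p = &resource;        /* found a valid object... */

        if (busy)                   /* ...but cannot take a reference: */
            return ERR_PTR(-EBUSY); /* must NOT leak the valid p here */

        return p;
    }

    int main(void)
    {
        void *p = get_resource();

        if (IS_ERR(p))
            printf("failed: errno %ld\n", -PTR_ERR(p));
        else
            printf("got %d\n", *(int *)p);
        return 0;
    }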
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index d966b59f7d7b..b8029ec9280d 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -259,7 +259,7 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+
+- WARN_ON(!spin_is_locked(&devinfo->lock));
++ lockdep_assert_held(&devinfo->lock);
+ if (cmdinfo->state & (COMMAND_INFLIGHT |
+ DATA_IN_URB_INFLIGHT |
+ DATA_OUT_URB_INFLIGHT |
+@@ -558,7 +558,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ int err;
+
+- WARN_ON(!spin_is_locked(&devinfo->lock));
++ lockdep_assert_held(&devinfo->lock);
+ if (cmdinfo->state & SUBMIT_STATUS_URB) {
+ err = uas_submit_sense_urb(cmnd->device->host, gfp,
+ cmdinfo->stream);
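WARN_ON(!spin_is_locked(...)) is a weak assertion: on uniprocessor kernels spinlocks compile away and spin_is_locked() is always false, and even on SMP it only proves that somebody holds the lock, not the current context. lockdep_assert_held() checks the actual acquirer when lockdep is enabled and costs nothing otherwise. A userspace analogue that tracks the owner (the helper names here are illustrative, not kernel API):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* A debug lock that records its owner, so "held by this thread" is
     * checkable, unlike a bare "is it locked at all" test. */
    struct dbg_lock {
        pthread_mutex_t mu;
        pthread_t owner;
        int held;
    };

    static void dbg_lock_acquire(struct dbg_lock *l)
    {
        pthread_mutex_lock(&l->mu);
        l->owner = pthread_self();
        l->held = 1;
    }

    static void dbg_lock_release(struct dbg_lock *l)
    {
        l->held = 0;
        pthread_mutex_unlock(&l->mu);
    }

    /* Analogue of lockdep_assert_held(): held, and held by us. */
    static void dbg_assert_held(struct dbg_lock *l)
    {
        assert(l->held && pthread_equal(l->owner, pthread_self()));
    }

    static struct dbg_lock lock = { .mu = PTHREAD_MUTEX_INITIALIZER };

    int main(void) /* build with -pthread */
    {
        dbg_lock_acquire(&lock);
        dbg_assert_held(&lock); /* passes: we are the owner */
        dbg_lock_release(&lock);
        printf("assertion held\n");
        return 0;
    }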
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 7f625306ea80..16a36b2ed902 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -492,18 +492,24 @@ UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200,
+ "eUSB CompactFlash Adapter",
+ USB_SC_8020, USB_PR_CB, NULL, 0),
+
+-UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100,
++UNUSUAL_DEV( 0x04e6, 0x000b, 0x0100, 0x0100,
+ "Shuttle",
+ "eUSCSI Bridge",
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
+-UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100,
++UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
+ "Shuttle",
+ "eUSCSI Bridge",
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
++UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
++ "SCM Microsystems",
++ "eUSB SCSI Adapter (Bus Powered)",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++ US_FL_SCM_MULT_TARG ),
++
+ UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
+ "Shuttle",
+ "CD-RW Device",
+@@ -1093,6 +1099,13 @@ UNUSUAL_DEV( 0x0840, 0x0085, 0x0001, 0x0001,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
++/* Supplied with some Castlewood ORB removable drives */
++UNUSUAL_DEV( 0x084b, 0xa001, 0x0000, 0x9999,
++ "Castlewood Systems",
++ "USB to SCSI cable",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++ US_FL_SCM_MULT_TARG ),
++
+ /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+ * Flag will support Bulk devices which use a standards-violating 32-byte
+ * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
+@@ -2050,6 +2063,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
++/* Supplied with some Castlewood ORB removable drives */
++UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
++ "Double-H Technology",
++ "USB to SCSI Intelligent Cable",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
++ US_FL_SCM_MULT_TARG ),
++
+ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
+ "ST",
+ "2A",
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7e6758d075ad..68f7a1ff104a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3548,7 +3548,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ * without delay
+ */
+ if (!btrfs_is_free_space_inode(inode)
+- && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
++ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
++ && !root->fs_info->log_root_recovering) {
+ btrfs_update_root_times(trans, root);
+
+ ret = btrfs_delayed_update_inode(trans, root, inode);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index ad6a08c5801e..50a06debb1bd 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4571,6 +4571,12 @@ long btrfs_ioctl(struct file *file, unsigned int
+ if (ret)
+ return ret;
+ ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
++ /*
++ * The transaction thread may want to do more work,
++ * namely it pokes the cleaner kthread that will start
++ * processing uncleaned subvols.
++ */
++ wake_up_process(root->fs_info->transaction_kthread);
+ return ret;
+ }
+ case BTRFS_IOC_START_SYNC:
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 225c5b2e748f..7b83e0df892e 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -732,7 +732,8 @@ again:
+ err = ret;
+ goto out;
+ }
+- BUG_ON(!ret || !path1->slots[0]);
++ ASSERT(ret);
++ ASSERT(path1->slots[0]);
+
+ path1->slots[0]--;
+
+@@ -742,10 +743,10 @@ again:
+ * the backref was added previously when processing
+ * backref of type BTRFS_TREE_BLOCK_REF_KEY
+ */
+- BUG_ON(!list_is_singular(&cur->upper));
++ ASSERT(list_is_singular(&cur->upper));
+ edge = list_entry(cur->upper.next, struct backref_edge,
+ list[LOWER]);
+- BUG_ON(!list_empty(&edge->list[UPPER]));
++ ASSERT(list_empty(&edge->list[UPPER]));
+ exist = edge->node[UPPER];
+ /*
+ * add the upper level block to pending list if we need
+@@ -827,7 +828,7 @@ again:
+ cur->cowonly = 1;
+ }
+ #else
+- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
++ ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
+ if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ #endif
+ if (key.objectid == key.offset) {
+@@ -836,7 +837,7 @@ again:
+ * backref of this type.
+ */
+ root = find_reloc_root(rc, cur->bytenr);
+- BUG_ON(!root);
++ ASSERT(root);
+ cur->root = root;
+ break;
+ }
+@@ -864,7 +865,7 @@ again:
+ } else {
+ upper = rb_entry(rb_node, struct backref_node,
+ rb_node);
+- BUG_ON(!upper->checked);
++ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ }
+ list_add_tail(&edge->list[LOWER], &cur->upper);
+@@ -888,7 +889,7 @@ again:
+
+ if (btrfs_root_level(&root->root_item) == cur->level) {
+ /* tree root */
+- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++ ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ cur->bytenr);
+ if (should_ignore_root(root))
+ list_add(&cur->list, &useless);
+@@ -923,7 +924,7 @@ again:
+ need_check = true;
+ for (; level < BTRFS_MAX_LEVEL; level++) {
+ if (!path2->nodes[level]) {
+- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++ ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ lower->bytenr);
+ if (should_ignore_root(root))
+ list_add(&lower->list, &useless);
+@@ -972,12 +973,15 @@ again:
+ need_check = false;
+ list_add_tail(&edge->list[UPPER],
+ &list);
+- } else
++ } else {
++ if (upper->checked)
++ need_check = true;
+ INIT_LIST_HEAD(&edge->list[UPPER]);
++ }
+ } else {
+ upper = rb_entry(rb_node, struct backref_node,
+ rb_node);
+- BUG_ON(!upper->checked);
++ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ if (!upper->owner)
+ upper->owner = btrfs_header_owner(eb);
+@@ -1021,7 +1025,7 @@ next:
+ * everything goes well, connect backref nodes and insert backref nodes
+ * into the cache.
+ */
+- BUG_ON(!node->checked);
++ ASSERT(node->checked);
+ cowonly = node->cowonly;
+ if (!cowonly) {
+ rb_node = tree_insert(&cache->rb_root, node->bytenr,
+@@ -1057,8 +1061,21 @@ next:
+ continue;
+ }
+
+- BUG_ON(!upper->checked);
+- BUG_ON(cowonly != upper->cowonly);
++ if (!upper->checked) {
++ /*
++ * Still want to blow up for developers since this is a
++ * logic bug.
++ */
++ ASSERT(0);
++ err = -EINVAL;
++ goto out;
++ }
++ if (cowonly != upper->cowonly) {
++ ASSERT(0);
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (!cowonly) {
+ rb_node = tree_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+@@ -1081,7 +1098,7 @@ next:
+ while (!list_empty(&useless)) {
+ upper = list_entry(useless.next, struct backref_node, list);
+ list_del_init(&upper->list);
+- BUG_ON(!list_empty(&upper->upper));
++ ASSERT(list_empty(&upper->upper));
+ if (upper == node)
+ node = NULL;
+ if (upper->lowest) {
+@@ -1114,29 +1131,45 @@ out:
+ if (err) {
+ while (!list_empty(&useless)) {
+ lower = list_entry(useless.next,
+- struct backref_node, upper);
+- list_del_init(&lower->upper);
++ struct backref_node, list);
++ list_del_init(&lower->list);
+ }
+- upper = node;
+- INIT_LIST_HEAD(&list);
+- while (upper) {
+- if (RB_EMPTY_NODE(&upper->rb_node)) {
+- list_splice_tail(&upper->upper, &list);
+- free_backref_node(cache, upper);
+- }
+-
+- if (list_empty(&list))
+- break;
+-
+- edge = list_entry(list.next, struct backref_edge,
+- list[LOWER]);
++ while (!list_empty(&list)) {
++ edge = list_first_entry(&list, struct backref_edge,
++ list[UPPER]);
++ list_del(&edge->list[UPPER]);
+ list_del(&edge->list[LOWER]);
++ lower = edge->node[LOWER];
+ upper = edge->node[UPPER];
+ free_backref_edge(cache, edge);
++
++ /*
++ * Lower is no longer linked to any upper backref nodes
++ * and isn't in the cache, we can free it ourselves.
++ */
++ if (list_empty(&lower->upper) &&
++ RB_EMPTY_NODE(&lower->rb_node))
++ list_add(&lower->list, &useless);
++
++ if (!RB_EMPTY_NODE(&upper->rb_node))
++ continue;
++
++ /* Add this guy's upper edges to the list to process */
++ list_for_each_entry(edge, &upper->upper, list[LOWER])
++ list_add_tail(&edge->list[UPPER], &list);
++ if (list_empty(&upper->upper))
++ list_add(&upper->list, &useless);
++ }
++
++ while (!list_empty(&useless)) {
++ lower = list_entry(useless.next,
++ struct backref_node, list);
++ list_del_init(&lower->list);
++ free_backref_node(cache, lower);
+ }
+ return ERR_PTR(err);
+ }
+- BUG_ON(node && node->detached);
++ ASSERT(!node || !node->detached);
+ return node;
+ }
+
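The relocation.c hunks demote hard BUG_ON() crashes to btrfs ASSERT() checks plus an -EINVAL unwind: debug builds still stop loudly at the logic bug (ASSERT(0)), while production kernels back out and return an error instead of halting the machine. The same pattern in plain C, with assert() standing in for btrfs's ASSERT (under NDEBUG it compiles out and only the error path remains):

    #include <assert.h>
    #include <errno.h>
    #include <stdio.h>

    struct node { int checked; };

    static int process(const struct node *n)
    {
        int err = 0;

        if (!n->checked) {
            /* Still want to blow up for developers: this is a logic bug. */
            assert(0);
            err = -EINVAL; /* NDEBUG (production) builds reach this */
            goto out;
        }
        /* ... normal processing ... */
    out:
        return err;
    }

    int main(void)
    {
        struct node good = { .checked = 1 };

        printf("good: %d\n", process(&good));
        /* Calling process() on an unchecked node aborts unless built with
         * -DNDEBUG, in which case it returns -EINVAL instead. */
        return 0;
    }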
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 977314e2d078..069c2fd37ce7 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -594,7 +594,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ if (transid <= root->fs_info->last_trans_committed)
+ goto out;
+
+- ret = -EINVAL;
+ /* find specified transaction */
+ spin_lock(&root->fs_info->trans_lock);
+ list_for_each_entry(t, &root->fs_info->trans_list, list) {
+@@ -610,9 +609,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ }
+ }
+ spin_unlock(&root->fs_info->trans_lock);
+- /* The specified transaction doesn't exist */
+- if (!cur_trans)
++
++ /*
++ * The specified transaction doesn't exist, or we
++ * raced with btrfs_commit_transaction
++ */
++ if (!cur_trans) {
++ if (transid > root->fs_info->last_trans_committed)
++ ret = -EINVAL;
+ goto out;
++ }
+ } else {
+ /* find newest transaction that is committing | committed */
+ spin_lock(&root->fs_info->trans_lock);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 87b70fe7eccc..e15f90c0e96a 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -274,16 +274,8 @@ static void __d_free(struct rcu_head *head)
+ kmem_cache_free(dentry_cache, dentry);
+ }
+
+-/*
+- * no locks, please.
+- */
+-static void d_free(struct dentry *dentry)
++static void dentry_free(struct dentry *dentry)
+ {
+- BUG_ON((int)dentry->d_lockref.count > 0);
+- this_cpu_dec(nr_dentry);
+- if (dentry->d_op && dentry->d_op->d_release)
+- dentry->d_op->d_release(dentry);
+-
+ /* if dentry was never visible to RCU, immediate free is OK */
+ if (!(dentry->d_flags & DCACHE_RCUACCESS))
+ __d_free(&dentry->d_u.d_rcu);
+@@ -430,77 +422,6 @@ static void dentry_lru_add(struct dentry *dentry)
+ d_lru_add(dentry);
+ }
+
+-/*
+- * Remove a dentry with references from the LRU.
+- *
+- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
+- * lose our last reference through the parent walk. In this case, we need to
+- * remove ourselves from the shrink list, not the LRU.
+- */
+-static void dentry_lru_del(struct dentry *dentry)
+-{
+- if (dentry->d_flags & DCACHE_LRU_LIST) {
+- if (dentry->d_flags & DCACHE_SHRINK_LIST)
+- return d_shrink_del(dentry);
+- d_lru_del(dentry);
+- }
+-}
+-
+-/**
+- * d_kill - kill dentry and return parent
+- * @dentry: dentry to kill
+- * @parent: parent dentry
+- *
+- * The dentry must already be unhashed and removed from the LRU.
+- *
+- * If this is the root of the dentry tree, return NULL.
+- *
+- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
+- * d_kill.
+- */
+-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
+- __releases(dentry->d_lock)
+- __releases(parent->d_lock)
+- __releases(dentry->d_inode->i_lock)
+-{
+- list_del(&dentry->d_u.d_child);
+- /*
+- * Inform try_to_ascend() that we are no longer attached to the
+- * dentry tree
+- */
+- dentry->d_flags |= DCACHE_DENTRY_KILLED;
+- if (parent)
+- spin_unlock(&parent->d_lock);
+- dentry_iput(dentry);
+- /*
+- * dentry_iput drops the locks, at which point nobody (except
+- * transient RCU lookups) can reach this dentry.
+- */
+- d_free(dentry);
+- return parent;
+-}
+-
+-/*
+- * Unhash a dentry without inserting an RCU walk barrier or checking that
+- * dentry->d_lock is locked. The caller must take care of that, if
+- * appropriate.
+- */
+-static void __d_shrink(struct dentry *dentry)
+-{
+- if (!d_unhashed(dentry)) {
+- struct hlist_bl_head *b;
+- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+- b = &dentry->d_sb->s_anon;
+- else
+- b = d_hash(dentry->d_parent, dentry->d_name.hash);
+-
+- hlist_bl_lock(b);
+- __hlist_bl_del(&dentry->d_hash);
+- dentry->d_hash.pprev = NULL;
+- hlist_bl_unlock(b);
+- }
+-}
+-
+ /**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+@@ -519,7 +440,16 @@ static void __d_shrink(struct dentry *dentry)
+ void __d_drop(struct dentry *dentry)
+ {
+ if (!d_unhashed(dentry)) {
+- __d_shrink(dentry);
++ struct hlist_bl_head *b;
++ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++ b = &dentry->d_sb->s_anon;
++ else
++ b = d_hash(dentry->d_parent, dentry->d_name.hash);
++
++ hlist_bl_lock(b);
++ __hlist_bl_del(&dentry->d_hash);
++ dentry->d_hash.pprev = NULL;
++ hlist_bl_unlock(b);
+ dentry_rcuwalk_barrier(dentry);
+ }
+ }
+@@ -533,37 +463,12 @@ void d_drop(struct dentry *dentry)
+ }
+ EXPORT_SYMBOL(d_drop);
+
+-/*
+- * Finish off a dentry we've decided to kill.
+- * dentry->d_lock must be held, returns with it unlocked.
+- * If ref is non-zero, then decrement the refcount too.
+- * Returns dentry requiring refcount drop, or NULL if we're done.
+- */
+-static struct dentry *
+-dentry_kill(struct dentry *dentry, int unlock_on_failure)
+- __releases(dentry->d_lock)
++static void __dentry_kill(struct dentry *dentry)
+ {
+- struct inode *inode;
+- struct dentry *parent;
+-
+- inode = dentry->d_inode;
+- if (inode && !spin_trylock(&inode->i_lock)) {
+-relock:
+- if (unlock_on_failure) {
+- spin_unlock(&dentry->d_lock);
+- cpu_relax();
+- }
+- return dentry; /* try again with same dentry */
+- }
+- if (IS_ROOT(dentry))
+- parent = NULL;
+- else
++ struct dentry *parent = NULL;
++ bool can_free = true;
++ if (!IS_ROOT(dentry))
+ parent = dentry->d_parent;
+- if (parent && !spin_trylock(&parent->d_lock)) {
+- if (inode)
+- spin_unlock(&inode->i_lock);
+- goto relock;
+- }
+
+ /*
+ * The dentry is now unrecoverably dead to the world.
+@@ -577,10 +482,105 @@ relock:
+ if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
+ dentry->d_op->d_prune(dentry);
+
+- dentry_lru_del(dentry);
++ if (dentry->d_flags & DCACHE_LRU_LIST) {
++ if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
++ d_lru_del(dentry);
++ }
+ /* if it was on the hash then remove it */
+ __d_drop(dentry);
+- return d_kill(dentry, parent);
++ list_del(&dentry->d_u.d_child);
++ /*
++ * Inform d_walk() that we are no longer attached to the
++ * dentry tree
++ */
++ dentry->d_flags |= DCACHE_DENTRY_KILLED;
++ if (parent)
++ spin_unlock(&parent->d_lock);
++ dentry_iput(dentry);
++ /*
++ * dentry_iput drops the locks, at which point nobody (except
++ * transient RCU lookups) can reach this dentry.
++ */
++ BUG_ON((int)dentry->d_lockref.count > 0);
++ this_cpu_dec(nr_dentry);
++ if (dentry->d_op && dentry->d_op->d_release)
++ dentry->d_op->d_release(dentry);
++
++ spin_lock(&dentry->d_lock);
++ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
++ dentry->d_flags |= DCACHE_MAY_FREE;
++ can_free = false;
++ }
++ spin_unlock(&dentry->d_lock);
++ if (likely(can_free))
++ dentry_free(dentry);
++}
++
++/*
++ * Finish off a dentry we've decided to kill.
++ * dentry->d_lock must be held, returns with it unlocked.
++ * If ref is non-zero, then decrement the refcount too.
++ * Returns dentry requiring refcount drop, or NULL if we're done.
++ */
++static struct dentry *dentry_kill(struct dentry *dentry)
++ __releases(dentry->d_lock)
++{
++ struct inode *inode = dentry->d_inode;
++ struct dentry *parent = NULL;
++
++ if (inode && unlikely(!spin_trylock(&inode->i_lock)))
++ goto failed;
++
++ if (!IS_ROOT(dentry)) {
++ parent = dentry->d_parent;
++ if (unlikely(!spin_trylock(&parent->d_lock))) {
++ if (inode)
++ spin_unlock(&inode->i_lock);
++ goto failed;
++ }
++ }
++
++ __dentry_kill(dentry);
++ return parent;
++
++failed:
++ spin_unlock(&dentry->d_lock);
++ cpu_relax();
++ return dentry; /* try again with same dentry */
++}
++
++static inline struct dentry *lock_parent(struct dentry *dentry)
++{
++ struct dentry *parent = dentry->d_parent;
++ if (IS_ROOT(dentry))
++ return NULL;
++ if (unlikely((int)dentry->d_lockref.count < 0))
++ return NULL;
++ if (likely(spin_trylock(&parent->d_lock)))
++ return parent;
++ rcu_read_lock();
++ spin_unlock(&dentry->d_lock);
++again:
++ parent = ACCESS_ONCE(dentry->d_parent);
++ spin_lock(&parent->d_lock);
++ /*
++ * We can't blindly lock dentry until we are sure
++ * that we won't violate the locking order.
++ * Any changes of dentry->d_parent must have
++ * been done with parent->d_lock held, so
++ * spin_lock() above is enough of a barrier
++ * for checking if it's still our child.
++ */
++ if (unlikely(parent != dentry->d_parent)) {
++ spin_unlock(&parent->d_lock);
++ goto again;
++ }
++ rcu_read_unlock();
++ if (parent != dentry)
++ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ else
++ parent = NULL;
++ return parent;
+ }
+
+ /*
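lock_parent() above encodes a lock-ordering dance: the caller holds the child's d_lock, but the ordering rule is parent before child, so the helper trylocks the parent first; on contention it drops the child's lock, takes the parent lock in the legal order, and re-checks that d_parent did not change in the unlocked window (the kernel additionally holds rcu_read_lock() there to keep the old parent's memory valid). A userspace pthread sketch of the same shape (names are illustrative; the static objects sidestep the lifetime problem RCU solves in the kernel):

    #include <pthread.h>
    #include <stdio.h>

    struct obj {
        pthread_mutex_t lock;
        struct obj *parent; /* only changed with parent->lock held */
    };

    /* Called with child->lock held; returns with parent->lock and
     * child->lock both held, parent taken first. */
    static struct obj *lock_parent(struct obj *child)
    {
        struct obj *parent = child->parent;

        if (pthread_mutex_trylock(&parent->lock) == 0)
            return parent;                  /* fast path */

        pthread_mutex_unlock(&child->lock); /* drop to honor the ordering */
    again:
        parent = child->parent;             /* may have changed meanwhile */
        pthread_mutex_lock(&parent->lock);
        if (parent != child->parent) {      /* re-check after locking */
            pthread_mutex_unlock(&parent->lock);
            goto again;
        }
        pthread_mutex_lock(&child->lock);
        return parent;
    }

    int main(void) /* build with -pthread */
    {
        struct obj root  = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct obj child = { PTHREAD_MUTEX_INITIALIZER, &root };

        pthread_mutex_lock(&child.lock);
        struct obj *p = lock_parent(&child);
        printf("locked parent %p then child %p\n", (void *)p, (void *)&child);
        pthread_mutex_unlock(&child.lock);
        pthread_mutex_unlock(&p->lock);
        return 0;
    }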
+@@ -636,7 +636,7 @@ repeat:
+ return;
+
+ kill_it:
+- dentry = dentry_kill(dentry, 1);
++ dentry = dentry_kill(dentry);
+ if (dentry)
+ goto repeat;
+ }
+@@ -849,64 +849,15 @@ restart:
+ }
+ EXPORT_SYMBOL(d_prune_aliases);
+
+-/*
+- * Try to throw away a dentry - free the inode, dput the parent.
+- * Requires dentry->d_lock is held, and dentry->d_count == 0.
+- * Releases dentry->d_lock.
+- *
+- * This may fail if locks cannot be acquired no problem, just try again.
+- */
+-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
+- __releases(dentry->d_lock)
+-{
+- struct dentry *parent;
+-
+- parent = dentry_kill(dentry, 0);
+- /*
+- * If dentry_kill returns NULL, we have nothing more to do.
+- * if it returns the same dentry, trylocks failed. In either
+- * case, just loop again.
+- *
+- * Otherwise, we need to prune ancestors too. This is necessary
+- * to prevent quadratic behavior of shrink_dcache_parent(), but
+- * is also expected to be beneficial in reducing dentry cache
+- * fragmentation.
+- */
+- if (!parent)
+- return NULL;
+- if (parent == dentry)
+- return dentry;
+-
+- /* Prune ancestors. */
+- dentry = parent;
+- while (dentry) {
+- if (lockref_put_or_lock(&dentry->d_lockref))
+- return NULL;
+- dentry = dentry_kill(dentry, 1);
+- }
+- return NULL;
+-}
+-
+ static void shrink_dentry_list(struct list_head *list)
+ {
+- struct dentry *dentry;
++ struct dentry *dentry, *parent;
+
+- rcu_read_lock();
+- for (;;) {
+- dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
+- if (&dentry->d_lru == list)
+- break; /* empty */
+-
+- /*
+- * Get the dentry lock, and re-verify that the dentry is
+- * this on the shrinking list. If it is, we know that
+- * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
+- */
++ while (!list_empty(list)) {
++ struct inode *inode;
++ dentry = list_entry(list->prev, struct dentry, d_lru);
+ spin_lock(&dentry->d_lock);
+- if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
+- spin_unlock(&dentry->d_lock);
+- continue;
+- }
++ parent = lock_parent(dentry);
+
+ /*
+ * The dispose list is isolated and dentries are not accounted
+@@ -919,30 +870,63 @@ static void shrink_dentry_list(struct list_head *list)
+ * We found an inuse dentry which was not removed from
+ * the LRU because of laziness during lookup. Do not free it.
+ */
+- if (dentry->d_lockref.count) {
++ if ((int)dentry->d_lockref.count > 0) {
+ spin_unlock(&dentry->d_lock);
++ if (parent)
++ spin_unlock(&parent->d_lock);
+ continue;
+ }
+- rcu_read_unlock();
+
+- /*
+- * If 'try_to_prune()' returns a dentry, it will
+- * be the same one we passed in, and d_lock will
+- * have been held the whole time, so it will not
+- * have been added to any other lists. We failed
+- * to get the inode lock.
+- *
+- * We just add it back to the shrink list.
+- */
+- dentry = try_prune_one_dentry(dentry);
+
+- rcu_read_lock();
+- if (dentry) {
++ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
++ bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
++ spin_unlock(&dentry->d_lock);
++ if (parent)
++ spin_unlock(&parent->d_lock);
++ if (can_free)
++ dentry_free(dentry);
++ continue;
++ }
++
++ inode = dentry->d_inode;
++ if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
+ d_shrink_add(dentry, list);
+ spin_unlock(&dentry->d_lock);
++ if (parent)
++ spin_unlock(&parent->d_lock);
++ continue;
++ }
++
++ __dentry_kill(dentry);
++
++ /*
++ * We need to prune ancestors too. This is necessary to prevent
++ * quadratic behavior of shrink_dcache_parent(), but is also
++ * expected to be beneficial in reducing dentry cache
++ * fragmentation.
++ */
++ dentry = parent;
++ while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
++ parent = lock_parent(dentry);
++ if (dentry->d_lockref.count != 1) {
++ dentry->d_lockref.count--;
++ spin_unlock(&dentry->d_lock);
++ if (parent)
++ spin_unlock(&parent->d_lock);
++ break;
++ }
++ inode = dentry->d_inode; /* can't be NULL */
++ if (unlikely(!spin_trylock(&inode->i_lock))) {
++ spin_unlock(&dentry->d_lock);
++ if (parent)
++ spin_unlock(&parent->d_lock);
++ cpu_relax();
++ continue;
++ }
++ __dentry_kill(dentry);
++ dentry = parent;
+ }
+ }
+- rcu_read_unlock();
+ }
+
+ static enum lru_status
+@@ -1072,144 +1056,6 @@ void shrink_dcache_sb(struct super_block *sb)
+ }
+ EXPORT_SYMBOL(shrink_dcache_sb);
+
+-/*
+- * destroy a single subtree of dentries for unmount
+- * - see the comments on shrink_dcache_for_umount() for a description of the
+- * locking
+- */
+-static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
+-{
+- struct dentry *parent;
+-
+- BUG_ON(!IS_ROOT(dentry));
+-
+- for (;;) {
+- /* descend to the first leaf in the current subtree */
+- while (!list_empty(&dentry->d_subdirs))
+- dentry = list_entry(dentry->d_subdirs.next,
+- struct dentry, d_u.d_child);
+-
+- /* consume the dentries from this leaf up through its parents
+- * until we find one with children or run out altogether */
+- do {
+- struct inode *inode;
+-
+- /*
+- * inform the fs that this dentry is about to be
+- * unhashed and destroyed.
+- */
+- if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+- !d_unhashed(dentry))
+- dentry->d_op->d_prune(dentry);
+-
+- dentry_lru_del(dentry);
+- __d_shrink(dentry);
+-
+- if (dentry->d_lockref.count != 0) {
+- printk(KERN_ERR
+- "BUG: Dentry %p{i=%lx,n=%s}"
+- " still in use (%d)"
+- " [unmount of %s %s]\n",
+- dentry,
+- dentry->d_inode ?
+- dentry->d_inode->i_ino : 0UL,
+- dentry->d_name.name,
+- dentry->d_lockref.count,
+- dentry->d_sb->s_type->name,
+- dentry->d_sb->s_id);
+- BUG();
+- }
+-
+- if (IS_ROOT(dentry)) {
+- parent = NULL;
+- list_del(&dentry->d_u.d_child);
+- } else {
+- parent = dentry->d_parent;
+- parent->d_lockref.count--;
+- list_del(&dentry->d_u.d_child);
+- }
+-
+- inode = dentry->d_inode;
+- if (inode) {
+- dentry->d_inode = NULL;
+- hlist_del_init(&dentry->d_alias);
+- if (dentry->d_op && dentry->d_op->d_iput)
+- dentry->d_op->d_iput(dentry, inode);
+- else
+- iput(inode);
+- }
+-
+- d_free(dentry);
+-
+- /* finished when we fall off the top of the tree,
+- * otherwise we ascend to the parent and move to the
+- * next sibling if there is one */
+- if (!parent)
+- return;
+- dentry = parent;
+- } while (list_empty(&dentry->d_subdirs));
+-
+- dentry = list_entry(dentry->d_subdirs.next,
+- struct dentry, d_u.d_child);
+- }
+-}
+-
+-/*
+- * destroy the dentries attached to a superblock on unmounting
+- * - we don't need to use dentry->d_lock because:
+- * - the superblock is detached from all mountings and open files, so the
+- * dentry trees will not be rearranged by the VFS
+- * - s_umount is write-locked, so the memory pressure shrinker will ignore
+- * any dentries belonging to this superblock that it comes across
+- * - the filesystem itself is no longer permitted to rearrange the dentries
+- * in this superblock
+- */
+-void shrink_dcache_for_umount(struct super_block *sb)
+-{
+- struct dentry *dentry;
+-
+- if (down_read_trylock(&sb->s_umount))
+- BUG();
+-
+- dentry = sb->s_root;
+- sb->s_root = NULL;
+- dentry->d_lockref.count--;
+- shrink_dcache_for_umount_subtree(dentry);
+-
+- while (!hlist_bl_empty(&sb->s_anon)) {
+- dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
+- shrink_dcache_for_umount_subtree(dentry);
+- }
+-}
+-
+-/*
+- * This tries to ascend one level of parenthood, but
+- * we can race with renaming, so we need to re-check
+- * the parenthood after dropping the lock and check
+- * that the sequence number still matches.
+- */
+-static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
+-{
+- struct dentry *new = old->d_parent;
+-
+- rcu_read_lock();
+- spin_unlock(&old->d_lock);
+- spin_lock(&new->d_lock);
+-
+- /*
+- * might go back up the wrong parent if we have had a rename
+- * or deletion
+- */
+- if (new != old->d_parent ||
+- (old->d_flags & DCACHE_DENTRY_KILLED) ||
+- need_seqretry(&rename_lock, seq)) {
+- spin_unlock(&new->d_lock);
+- new = NULL;
+- }
+- rcu_read_unlock();
+- return new;
+-}
+-
+ /**
+ * enum d_walk_ret - action to take during tree walk
+ * @D_WALK_CONTINUE: continue walk
+@@ -1298,9 +1144,24 @@ resume:
+ */
+ if (this_parent != parent) {
+ struct dentry *child = this_parent;
+- this_parent = try_to_ascend(this_parent, seq);
+- if (!this_parent)
++ this_parent = child->d_parent;
++
++ rcu_read_lock();
++ spin_unlock(&child->d_lock);
++ spin_lock(&this_parent->d_lock);
++
++ /*
++ * might go back up the wrong parent if we have had a rename
++ * or deletion
++ */
++ if (this_parent != child->d_parent ||
++ (child->d_flags & DCACHE_DENTRY_KILLED) ||
++ need_seqretry(&rename_lock, seq)) {
++ spin_unlock(&this_parent->d_lock);
++ rcu_read_unlock();
+ goto rename_retry;
++ }
++ rcu_read_unlock();
+ next = child->d_u.d_child.next;
+ goto resume;
+ }
+@@ -1418,34 +1279,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+ if (data->start == dentry)
+ goto out;
+
+- /*
+- * move only zero ref count dentries to the dispose list.
+- *
+- * Those which are presently on the shrink list, being processed
+- * by shrink_dentry_list(), shouldn't be moved. Otherwise the
+- * loop in shrink_dcache_parent() might not make any progress
+- * and loop forever.
+- */
+- if (dentry->d_lockref.count) {
+- dentry_lru_del(dentry);
+- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
+- /*
+- * We can't use d_lru_shrink_move() because we
+- * need to get the global LRU lock and do the
+- * LRU accounting.
+- */
+- d_lru_del(dentry);
+- d_shrink_add(dentry, &data->dispose);
++ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+ data->found++;
+- ret = D_WALK_NORETRY;
++ } else {
++ if (dentry->d_flags & DCACHE_LRU_LIST)
++ d_lru_del(dentry);
++ if (!dentry->d_lockref.count) {
++ d_shrink_add(dentry, &data->dispose);
++ data->found++;
++ }
+ }
+ /*
+ * We can return to the caller if we have found some (this
+ * ensures forward progress). We'll be coming back to find
+ * the rest.
+ */
+- if (data->found && need_resched())
+- ret = D_WALK_QUIT;
++ if (!list_empty(&data->dispose))
++ ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
+ out:
+ return ret;
+ }
+@@ -1475,6 +1325,56 @@ void shrink_dcache_parent(struct dentry *parent)
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+
++static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
++{
++ /* it has busy descendents; complain about those instead */
++ if (!list_empty(&dentry->d_subdirs))
++ return D_WALK_CONTINUE;
++
++ /* root with refcount 1 is fine */
++ if (dentry == _data && dentry->d_lockref.count == 1)
++ return D_WALK_CONTINUE;
++
++ printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
++ " still in use (%d) [unmount of %s %s]\n",
++ dentry,
++ dentry->d_inode ?
++ dentry->d_inode->i_ino : 0UL,
++ dentry,
++ dentry->d_lockref.count,
++ dentry->d_sb->s_type->name,
++ dentry->d_sb->s_id);
++ WARN_ON(1);
++ return D_WALK_CONTINUE;
++}
++
++static void do_one_tree(struct dentry *dentry)
++{
++ shrink_dcache_parent(dentry);
++ d_walk(dentry, dentry, umount_check, NULL);
++ d_drop(dentry);
++ dput(dentry);
++}
++
++/*
++ * destroy the dentries attached to a superblock on unmounting
++ */
++void shrink_dcache_for_umount(struct super_block *sb)
++{
++ struct dentry *dentry;
++
++ WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
++
++ dentry = sb->s_root;
++ sb->s_root = NULL;
++ do_one_tree(dentry);
++
++ while (!hlist_bl_empty(&sb->s_anon)) {
++ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
++ do_one_tree(dentry);
++ }
++}
++
+ static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
+ {
+ struct select_data *data = _data;
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 67e9b6339691..69b488c509e6 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -1051,7 +1051,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ }
+
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+- if (!rc)
++ if (!rc && dentry->d_inode)
+ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ out:
+ return rc;
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index c260de6d7b6d..8a337640a46a 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -632,6 +632,8 @@ static int ext2_get_blocks(struct inode *inode,
+ int count = 0;
+ ext2_fsblk_t first_block = 0;
+
++ BUG_ON(maxblocks == 0);
++
+ depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
+
+ if (depth == 0)
+diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
+index 1c3312858fcf..e98171a11cfe 100644
+--- a/fs/ext2/xip.c
++++ b/fs/ext2/xip.c
+@@ -35,6 +35,7 @@ __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
+ int rc;
+
+ memset(&tmp, 0, sizeof(struct buffer_head));
++ tmp.b_size = 1 << inode->i_blkbits;
+ rc = ext2_get_block(inode, pgoff, &tmp, create);
+ *result = tmp.b_blocknr;
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 4ea2b7378d8c..4b14bfc4cfce 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1277,6 +1277,8 @@ static int do_umount(struct mount *mnt, int flags)
+ * Special case for "unmounting" root ...
+ * we just try to remount it readonly.
+ */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+ down_write(&sb->s_umount);
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 609621532fc0..9f7f1a0d30dc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6891,7 +6891,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+ int ret = 0;
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+- return 0;
++ return -EAGAIN;
+ task = _nfs41_proc_sequence(clp, cred, false);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
+index 1720d32ffa54..e1ba58c3d1ad 100644
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
+ }
+ nfs_expire_all_delegations(clp);
+ } else {
++ int ret;
++
+ /* Queue an asynchronous RENEW. */
+- ops->sched_state_renewal(clp, cred, renew_flags);
++ ret = ops->sched_state_renewal(clp, cred, renew_flags);
+ put_rpccred(cred);
+- goto out_exp;
++ switch (ret) {
++ default:
++ goto out_exp;
++ case -EAGAIN:
++ case -ENOMEM:
++ break;
++ }
+ }
+ } else {
+ dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 26c07f9efdb3..03c531529982 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1690,7 +1690,8 @@ restart:
+ if (status < 0) {
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+- return nfs4_recovery_handle_error(clp, status);
++ status = nfs4_recovery_handle_error(clp, status);
++ return (status != 0) ? status : -EAGAIN;
+ }
+
+ nfs4_put_state_owner(sp);
+@@ -1699,7 +1700,7 @@ restart:
+ spin_unlock(&clp->cl_lock);
+ }
+ rcu_read_unlock();
+- return status;
++ return 0;
+ }
+
+ static int nfs4_check_lease(struct nfs_client *clp)
+@@ -1746,7 +1747,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+ break;
+ case -NFS4ERR_STALE_CLIENTID:
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+- nfs4_state_clear_reclaim_reboot(clp);
+ nfs4_state_start_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_CLID_INUSE:
+@@ -2173,14 +2173,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ section = "reclaim reboot";
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->reboot_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+- continue;
+- nfs4_state_end_reclaim_reboot(clp);
+- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
++ nfs4_state_end_reclaim_reboot(clp);
+ }
+
+ /* Now recover expired state... */
+@@ -2188,9 +2185,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ section = "reclaim nograce";
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->nograce_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 6663511ab33a..cc80b0a55a23 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -69,7 +69,7 @@ static int create_fd(struct fsnotify_group *group,
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+- client_fd = get_unused_fd();
++ client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
+ if (client_fd < 0)
+ return client_fd;
+
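The fanotify change passes the group's f_flags into get_unused_fd_flags(), so a listener that asked for O_CLOEXEC at fanotify_init() time actually receives close-on-exec event descriptors; the old get_unused_fd() silently dropped the flag. Userspace can observe the per-descriptor flag through fcntl(F_GETFD), as this small sketch shows:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Open the same file with and without O_CLOEXEC, then inspect
         * the per-descriptor FD_CLOEXEC bit. */
        int plain   = open("/dev/null", O_RDONLY);
        int cloexec = open("/dev/null", O_RDONLY | O_CLOEXEC);

        if (plain < 0 || cloexec < 0) {
            perror("open");
            return 1;
        }

        printf("plain:   FD_CLOEXEC=%d\n", fcntl(plain, F_GETFD) & FD_CLOEXEC);
        printf("cloexec: FD_CLOEXEC=%d\n", fcntl(cloexec, F_GETFD) & FD_CLOEXEC);

        close(plain);
        close(cloexec);
        return 0;
    }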
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 0415a628b2ab..ab28ad576b16 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -431,10 +431,22 @@ xfs_start_page_writeback(
+ {
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
+- if (clear_dirty)
++
++ /*
++ * if the page was not fully cleaned, we need to ensure that the higher
++ * layers come back to it correctly. That means we need to keep the page
++ * dirty, and for WB_SYNC_ALL writeback we need to ensure the
++ * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
++ * write this page in this writeback sweep will be made.
++ */
++ if (clear_dirty) {
+ clear_page_dirty_for_io(page);
+- set_page_writeback(page);
++ set_page_writeback(page);
++ } else
++ set_page_writeback_keepwrite(page);
++
+ unlock_page(page);
++
+ /* If no buffers on the page are to be written, finish it here */
+ if (!buffers)
+ end_page_writeback(page);
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+new file mode 100644
+index 000000000000..cdd1cc202d51
+--- /dev/null
++++ b/include/linux/compiler-gcc5.h
+@@ -0,0 +1,66 @@
++#ifndef __LINUX_COMPILER_H
++#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
++#endif
++
++#define __used __attribute__((__used__))
++#define __must_check __attribute__((warn_unused_result))
++#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
++
++/* Mark functions as cold. gcc will assume any path leading to a call
++ to them will be unlikely. This means a lot of manual unlikely()s
++ are unnecessary now for any paths leading to the usual suspects
++ like BUG(), printk(), panic() etc. [but let's keep them for now for
++ older compilers]
++
++ Early snapshots of gcc 4.3 don't support this and we can't detect this
++ in the preprocessor, but we can live with this because they're unreleased.
++ Maketime probing would be overkill here.
++
++ gcc also has a __attribute__((__hot__)) to move hot functions into
++ a special section, but I don't see any sense in this right now in
++ the kernel context */
++#define __cold __attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++
++/*
++ * Mark a position in code as unreachable. This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased. Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone __attribute__((__noclone__))
++
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible __attribute__((externally_visible))
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 59066e0b4ff1..cbde0540d4dd 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -211,6 +211,8 @@ struct dentry_operations {
+ #define DCACHE_LRU_LIST 0x80000
+ #define DCACHE_DENTRY_KILLED 0x100000
+
++#define DCACHE_MAY_FREE 0x00800000
++
+ extern seqlock_t rename_lock;
+
+ static inline int dname_external(const struct dentry *dentry)
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 0fbbc7aa02cb..e47c7e2f4d04 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -464,8 +464,6 @@ void kvm_exit(void);
+
+ void kvm_get_kvm(struct kvm *kvm);
+ void kvm_put_kvm(struct kvm *kvm);
+-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+- u64 last_generation);
+
+ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+ {
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 97fbecdd7a40..057c1d8c77e5 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2551,6 +2551,7 @@
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
+ #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index cb67b4e2dba2..a4d7d19fc338 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1691,11 +1691,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
+-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
++/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
++ * __GFP_FS is also cleared as it implies __GFP_IO.
++ */
+ static inline gfp_t memalloc_noio_flags(gfp_t flags)
+ {
+ if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+- flags &= ~__GFP_IO;
++ flags &= ~(__GFP_IO | __GFP_FS);
+ return flags;
+ }
+
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 32e0f5c04e72..c3ddcdc36598 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -44,4 +44,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
++/* device generates spurious wakeup, ignore remote wakeup capability */
++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/regulatory.h b/include/net/regulatory.h
+index 23a019668705..3e827aad1ec6 100644
+--- a/include/net/regulatory.h
++++ b/include/net/regulatory.h
+@@ -78,7 +78,7 @@ struct regulatory_request {
+ int wiphy_idx;
+ enum nl80211_reg_initiator initiator;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
+- char alpha2[3];
++ char alpha2[2];
+ u8 dfs_region;
+ bool intersect;
+ bool processed;
+@@ -106,7 +106,7 @@ struct ieee80211_reg_rule {
+ struct ieee80211_regdomain {
+ struct rcu_head rcu_head;
+ u32 n_reg_rules;
+- char alpha2[2];
++ char alpha2[3];
+ u8 dfs_region;
+ struct ieee80211_reg_rule reg_rules[];
+ };
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cf2413f6ce7f..63bd27c861fe 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -39,6 +39,7 @@
+ #include <linux/hw_breakpoint.h>
+ #include <linux/mm_types.h>
+ #include <linux/cgroup.h>
++#include <linux/compat.h>
+
+ #include "internal.h"
+
+@@ -3630,6 +3631,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ return 0;
+ }
+
++#ifdef CONFIG_COMPAT
++static long perf_compat_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ switch (_IOC_NR(cmd)) {
++ case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
++ case _IOC_NR(PERF_EVENT_IOC_ID):
++ /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
++ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
++ cmd &= ~IOCSIZE_MASK;
++ cmd |= sizeof(void *) << IOCSIZE_SHIFT;
++ }
++ break;
++ }
++ return perf_ioctl(file, cmd, arg);
++}
++#else
++# define perf_compat_ioctl NULL
++#endif
++
+ int perf_event_task_enable(void)
+ {
+ struct perf_event *event;
+@@ -4122,7 +4143,7 @@ static const struct file_operations perf_fops = {
+ .read = perf_read,
+ .poll = perf_poll,
+ .unlocked_ioctl = perf_ioctl,
+- .compat_ioctl = perf_ioctl,
++ .compat_ioctl = perf_compat_ioctl,
+ .mmap = perf_mmap,
+ .fasync = perf_fasync,
+ };
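
The compat handler rewrites the size field embedded in the ioctl command word: a 32-bit process encodes sizeof(compat_uptr_t) == 4, while the 64-bit handler expects the native pointer size. A toy sketch of that fixup (field layout mirrored from include/uapi/asm-generic/ioctl.h; the command value is made up):

    #include <stdio.h>

    /* The argument size occupies 14 bits starting at bit 16. */
    #define SIZE_SHIFT 16
    #define SIZE_BITS  14
    #define SIZE_MASK  ((1u << SIZE_BITS) - 1)

    /* A 32-bit caller encoded a 4-byte pointer in the command word;
     * rewrite that field to the native pointer size so the command
     * matches what the 64-bit handler expects. */
    static unsigned int fix_pointer_size(unsigned int cmd)
    {
        if (((cmd >> SIZE_SHIFT) & SIZE_MASK) == 4) {
            cmd &= ~(SIZE_MASK << SIZE_SHIFT);
            cmd |= (unsigned int)sizeof(void *) << SIZE_SHIFT;
        }
        return cmd;
    }

    int main(void)
    {
        unsigned int cmd = (4u << SIZE_SHIFT) | 0x2a;  /* toy command, size 4 */

        printf("cmd %#x -> %#x\n", cmd, fix_pointer_size(cmd));
        return 0;
    }
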
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 8563081e8da3..a1c387f6afba 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,31 +19,21 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+
+-#define HAVE_IP(t, x) \
+- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
++#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
++#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
++#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+
+-#define HAVE_OP(t, x) \
+- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
+-
+-#define NEED_IP(t, x) \
+- do { \
+- if (!HAVE_IP(t, x)) \
+- goto input_overrun; \
+- } while (0)
+-
+-#define NEED_OP(t, x) \
+- do { \
+- if (!HAVE_OP(t, x)) \
+- goto output_overrun; \
+- } while (0)
+-
+-#define TEST_LB(m_pos) \
+- do { \
+- if ((m_pos) < out) \
+- goto lookbehind_overrun; \
+- } while (0)
++/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
++ * count without overflowing an integer. The multiply will overflow when
++ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
++ * depending on the base count. Since the base count is taken from a u8
++ * and a few bits, it is safe to assume that it will always be lower than
++ * or equal to 2*255, thus we can always prevent any overflow by accepting
++ * two less 255 steps. See Documentation/lzo.txt for more information.
++ */
++#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 15 + *ip++;
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 15 + *ip++;
+ }
+ t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
++ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+@@ -101,8 +98,8 @@ copy_literal_run:
+ } else
+ #endif
+ {
+- NEED_OP(t, 0);
+- NEED_IP(t, 3);
++ NEED_OP(t);
++ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+@@ -115,7 +112,7 @@ copy_literal_run:
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+- NEED_OP(2, 0);
++ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -136,13 +133,20 @@ copy_literal_run:
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 31 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 31 + *ip++;
++ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+@@ -154,13 +158,20 @@ copy_literal_run:
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 7 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 7 + *ip++;
++ NEED_IP(2);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+@@ -174,7 +185,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+- if (likely(HAVE_OP(t, 15))) {
++ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+@@ -184,7 +195,7 @@ copy_literal_run:
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+- if (HAVE_IP(6, 0)) {
++ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+@@ -192,7 +203,7 @@ copy_literal_run:
+ continue;
+ }
+ } else {
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+@@ -201,7 +212,7 @@ copy_literal_run:
+ #endif
+ {
+ unsigned char *oe = op + t;
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -214,15 +225,15 @@ match_next:
+ state = next;
+ t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
++ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+ #endif
+ {
+- NEED_IP(t, 3);
+- NEED_OP(t, 0);
++ NEED_IP(t + 3);
++ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4e705ed74b81..ff648969e402 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -250,6 +250,9 @@ struct mem_cgroup {
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
++ /* css_online() has been completed */
++ int initialized;
++
+ /*
+ * the counter to account for mem+swap usage.
+ */
+@@ -1089,9 +1092,23 @@ skip_node:
+ * skipping css reference should be safe.
+ */
+ if (next_css) {
+- if ((next_css == &root->css) ||
+- ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+- return mem_cgroup_from_css(next_css);
++ struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
++
++ if (next_css == &root->css)
++ return memcg;
++
++ if (css_tryget(next_css)) {
++ if (memcg->initialized) {
++ /*
++ * Make sure the memcg is initialized:
++ * mem_cgroup_css_online() orders the
++ * initialization against setting the flag.
++ */
++ smp_rmb();
++ return memcg;
++ }
++ css_put(next_css);
++ }
+
+ prev_css = next_css;
+ goto skip_node;
+@@ -6331,6 +6348,16 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
+
+ error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+ mutex_unlock(&memcg_create_mutex);
++
++ if (!error) {
++ /*
++ * Make sure the memcg is initialized: mem_cgroup_iter()
++ * orders reading memcg->initialized against its callers
++ * reading the memcg members.
++ */
++ smp_wmb();
++ memcg->initialized = 1;
++ }
+ return error;
+ }
+
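
The memcontrol fix is a classic publish/subscribe barrier pairing: the smp_wmb() in mem_cgroup_css_online() orders the initialization before the flag store, and the smp_rmb() in the iterator orders the flag load before reads of the other fields. A userspace sketch of the pattern, using a full-barrier builtin as a stand-in for the kernel barriers:

    #include <stdio.h>

    struct memcg_like {
        int data;         /* fields set up during onlining */
        int initialized;  /* flag published last */
    };

    /* Publisher, as in mem_cgroup_css_online(): finish initialization,
     * then a write barrier, then set the flag. */
    static void publish(struct memcg_like *m)
    {
        m->data = 42;
        __sync_synchronize();  /* stand-in for smp_wmb() */
        m->initialized = 1;
    }

    /* Reader, as in mem_cgroup_iter(): test the flag, then a read
     * barrier, and only then trust the other fields. */
    static int consume(const struct memcg_like *m)
    {
        if (!m->initialized)
            return -1;
        __sync_synchronize();  /* stand-in for smp_rmb() */
        return m->data;
    }

    int main(void)
    {
        struct memcg_like m = { 0, 0 };

        publish(&m);
        printf("data: %d\n", consume(&m));
        return 0;
    }
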
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 94e21b9b1c87..057017bd3b42 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -290,7 +290,11 @@ int ceph_msgr_init(void)
+ if (ceph_msgr_slab_init())
+ return -ENOMEM;
+
+- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
++ /*
++ * The number of active work items is limited by the number of
++ * connections, so leave @max_active at default.
++ */
++ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+ if (ceph_msgr_wq)
+ return 0;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4b1f8d02c68f..70876db1ade2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2511,13 +2511,19 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ return harmonize_features(skb, dev, features);
+ }
+
+- features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_STAG_TX);
++ features = netdev_intersect_features(features,
++ dev->vlan_features |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX);
+
+ if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+- features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+- NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_STAG_TX;
++ features = netdev_intersect_features(features,
++ NETIF_F_SG |
++ NETIF_F_HIGHDMA |
++ NETIF_F_FRAGLIST |
++ NETIF_F_GEN_CSUM |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX);
+
+ return harmonize_features(skb, dev, features);
+ }
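
Plain & is wrong here because checksum offload bits have one-of semantics: a side advertising generic checksumming can also perform the specific kinds, yet a bitwise AND against a side that only sets, say, an IPv4-checksum bit would yield nothing. A toy model of the idea behind the intersect helper (flag values invented for the example):

    #include <stdio.h>

    /* Invented values; the real flags are in include/linux/netdev_features.h. */
    #define F_IP_CSUM  0x1ull
    #define F_HW_CSUM  0x2ull  /* generic checksumming, covers the rest */
    #define F_ALL_CSUM (F_IP_CSUM | F_HW_CSUM)

    /* Models netdev_intersect_features(): a side offering generic
     * checksums implicitly offers the specific kinds, so expand each
     * side before ANDing them together. */
    static unsigned long long intersect(unsigned long long f1,
                                        unsigned long long f2)
    {
        if (f1 & F_HW_CSUM)
            f1 |= F_ALL_CSUM;
        if (f2 & F_HW_CSUM)
            f2 |= F_ALL_CSUM;
        return f1 & f2;
    }

    int main(void)
    {
        unsigned long long dev = F_HW_CSUM, vlan = F_IP_CSUM;

        printf("plain AND: %#llx, intersect: %#llx\n",
               dev & vlan, intersect(dev, vlan));
        return 0;
    }
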
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index a68d4c6d702c..c882d07e56c9 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3187,7 +3187,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+
+ #ifndef ARCH_HAS_DMA_MMAP_COHERENT
+ /* This should be defined / handled globally! */
+-#ifdef CONFIG_ARM
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+ #endif
+ #endif
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index cae36597aa71..0a34b5f1c475 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
+ * get more voice for pcm
+ *
+ * terminate most inactive voice and give it as a pcm voice.
++ *
++ * voice_lock is already held.
+ */
+ int
+ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ struct snd_emux *emu;
+ struct snd_emux_voice *vp;
+ struct best_voice best[V_END];
+- unsigned long flags;
+ int i;
+
+ emu = hw->synth;
+
+- spin_lock_irqsave(&emu->voice_lock, flags);
+ lookup_voices(emu, hw, best, 1); /* no OFF voices */
+ for (i = 0; i < V_END; i++) {
+ if (best[i].voice >= 0) {
+@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ vp->emu->num_voices--;
+ vp->ch = -1;
+ vp->state = SNDRV_EMUX_ST_OFF;
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+ return ch;
+ }
+ }
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+
+ /* not found */
+ return -ENOMEM;
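
After this change the function relies on the caller already holding voice_lock (see the updated comment above); re-taking a non-recursive kernel spinlock on the same CPU would deadlock. A userspace sketch of the caller-holds-lock convention using a pthread mutex (names are illustrative; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t voice_lock = PTHREAD_MUTEX_INITIALIZER;
    static int free_voices = 4;

    /* Convention after the patch: the caller already holds voice_lock,
     * so this helper must not lock it again; a kernel spinlock taken
     * twice on one CPU would deadlock rather than fail gracefully. */
    static int get_voice_locked(void)
    {
        return free_voices > 0 ? --free_voices : -1;
    }

    int main(void)
    {
        pthread_mutex_lock(&voice_lock);
        printf("got voice slot %d\n", get_voice_locked());
        pthread_mutex_unlock(&voice_lock);
        return 0;
    }
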
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 9d1a53f2a510..27c99528b823 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1478,19 +1478,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ }
+ }
+
+- if (pin_eld->eld_valid && !eld->eld_valid) {
+- update_eld = true;
++ if (pin_eld->eld_valid != eld->eld_valid)
+ eld_changed = true;
+- }
++
++ if (pin_eld->eld_valid && !eld->eld_valid)
++ update_eld = true;
++
+ if (update_eld) {
+ bool old_eld_valid = pin_eld->eld_valid;
+ pin_eld->eld_valid = eld->eld_valid;
+- eld_changed = pin_eld->eld_size != eld->eld_size ||
++ if (pin_eld->eld_size != eld->eld_size ||
+ memcmp(pin_eld->eld_buffer, eld->eld_buffer,
+- eld->eld_size) != 0;
+- if (eld_changed)
++ eld->eld_size) != 0) {
+ memcpy(pin_eld->eld_buffer, eld->eld_buffer,
+ eld->eld_size);
++ eld_changed = true;
++ }
+ pin_eld->eld_size = eld->eld_size;
+ pin_eld->info = eld->info;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0b94d48331f3..8be86358f640 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2764,6 +2764,9 @@ static void alc283_shutup(struct hda_codec *codec)
+
+ alc_write_coef_idx(codec, 0x43, 0x9004);
+
++ /* depop hp during suspend */
++ alc_write_coef_idx(codec, 0x06, 0x2100);
++
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 0a81a51dd997..01fac71992ba 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -386,6 +386,36 @@ YAMAHA_DEVICE(0x105d, NULL),
+ }
+ },
+ {
++ USB_DEVICE(0x0499, 0x1509),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ /* .vendor_name = "Yamaha", */
++ /* .product_name = "Steinberg UR22", */
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 3,
++ .type = QUIRK_MIDI_YAMAHA
++ },
++ {
++ .ifnum = 4,
++ .type = QUIRK_IGNORE_INTERFACE
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
++{
+ USB_DEVICE(0x0499, 0x150a),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Yamaha", */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index aac732d17c17..b9bf29490b12 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -52,6 +52,7 @@
+
+ #include <asm/processor.h>
+ #include <asm/io.h>
++#include <asm/ioctl.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+
+@@ -673,8 +674,7 @@ static void sort_memslots(struct kvm_memslots *slots)
+ slots->id_to_index[slots->memslots[i].id] = i;
+ }
+
+-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+- u64 last_generation)
++void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+ {
+ if (new) {
+ int id = new->id;
+@@ -685,8 +685,6 @@ void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
+ if (new->npages != npages)
+ sort_memslots(slots);
+ }
+-
+- slots->generation = last_generation + 1;
+ }
+
+ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+@@ -708,10 +706,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+ {
+ struct kvm_memslots *old_memslots = kvm->memslots;
+
+- update_memslots(slots, new, kvm->memslots->generation);
++ /*
++ * Set the low bit in the generation, which disables SPTE caching
++ * until the end of synchronize_srcu_expedited.
++ */
++ WARN_ON(old_memslots->generation & 1);
++ slots->generation = old_memslots->generation + 1;
++
++ update_memslots(slots, new);
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+
++ /*
++ * Increment the new memslot generation a second time. This prevents
++ * vm exits that race with memslot updates from caching a memslot
++ * generation that will (potentially) be valid forever.
++ */
++ slots->generation++;
++
+ kvm_arch_memslots_updated(kvm);
+
+ return old_memslots;
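
The generation now goes odd before the new memslots become visible and even again after synchronize_srcu_expedited(), so any lookup cached against an in-flight (odd) generation can never be mistaken for valid later. A toy model of the even/odd scheme (single-threaded stand-in for the SRCU dance):

    #include <stdio.h>

    static unsigned long long generation;  /* even whenever slots are stable */

    /* Mirrors the patched install_new_memslots() scheme: bump to odd
     * before publishing (caching disabled), bump to even afterwards. */
    static void update_memslots_toy(void)
    {
        generation++;  /* odd: an update is in flight */
        /* ... swap in the new slot array, wait out readers (SRCU) ... */
        generation++;  /* even again: caching re-enabled */
    }

    static int cache_valid(unsigned long long cached)
    {
        /* An odd tag never validates; a stale even tag mismatches. */
        return !(cached & 1) && cached == generation;
    }

    int main(void)
    {
        unsigned long long cached = generation;

        update_memslots_toy();
        printf("cache still valid: %d\n", cache_valid(cached));
        return 0;
    }
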
+@@ -1970,6 +1982,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
+ if (vcpu->kvm->mm != current->mm)
+ return -EIO;
+
++ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
++ return -EINVAL;
++
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
+ /*
+ * Special cases: vcpu ioctls that are asynchronous to vcpu execution,