-rw-r--r--  3.14.23/0000_README (renamed from 3.14.22/0000_README) | 6
-rw-r--r--  3.14.23/1022_linux-3.14.23.patch | 5877
-rw-r--r--  3.14.23/4420_grsecurity-3.0-3.14.23-201410312212.patch (renamed from 3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch) | 930
-rw-r--r--  3.14.23/4425_grsec_remove_EI_PAX.patch (renamed from 3.14.22/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r--  3.14.23/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.14.22/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r--  3.14.23/4430_grsec-remove-localversion-grsec.patch (renamed from 3.14.22/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  3.14.23/4435_grsec-mute-warnings.patch (renamed from 3.14.22/4435_grsec-mute-warnings.patch) | 0
-rw-r--r--  3.14.23/4440_grsec-remove-protected-paths.patch (renamed from 3.14.22/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  3.14.23/4450_grsec-kconfig-default-gids.patch (renamed from 3.14.22/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  3.14.23/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.14.22/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  3.14.23/4470_disable-compat_vdso.patch (renamed from 3.14.22/4470_disable-compat_vdso.patch) | 0
-rw-r--r--  3.14.23/4475_emutramp_default_on.patch (renamed from 3.14.22/4475_emutramp_default_on.patch) | 0
-rw-r--r--  3.17.2/0000_README (renamed from 3.17.1/0000_README) | 2
-rw-r--r--  3.17.2/4420_grsecurity-3.0-3.17.2-201410312213.patch (renamed from 3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch) | 2031
-rw-r--r--  3.17.2/4425_grsec_remove_EI_PAX.patch (renamed from 3.17.1/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r--  3.17.2/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.17.1/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r--  3.17.2/4430_grsec-remove-localversion-grsec.patch (renamed from 3.17.1/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  3.17.2/4435_grsec-mute-warnings.patch (renamed from 3.17.1/4435_grsec-mute-warnings.patch) | 0
-rw-r--r--  3.17.2/4440_grsec-remove-protected-paths.patch (renamed from 3.17.1/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  3.17.2/4450_grsec-kconfig-default-gids.patch (renamed from 3.17.1/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  3.17.2/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.17.1/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  3.17.2/4470_disable-compat_vdso.patch (renamed from 3.17.1/4470_disable-compat_vdso.patch) | 0
-rw-r--r--  3.17.2/4475_emutramp_default_on.patch (renamed from 3.17.1/4475_emutramp_default_on.patch) | 0
-rw-r--r--  3.2.63/0000_README | 2
-rw-r--r--  3.2.63/4420_grsecurity-3.0-3.2.63-201410312211.patch (renamed from 3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch) | 298
25 files changed, 7813 insertions, 1333 deletions
diff --git a/3.14.22/0000_README b/3.14.23/0000_README
index de2e1c4..ceedf6a 100644
--- a/3.14.22/0000_README
+++ b/3.14.23/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.22-201410250026.patch
+Patch: 1022_linux-3.14.23.patch
+From: http://www.kernel.org
+Desc: Linux 3.14.23
+
+Patch: 4420_grsecurity-3.0-3.14.23-201410312212.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.23/1022_linux-3.14.23.patch b/3.14.23/1022_linux-3.14.23.patch
new file mode 100644
index 0000000..d74580b
--- /dev/null
+++ b/3.14.23/1022_linux-3.14.23.patch
@@ -0,0 +1,5877 @@
+diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
+new file mode 100644
+index 0000000..ea45dd3
+--- /dev/null
++++ b/Documentation/lzo.txt
+@@ -0,0 +1,164 @@
++
++LZO stream format as understood by Linux's LZO decompressor
++===========================================================
++
++Introduction
++
++ This is not a specification. No specification seems to be publicly available
++ for the LZO stream format. This document describes what input format the LZO
++ decompressor, as implemented in the Linux kernel, understands. The file
++ analyzed here is lib/lzo/lzo1x_decompress_safe.c. Neither the compressor nor
++ any other implementation was analyzed, though it seems likely that the format
++ matches the standard one. The purpose of this document is to better
++ understand what the code does in order to propose more efficient fixes for
++ future bug reports.
++
++Description
++
++ The stream is composed of a series of instructions, operands, and data. The
++ instructions consist of a few bits representing an opcode, and bits forming
++ the operands for the instruction, whose size and position depend on the
++ opcode and on the number of literals copied by the previous instruction. The
++ operands are used to indicate :
++
++ - a distance when copying data from the dictionary (past output buffer)
++ - a length (number of bytes to copy from dictionary)
++ - the number of literals to copy, which is retained in the variable "state"
++   as a piece of information for the next instructions.
++
++ Optionally, depending on the opcode and operands, extra data may follow. This
++ extra data can be a complement for the operand (eg: a length or a distance
++ encoded using larger values), or literals to be copied to the output buffer.
++
++ The first byte of the block follows a different encoding from the other
++ bytes; it seems to be optimized for literal use only, since there is no
++ dictionary yet prior to that byte.
++
++ Lengths are always encoded with a variable size, starting with a small number
++ of bits in the operand. If that number of bits isn't enough to represent the
++ length, the length is extended by consuming more bytes, each extra byte
++ adding at most 255 (thus the compression ratio cannot exceed around 255:1).
++ The variable length encoding using #bits is always the same :
++
++ length = byte & ((1 << #bits) - 1)
++ if (!length) {
++ length = ((1 << #bits) - 1)
++ length += 255*(number of zero bytes)
++ length += first-non-zero-byte
++ }
++ length += constant (generally 2 or 3)
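++
++ The scheme above can be sketched in C as follows (a hedged illustration
++ only, not code from lzo1x_decompress_safe.c : the function name and the
++ omission of bounds checking are assumptions of this sketch) :
++
++   /* Decode a variable-size length. 'byte' is the opcode byte, 'bits'
++    * the number of length bits it carries, and 'base' the constant added
++    * at the end. Advances *pp past any extra length bytes consumed.
++    */
++   static size_t decode_len(const unsigned char **pp, unsigned char byte,
++                            int bits, size_t base)
++   {
++           const unsigned char *p = *pp;
++           size_t len = byte & ((1u << bits) - 1);
++
++           if (!len) {
++                   len = (1u << bits) - 1;
++                   while (*p == 0) {       /* each zero byte adds 255 */
++                           len += 255;
++                           p++;
++                   }
++                   len += *p++;            /* first non-zero byte */
++           }
++           *pp = p;
++           return len + base;              /* base is generally 2 or 3 */
++   }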
++
++ For references to the dictionary, distances are relative to the output
++ pointer. Distances are encoded using very few bits belonging to certain
++ ranges, resulting in multiple copy instructions using different encodings.
++ Certain encodings involve one extra byte; others involve two extra bytes
++ forming a little-endian 16-bit quantity (marked LE16 below).
++
++ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
++ are copied before starting the next instruction. The number of literals that
++ were copied may change the meaning and behaviour of the next instruction. In
++ practice, only one instruction needs to know whether 0, less than 4, or more
++ literals were copied. This is the information stored in the <state> variable
++ in this implementation. This number of immediate literals to be copied is
++ generally encoded in the last two bits of the instruction but may also be
++ taken from the last two bits of an extra operand (eg: distance).
++
++ End of stream is declared when a block copy of distance 0 is seen. Only one
++ instruction may encode this distance (0001HLLL); it takes one LE16 operand
++ for the distance, thus requiring 3 bytes.
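++
++ For example (an illustrative note), the usual end-of-stream marker is the
++ three-byte sequence 0x11 0x00 0x00 : opcode 17 is 0001 0001, i.e. H = 0
++ and L = 1 (length = 2 + 1 = 3), and the LE16 operand is zero, so the raw
++ D is 0 and the computed distance is 16384 + (0 << 14) + 0 = 16384.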
++
++ IMPORTANT NOTE : in the code some length checks are missing because certain
++ instructions are called under the assumption that a certain number of bytes
++ follow, which has already been guaranteed before parsing the instructions.
++ They just have to "refill" this credit if they consume extra bytes. This is
++ an implementation design choice independent of the algorithm or encoding.
++
++Byte sequences
++
++ First byte encoding :
++
++ 0..17 : follow regular instruction encoding, see below. It is worth
++ noting that codes 16 and 17 will represent a block copy from
++ the dictionary, which is empty, and that they will always be
++ invalid at this place.
++
++ 18..21 : copy 1..4 literals
++ state = (byte - 17) = 1..4 [ copy <state> literals ]
++ skip byte
++
++ 22..255 : copy literal string
++ length = (byte - 17) = 5..238
++ state = 4 [ don't copy extra literals ]
++ skip byte
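++
++ A hedged C sketch of this first-byte dispatch (copy_literals() is an
++ invented helper, not a function of the analyzed file) :
++
++   unsigned char byte = *in++;
++   if (byte <= 17) {
++           /* fall through to the regular instruction encoding below */
++   } else if (byte <= 21) {
++           state = byte - 17;              /* 1..4 */
++           copy_literals(state);
++   } else {
++           copy_literals(byte - 17);       /* 5..238 literals */
++           state = 4;                      /* don't copy extra literals */
++   }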
++
++ Instruction encoding :
++
++ 0 0 0 0 X X X X (0..15)
++ Depends on the number of literals copied by the last instruction.
++ If the last instruction did not copy any literals (state == 0), this
++ encoding will be a copy of 4 or more literals, and must be interpreted
++ like this :
++
++ 0 0 0 0 L L L L (0..15) : copy long literal string
++ length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
++ state = 4 (no extra literals are copied)
++
++ If the last instruction copied between 1 and 3 literals (encoded in
++ the instruction's opcode or distance), the instruction is a copy of a
++ 2-byte block from the dictionary within a 1kB distance. It is worth
++ noting that this instruction provides little savings since it uses 2
++ bytes to encode a copy of 2 other bytes but it encodes the number of
++ following literals for free. It must be interpreted like this :
++
++ 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance
++ length = 2
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 2) + D + 1
++
++ If the last instruction copied 4 or more literals (as detected by
++ state == 4), the instruction becomes a copy of a 3-byte block from the
++ dictionary from a 2..3kB distance, and must be interpreted like this :
++
++ 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance
++ length = 3
++ state = S (copy S literals after this block)
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 2) + D + 2049
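++
++ (Worked example, for illustration : with state == 4, the byte 0x06 is
++ 0 0 0 0 0 1 1 0, i.e. D = 1 and S = 2; if the following byte is H = 0x20,
++ then distance = (0x20 << 2) + 1 + 2049 = 2178, so 3 bytes are copied
++ from 2178 bytes back in the output, followed by 2 literals.)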
++
++ 0 0 0 1 H L L L (16..31)
++ Copy of a block within 16..48kB distance (preferably less than 10B)
++ length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
++ distance = 16384 + (H << 14) + D
++ state = S (copy S literals after this block)
++ End of stream is reached if distance == 16384
++
++ 0 0 1 L L L L L (32..63)
++ Copy of small block within 16kB distance (preferably less than 34B)
++ length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
++ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
++ distance = D + 1
++ state = S (copy S literals after this block)
++
++ 0 1 L D D D S S (64..127)
++ Copy 3-4 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 3 + L
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 3) + D + 1
++
++ 1 L L D D D S S (128..255)
++ Copy 5-8 bytes from block within 2kB distance
++ state = S (copy S literals after this block)
++ length = 5 + L
++ Always followed by exactly one byte : H H H H H H H H
++ distance = (H << 3) + D + 1
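++
++ As a hedged example, the two single-extra-byte match encodings above can
++ be decoded in C like this (variable names invented for illustration) :
++
++   unsigned char op = *in++;               /* 64..255 */
++   unsigned char h = *in++;                /* the extra H byte */
++   size_t length, distance;
++
++   if (op < 128)                           /* 0 1 L D D D S S */
++           length = 3 + ((op >> 5) & 1);
++   else                                    /* 1 L L D D D S S */
++           length = 5 + ((op >> 5) & 3);
++   distance = ((size_t)h << 3) + ((op >> 2) & 7) + 1;
++   state = op & 3;                         /* copy S literals afterwards */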
++
++Authors
++
++ This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
++ analysis of the decompression code available in Linux 3.16-rc5. The code is
++ tricky; it is possible that this document contains mistakes or that a few
++ corner cases were overlooked. In any case, please report any doubts, fixes,
++ or proposed updates to the author(s) so that the document can be updated.
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 2908941..53838d9 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -425,6 +425,20 @@ fault through the slow path.
+ Since only 19 bits are used to store generation-number on mmio spte, all
+ pages are zapped when there is an overflow.
+
++Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
++times, the last one happening when the generation number is retrieved and
++stored into the MMIO spte. Thus, the MMIO spte might be created based on
++out-of-date information, but with an up-to-date generation number.
++
++To avoid this, the generation number is incremented again after synchronize_srcu
++returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
++memslot update, while some SRCU readers might be using the old copy. We do not
++want to use MMIO sptes created with an odd generation number, and we can avoid
++that without losing a bit in the MMIO spte. The low bit of the generation
++is not stored in the MMIO spte, and is presumed zero when it is extracted from
++the spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1,
++the next access to the spte will always be a cache miss.
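++
++A hedged sketch of the update-side pattern just described (illustrative
++pseudo-code, not the actual KVM helper functions):
++
++    slots->generation++;            /* low bit 1: update in progress */
++    rcu_assign_pointer(kvm->memslots, slots);
++    synchronize_srcu(&kvm->srcu);   /* wait out readers of the old copy */
++    slots->generation++;            /* low bit 0: update complete */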
++
+
+ Further reading
+ ===============
+diff --git a/Makefile b/Makefile
+index a59980e..135a04a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Remembering Coco
+
+diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+index 651aeb5..f3188e9 100644
+--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
++++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+@@ -144,6 +144,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+index 4e27587..da406c1 100644
+--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
++++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+@@ -146,6 +146,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+index ff049ee..b4aba09 100644
+--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
++++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+@@ -224,6 +224,10 @@
+ marvell,nand-enable-arbiter;
+ nand-on-flash-bbt;
+
++ /* Use Hardware BCH ECC */
++ nand-ecc-strength = <4>;
++ nand-ecc-step-size = <512>;
++
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x180000>; /* 1.5MB */
+diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
+index fece866..b8f234b 100644
+--- a/arch/arm/boot/dts/at91sam9263.dtsi
++++ b/arch/arm/boot/dts/at91sam9263.dtsi
+@@ -535,6 +535,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff80000 0x600>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+@@ -544,6 +545,7 @@
+ compatible = "atmel,hsmci";
+ reg = <0xfff84000 0x600>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>;
++ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
+index a077585..eaf4145 100644
+--- a/arch/arm/boot/dts/sama5d3_can.dtsi
++++ b/arch/arm/boot/dts/sama5d3_can.dtsi
+@@ -40,7 +40,7 @@
+ atmel,clk-output-range = <0 66000000>;
+ };
+
+- can1_clk: can0_clk {
++ can1_clk: can1_clk {
+ #clock-cells = <0>;
+ reg = <41>;
+ atmel,clk-output-range = <0 66000000>;
+diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
+index 034529d..d66f102 100644
+--- a/arch/arm/mach-at91/clock.c
++++ b/arch/arm/mach-at91/clock.c
+@@ -962,6 +962,7 @@ static int __init at91_clock_reset(void)
+ }
+
+ at91_pmc_write(AT91_PMC_SCDR, scdr);
++ at91_pmc_write(AT91_PMC_PCDR, pcdr);
+ if (cpu_is_sama5d3())
+ at91_pmc_write(AT91_PMC_PCDR1, pcdr1);
+
+diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
+index fda2704..e72289a 100644
+--- a/arch/arm64/include/asm/compat.h
++++ b/arch/arm64/include/asm/compat.h
+@@ -37,8 +37,8 @@ typedef s32 compat_ssize_t;
+ typedef s32 compat_time_t;
+ typedef s32 compat_clock_t;
+ typedef s32 compat_pid_t;
+-typedef u32 __compat_uid_t;
+-typedef u32 __compat_gid_t;
++typedef u16 __compat_uid_t;
++typedef u16 __compat_gid_t;
+ typedef u16 __compat_uid16_t;
+ typedef u16 __compat_gid16_t;
+ typedef u32 __compat_uid32_t;
+diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
+index 2c7dde3..2a5259f 100644
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -28,9 +28,11 @@
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr1,%4@(8)\n\t"
+@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
+ int hwreg_write( volatile void *regp, unsigned short val )
+ {
+ int ret;
++ unsigned long flags;
+ long save_sp, save_vbr;
+ long tmp_vectors[3];
+
++ local_irq_save(flags);
+ __asm__ __volatile__
+ ( "movec %/vbr,%2\n\t"
+ "movel #Lberr2,%4@(8)\n\t"
+@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
+ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
+ : "a" (regp), "a" (tmp_vectors), "g" (val)
+ );
++ local_irq_restore(flags);
+
+ return( ret );
+ }
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 4642d6a..de1ec54 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -329,16 +329,16 @@ struct direct_window {
+
+ /* Dynamic DMA Window support */
+ struct ddw_query_response {
+- __be32 windows_available;
+- __be32 largest_available_block;
+- __be32 page_size;
+- __be32 migration_capable;
++ u32 windows_available;
++ u32 largest_available_block;
++ u32 page_size;
++ u32 migration_capable;
+ };
+
+ struct ddw_create_response {
+- __be32 liobn;
+- __be32 addr_hi;
+- __be32 addr_lo;
++ u32 liobn;
++ u32 addr_hi;
++ u32 addr_lo;
+ };
+
+ static LIST_HEAD(direct_window_list);
+@@ -725,16 +725,18 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+- const u32 *ddw_avail;
++ u32 ddw_avail[3];
+ u64 liobn;
+- int len, ret = 0;
++ int ret = 0;
++
++ ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
++ &ddw_avail[0], 3);
+
+- ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+ if (!win64)
+ return;
+
+- if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
++ if (ret || win64->length < sizeof(*dwp))
+ goto delprop;
+
+ dwp = win64->value;
+@@ -872,8 +874,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+
+ do {
+ /* extra outputs are LIOBN and dma-addr (hi, lo) */
+- ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
+- BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
++ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
++ cfg_addr, BUID_HI(buid), BUID_LO(buid),
++ page_shift, window_shift);
+ } while (rtas_busy_delay(ret));
+ dev_info(&dev->dev,
+ "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
+@@ -910,7 +913,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ int page_shift;
+ u64 dma_addr, max_addr;
+ struct device_node *dn;
+- const u32 *uninitialized_var(ddw_avail);
++ u32 ddw_avail[3];
+ struct direct_window *window;
+ struct property *win64;
+ struct dynamic_dma_window_prop *ddwprop;
+@@ -942,8 +945,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ * for the given node in that order.
+ * the property is actually in the parent, not the PE
+ */
+- ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+- if (!ddw_avail || len < 3 * sizeof(u32))
++ ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
++ &ddw_avail[0], 3);
++ if (ret)
+ goto out_failed;
+
+ /*
+@@ -966,11 +970,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ dev_dbg(&dev->dev, "no free dynamic windows");
+ goto out_failed;
+ }
+- if (be32_to_cpu(query.page_size) & 4) {
++ if (query.page_size & 4) {
+ page_shift = 24; /* 16MB */
+- } else if (be32_to_cpu(query.page_size) & 2) {
++ } else if (query.page_size & 2) {
+ page_shift = 16; /* 64kB */
+- } else if (be32_to_cpu(query.page_size) & 1) {
++ } else if (query.page_size & 1) {
+ page_shift = 12; /* 4kB */
+ } else {
+ dev_dbg(&dev->dev, "no supported direct page size in mask %x",
+@@ -980,7 +984,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ /* verify the window * number of ptes will map the partition */
+ /* check largest block * page size > max memory hotplug addr */
+ max_addr = memory_hotplug_max();
+- if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
++ if (query.largest_available_block < (max_addr >> page_shift)) {
+ dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
+ "%llu-sized pages\n", max_addr, query.largest_available_block,
+ 1ULL << page_shift);
+@@ -1006,8 +1010,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ if (ret != 0)
+ goto out_free_prop;
+
+- ddwprop->liobn = create.liobn;
+- ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
++ ddwprop->liobn = cpu_to_be32(create.liobn);
++ ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
++ create.addr_lo);
+ ddwprop->tce_shift = cpu_to_be32(page_shift);
+ ddwprop->window_shift = cpu_to_be32(len);
+
+@@ -1039,7 +1044,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
+
+- dma_addr = of_read_number(&create.addr_hi, 2);
++ dma_addr = be64_to_cpu(ddwprop->dma_base);
+ goto out_unlock;
+
+ out_free_window:
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 5f79d2d..f1ba119 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -71,6 +71,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+ return 0;
+ if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
+ return 1;
++ return 0;
+ case KVM_S390_INT_EMERGENCY:
+ if (psw_extint_disabled(vcpu))
+ return 0;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index b398c68..a38513c 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -67,6 +67,7 @@ config SPARC64
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_DEBUG_KMEMLEAK
++ select SPARSE_IRQ
+ select RTC_DRV_CMOS
+ select RTC_DRV_BQ4802
+ select RTC_DRV_SUN4V
+diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
+index ca121f0..17be9d6 100644
+--- a/arch/sparc/include/asm/hypervisor.h
++++ b/arch/sparc/include/asm/hypervisor.h
+@@ -2944,6 +2944,16 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+ #endif
+
++#define HV_FAST_T5_GET_PERFREG 0x1a8
++#define HV_FAST_T5_SET_PERFREG 0x1a9
++
++#ifndef __ASSEMBLY__
++unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
++ unsigned long *reg_val);
++unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
++ unsigned long reg_val);
++#endif
++
+ /* Function numbers for HV_CORE_TRAP. */
+ #define HV_CORE_SET_VER 0x00
+ #define HV_CORE_PUTCHAR 0x01
+@@ -2975,6 +2985,7 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+ #define HV_GRP_VF_CPU 0x0205
+ #define HV_GRP_KT_CPU 0x0209
+ #define HV_GRP_VT_CPU 0x020c
++#define HV_GRP_T5_CPU 0x0211
+ #define HV_GRP_DIAG 0x0300
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
+index abf6afe..3deb07f 100644
+--- a/arch/sparc/include/asm/irq_64.h
++++ b/arch/sparc/include/asm/irq_64.h
+@@ -37,7 +37,7 @@
+ *
+ * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
+ */
+-#define NR_IRQS 255
++#define NR_IRQS (2048)
+
+ extern void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+@@ -57,11 +57,8 @@ extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+ unsigned long iclr_base);
+ extern void sun4u_destroy_msi(unsigned int irq);
+
+-extern unsigned char irq_alloc(unsigned int dev_handle,
+- unsigned int dev_ino);
+-#ifdef CONFIG_PCI_MSI
+-extern void irq_free(unsigned int irq);
+-#endif
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
++void irq_free(unsigned int irq);
+
+ extern void __init init_IRQ(void);
+ extern void fixup_irqs(void);
+diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
+index bdb524a..8732ed3 100644
+--- a/arch/sparc/include/asm/ldc.h
++++ b/arch/sparc/include/asm/ldc.h
+@@ -53,13 +53,14 @@ struct ldc_channel;
+ /* Allocate state for a channel. */
+ extern struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+- void *event_arg);
++ void *event_arg,
++ const char *name);
+
+ /* Shut down and free state for a channel. */
+ extern void ldc_free(struct ldc_channel *lp);
+
+ /* Register TX and RX queues of the link with the hypervisor. */
+-extern int ldc_bind(struct ldc_channel *lp, const char *name);
++extern int ldc_bind(struct ldc_channel *lp);
+
+ /* For non-RAW protocols we need to complete a handshake before
+ * communication can proceed. ldc_connect() does that, if the
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a12dbe3..e48fdf4 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+ * preferably as early as possible. Pass it the romvec pointer.
+ */
+-extern void prom_init(void *cif_handler, void *cif_stack);
++extern void prom_init(void *cif_handler);
++extern void prom_init_report(void);
+
+ /* Boot argument acquisition, returns the boot command line string. */
+ extern char *prom_getbootargs(void);
+diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
+index aac53fc..b18e602 100644
+--- a/arch/sparc/include/asm/page_64.h
++++ b/arch/sparc/include/asm/page_64.h
+@@ -57,18 +57,21 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long iopte; } iopte_t;
+ typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+
+ #define pte_val(x) ((x).pte)
+ #define iopte_val(x) ((x).iopte)
+ #define pmd_val(x) ((x).pmd)
++#define pud_val(x) ((x).pud)
+ #define pgd_val(x) ((x).pgd)
+ #define pgprot_val(x) ((x).pgprot)
+
+ #define __pte(x) ((pte_t) { (x) } )
+ #define __iopte(x) ((iopte_t) { (x) } )
+ #define __pmd(x) ((pmd_t) { (x) } )
++#define __pud(x) ((pud_t) { (x) } )
+ #define __pgd(x) ((pgd_t) { (x) } )
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+
+@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef unsigned long pte_t;
+ typedef unsigned long iopte_t;
+ typedef unsigned long pmd_t;
++typedef unsigned long pud_t;
+ typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+
+ #define pte_val(x) (x)
+ #define iopte_val(x) (x)
+ #define pmd_val(x) (x)
++#define pud_val(x) (x)
+ #define pgd_val(x) (x)
+ #define pgprot_val(x) (x)
+
+ #define __pte(x) (x)
+ #define __iopte(x) (x)
+ #define __pmd(x) (x)
++#define __pud(x) (x)
+ #define __pgd(x) (x)
+ #define __pgprot(x) (x)
+
+@@ -96,21 +102,14 @@ typedef unsigned long pgprot_t;
+
+ typedef pte_t *pgtable_t;
+
+-/* These two values define the virtual address space range in which we
+- * must forbid 64-bit user processes from making mappings. It used to
+- * represent precisely the virtual address space hole present in most
+- * early sparc64 chips including UltraSPARC-I. But now it also is
+- * further constrained by the limits of our page tables, which is
+- * 43-bits of virtual address.
+- */
+-#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL)
+-#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL)
++extern unsigned long sparc64_va_hole_top;
++extern unsigned long sparc64_va_hole_bottom;
+
+ /* The next two defines specify the actual exclusion region we
+ * enforce, wherein we use a 4GB red zone on each side of the VA hole.
+ */
+-#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
+-#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
++#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
++#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
+
+ #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
+ _AC(0x0000000070000000,UL) : \
+@@ -118,20 +117,16 @@ typedef pte_t *pgtable_t;
+
+ #include <asm-generic/memory_model.h>
+
+-#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
+ extern unsigned long PAGE_OFFSET;
+
+ #endif /* !(__ASSEMBLY__) */
+
+-/* The maximum number of physical memory address bits we support, this
+- * is used to size various tables used to manage kernel TLB misses and
+- * also the sparsemem code.
++/* The maximum number of physical memory address bits we support. The
++ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
++ * evaluates to.
+ */
+-#define MAX_PHYS_ADDRESS_BITS 47
++#define MAX_PHYS_ADDRESS_BITS 53
+
+-/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+- * and kpte_linear_bitmap.
+- */
+ #define ILOG2_4MB 22
+ #define ILOG2_256MB 28
+
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index bcfe063..2c8d41f 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -15,6 +15,13 @@
+
+ extern struct kmem_cache *pgtable_cache;
+
++static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
++{
++ pgd_set(pgd, pud);
++}
++
++#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+ return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
+@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ kmem_cache_free(pgtable_cache, pgd);
+ }
+
+-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
++{
++ pud_set(pud, pmd);
++}
++
++#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ return kmem_cache_alloc(pgtable_cache,
++ GFP_KERNEL|__GFP_REPEAT);
++}
++
++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
++{
++ kmem_cache_free(pgtable_cache, pud);
++}
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
+ #define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, false)
+
++#define __pud_free_tlb(tlb, pud, addr) \
++ pgtable_free_tlb(tlb, pud, false)
++
+ #endif /* _SPARC64_PGALLOC_H */
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 1a49ffd..e8dfabf 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -20,8 +20,6 @@
+ #include <asm/page.h>
+ #include <asm/processor.h>
+
+-#include <asm-generic/pgtable-nopud.h>
+-
+ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
+ * The page copy blockops can use 0x6000000 to 0x8000000.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+@@ -42,10 +40,7 @@
+ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
+ #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
+ #define VMALLOC_START _AC(0x0000000100000000,UL)
+-#define VMALLOC_END _AC(0x0000010000000000,UL)
+-#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
+-
+-#define vmemmap ((struct page *)VMEMMAP_BASE)
++#define VMEMMAP_BASE VMALLOC_END
+
+ /* PMD_SHIFT determines the size of the area a second-level page
+ * table can map
+@@ -55,13 +50,25 @@
+ #define PMD_MASK (~(PMD_SIZE-1))
+ #define PMD_BITS (PAGE_SHIFT - 3)
+
+-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
++/* PUD_SHIFT determines the size of the area a third-level page
++ * table can map
++ */
++#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PUD_BITS (PAGE_SHIFT - 3)
++
++/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
++#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
+ #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+ #define PGDIR_BITS (PAGE_SHIFT - 3)
+
+-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
++#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
++#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
++#endif
++
++#if (PGDIR_SHIFT + PGDIR_BITS) != 53
+ #error Page table parameters do not cover virtual address space properly.
+ #endif
+
+@@ -71,28 +78,18 @@
+
+ #ifndef __ASSEMBLY__
+
+-#include <linux/sched.h>
+-
+-extern unsigned long sparc64_valid_addr_bitmap[];
++extern unsigned long VMALLOC_END;
+
+-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+-static inline bool __kern_addr_valid(unsigned long paddr)
+-{
+- if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
+- return false;
+- return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+-}
++#define vmemmap ((struct page *)VMEMMAP_BASE)
+
+-static inline bool kern_addr_valid(unsigned long addr)
+-{
+- unsigned long paddr = __pa(addr);
++#include <linux/sched.h>
+
+- return __kern_addr_valid(paddr);
+-}
++bool kern_addr_valid(unsigned long addr);
+
+ /* Entries per page directory level. */
+ #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
+ #define PTRS_PER_PMD (1UL << PMD_BITS)
++#define PTRS_PER_PUD (1UL << PUD_BITS)
+ #define PTRS_PER_PGD (1UL << PGDIR_BITS)
+
+ /* Kernel has a separate 44bit address space. */
+@@ -101,6 +98,9 @@ static inline bool kern_addr_valid(unsigned long addr)
+ #define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
++#define pud_ERROR(e) \
++ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
++ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
+ #define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
+@@ -112,6 +112,7 @@ static inline bool kern_addr_valid(unsigned long addr)
+ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
+ #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */
+ #define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
++#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE
+
+ /* Advertise support for _PAGE_SPECIAL */
+ #define __HAVE_ARCH_PTE_SPECIAL
+@@ -658,26 +659,26 @@ static inline unsigned long pmd_large(pmd_t pmd)
+ return pte_val(pte) & _PAGE_PMD_HUGE;
+ }
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline unsigned long pmd_young(pmd_t pmd)
++static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_young(pte);
++ return pte_pfn(pte);
+ }
+
+-static inline unsigned long pmd_write(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_write(pte);
++ return pte_young(pte);
+ }
+
+-static inline unsigned long pmd_pfn(pmd_t pmd)
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+ pte_t pte = __pte(pmd_val(pmd));
+
+- return pte_pfn(pte);
++ return pte_write(pte);
+ }
+
+ static inline unsigned long pmd_trans_huge(pmd_t pmd)
+@@ -771,13 +772,15 @@ static inline int pmd_present(pmd_t pmd)
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well. We also validate the physical itself.
+ */
+-#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
+- !__kern_addr_valid(pmd_val(pmd)))
++#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+ #define pud_none(pud) (!pud_val(pud))
+
+-#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
+- !__kern_addr_valid(pud_val(pud)))
++#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
++
++#define pgd_none(pgd) (!pgd_val(pgd))
++
++#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+@@ -815,10 +818,31 @@ static inline unsigned long __pmd_page(pmd_t pmd)
+ #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
+ #define pud_present(pud) (pud_val(pud) != 0U)
+ #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
++#define pgd_page_vaddr(pgd) \
++ ((unsigned long) __va(pgd_val(pgd)))
++#define pgd_present(pgd) (pgd_val(pgd) != 0U)
++#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)
++
++static inline unsigned long pud_large(pud_t pud)
++{
++ pte_t pte = __pte(pud_val(pud));
++
++ return pte_val(pte) & _PAGE_PMD_HUGE;
++}
++
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ pte_t pte = __pte(pud_val(pud));
++
++ return pte_pfn(pte);
++}
+
+ /* Same in both SUN4V and SUN4U. */
+ #define pte_none(pte) (!pte_val(pte))
+
++#define pgd_set(pgdp, pudp) \
++ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
++
+ /* to find an entry in a page-table-directory. */
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+ #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+@@ -826,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
+ /* to find an entry in a kernel page-table-directory */
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
++/* Find an entry in the third-level page table.. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
++#define pud_offset(pgdp, address) \
++ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
++
+ /* Find an entry in the second-level page table.. */
+ #define pmd_offset(pudp, address) \
+ ((pmd_t *) pud_page_vaddr(*(pudp)) + \
+@@ -898,7 +927,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ #endif
+
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+-extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
+
+ extern void paging_init(void);
+ extern unsigned long find_ecache_flush_span(unsigned long size);
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 5e35e05..acd6146 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -24,6 +24,10 @@ static inline int con_is_present(void)
+ }
+ #endif
+
++#ifdef CONFIG_SPARC64
++extern void __init start_early_boot(void);
++#endif
++
+ extern void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
+index 6b67e50..69424d4 100644
+--- a/arch/sparc/include/asm/spitfire.h
++++ b/arch/sparc/include/asm/spitfire.h
+@@ -45,6 +45,8 @@
+ #define SUN4V_CHIP_NIAGARA3 0x03
+ #define SUN4V_CHIP_NIAGARA4 0x04
+ #define SUN4V_CHIP_NIAGARA5 0x05
++#define SUN4V_CHIP_SPARC_M6 0x06
++#define SUN4V_CHIP_SPARC_M7 0x07
+ #define SUN4V_CHIP_SPARC64X 0x8a
+ #define SUN4V_CHIP_UNKNOWN 0xff
+
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index a5f01ac..cc6275c 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -63,7 +63,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
+- unsigned long fpregs[0] __attribute__ ((aligned(64)));
++ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
++ __attribute__ ((aligned(64)));
+ };
+
+ #endif /* !(__ASSEMBLY__) */
+@@ -102,6 +103,7 @@ struct thread_info {
+ #define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
+ #define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
+ #define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
++#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */
+
+ #if PAGE_SHIFT == 13
+ #define THREAD_SIZE (2*PAGE_SIZE)
+diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
+index 90916f9..ecb49cf 100644
+--- a/arch/sparc/include/asm/tsb.h
++++ b/arch/sparc/include/asm/tsb.h
+@@ -133,9 +133,24 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ sub TSB, 0x8, TSB; \
+ TSB_STORE(TSB, TAG);
+
+- /* Do a kernel page table walk. Leaves physical PTE pointer in
+- * REG1. Jumps to FAIL_LABEL on early page table walk termination.
+- * VADDR will not be clobbered, but REG2 will.
++ /* Do a kernel page table walk. Leaves valid PTE value in
++ * REG1. Jumps to FAIL_LABEL on early page table walk
++ * termination. VADDR will not be clobbered, but REG2 will.
++ *
++ * There are two masks we must apply to propagate bits from
++ * the virtual address into the PTE physical address field
++ * when dealing with huge pages. This is because the page
++ * table boundaries do not match the huge page size(s) the
++ * hardware supports.
++ *
++ * In these cases we propagate the bits that are below the
++ * page table level where we saw the huge page mapping, but
++ * are still within the relevant physical bits for the huge
++ * page size in question. So for PMD mappings (which fall on
++ * bit 23, for 8MB per PMD) we must propagate bit 22 for a
++ * 4MB huge page. For huge PUDs (which fall on bit 33, for
++ * 8GB per PUD), we have to accommodate 256MB and 2GB huge
++ * pages. So for those we propagate bits 32 to 28.
+ */
+ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
+ sethi %hi(swapper_pg_dir), REG1; \
+@@ -145,15 +160,40 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ andn REG2, 0x7, REG2; \
+ ldx [REG1 + REG2], REG1; \
+ brz,pn REG1, FAIL_LABEL; \
+- sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
+- sllx VADDR, 64 - PMD_SHIFT, REG2; \
++ sethi %uhi(_PAGE_PUD_HUGE), REG2; \
++ brz,pn REG1, FAIL_LABEL; \
++ sllx REG2, 32, REG2; \
++ andcc REG1, REG2, %g0; \
++ sethi %hi(0xf8000000), REG2; \
++ bne,pt %xcc, 697f; \
++ sllx REG2, 1, REG2; \
++ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+- add REG1, REG2, REG1;
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ sethi %uhi(_PAGE_PMD_HUGE), REG2; \
++ brz,pn REG1, FAIL_LABEL; \
++ sllx REG2, 32, REG2; \
++ andcc REG1, REG2, %g0; \
++ be,pn %xcc, 698f; \
++ sethi %hi(0x400000), REG2; \
++697: brgez,pn REG1, FAIL_LABEL; \
++ andn REG1, REG2, REG1; \
++ and VADDR, REG2, REG2; \
++ ba,pt %xcc, 699f; \
++ or REG1, REG2, REG1; \
++698: sllx VADDR, 64 - PMD_SHIFT, REG2; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ brgez,pn REG1, FAIL_LABEL; \
++ nop; \
++699:
+
+ /* PMD has been loaded into REG1, interpret the value, seeing
+ * if it is a HUGE PMD or a normal one. If it is not valid
+@@ -198,6 +238,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ andn REG2, 0x7, REG2; \
+ ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
++ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
++ srlx REG2, 64 - PAGE_SHIFT, REG2; \
++ andn REG2, 0x7, REG2; \
++ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
++ brz,pn REG1, FAIL_LABEL; \
+ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+@@ -246,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ (KERNEL_TSB_SIZE_BYTES / 16)
+ #define KERNEL_TSB4M_NENTRIES 4096
+
+-#define KTSB_PHYS_SHIFT 15
+-
+ /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
+ * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
+ * and the found TTE will be left in REG1. REG3 and REG4 must
+@@ -256,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ * VADDR and TAG will be preserved and not clobbered by this macro.
+ */
+ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661: sethi %hi(swapper_tsb), REG1; \
+- or REG1, %lo(swapper_tsb), REG1; \
++661: sethi %uhi(swapper_tsb), REG1; \
++ sethi %hi(swapper_tsb), REG2; \
++ or REG1, %ulo(swapper_tsb), REG1; \
++ or REG2, %lo(swapper_tsb), REG2; \
+ .section .swapper_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+-661: nop; \
+- .section .tsb_ldquad_phys_patch, "ax"; \
+- .word 661b; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- .previous; \
++ sllx REG1, 32, REG1; \
++ or REG1, REG2, REG1; \
+ srlx VADDR, PAGE_SHIFT, REG2; \
+ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
+ sllx REG2, 4, REG2; \
+@@ -281,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
+ * we can make use of that for the index computation.
+ */
+ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+-661: sethi %hi(swapper_4m_tsb), REG1; \
+- or REG1, %lo(swapper_4m_tsb), REG1; \
++661: sethi %uhi(swapper_4m_tsb), REG1; \
++ sethi %hi(swapper_4m_tsb), REG2; \
++ or REG1, %ulo(swapper_4m_tsb), REG1; \
++ or REG2, %lo(swapper_4m_tsb), REG2; \
+ .section .swapper_4m_tsb_phys_patch, "ax"; \
+ .word 661b; \
+ .previous; \
+-661: nop; \
+- .section .tsb_ldquad_phys_patch, "ax"; \
+- .word 661b; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
+- .previous; \
++ sllx REG1, 32, REG1; \
++ or REG1, REG2, REG1; \
+ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
+ sllx REG2, 4, REG2; \
+ add REG1, REG2, REG2; \
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 39ca301..11fdf0e 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -39,6 +39,14 @@
+ 297: wr %o5, FPRS_FEF, %fprs; \
+ 298:
+
++#define VISEntryHalfFast(fail_label) \
++ rd %fprs, %o5; \
++ andcc %o5, FPRS_FEF, %g0; \
++ be,pt %icc, 297f; \
++ nop; \
++ ba,a,pt %xcc, fail_label; \
++297: wr %o5, FPRS_FEF, %fprs;
++
+ #define VISExitHalf \
+ wr %o5, 0, %fprs;
+
+diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
+index 5c51258..52e10de 100644
+--- a/arch/sparc/kernel/cpu.c
++++ b/arch/sparc/kernel/cpu.c
+@@ -493,6 +493,18 @@ static void __init sun4v_cpu_probe(void)
+ sparc_pmu_type = "niagara5";
+ break;
+
++ case SUN4V_CHIP_SPARC_M6:
++ sparc_cpu_type = "SPARC-M6";
++ sparc_fpu_type = "SPARC-M6 integrated FPU";
++ sparc_pmu_type = "sparc-m6";
++ break;
++
++ case SUN4V_CHIP_SPARC_M7:
++ sparc_cpu_type = "SPARC-M7";
++ sparc_fpu_type = "SPARC-M7 integrated FPU";
++ sparc_pmu_type = "sparc-m7";
++ break;
++
+ case SUN4V_CHIP_SPARC64X:
+ sparc_cpu_type = "SPARC64-X";
+ sparc_fpu_type = "SPARC64-X integrated FPU";
+diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
+index de1c844..e69ec0e 100644
+--- a/arch/sparc/kernel/cpumap.c
++++ b/arch/sparc/kernel/cpumap.c
+@@ -326,6 +326,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+ case SUN4V_CHIP_NIAGARA3:
+ case SUN4V_CHIP_NIAGARA4:
+ case SUN4V_CHIP_NIAGARA5:
++ case SUN4V_CHIP_SPARC_M6:
++ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC64X:
+ rover_inc_table = niagara_iterate_method;
+ break;
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index dff60ab..f87a55d 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ ds_cfg.tx_irq = vdev->tx_irq;
+ ds_cfg.rx_irq = vdev->rx_irq;
+
+- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
++ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out_free_ds_states;
+ }
+ dp->lp = lp;
+
+- err = ldc_bind(lp, "DS");
++ err = ldc_bind(lp);
+ if (err)
+ goto out_free_ldc;
+
+diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
+index b2c2c5b..d668ca14 100644
+--- a/arch/sparc/kernel/dtlb_prot.S
++++ b/arch/sparc/kernel/dtlb_prot.S
+@@ -24,11 +24,11 @@
+ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
+
+ /* PROT ** ICACHE line 2: More real fault processing */
++ ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+ bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
+- ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
+- ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
+ mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+- nop
++ ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
++ nop
+ nop
+ nop
+ nop
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index 140966f..c88ffb9 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -66,13 +66,10 @@ struct pause_patch_entry {
+ extern struct pause_patch_entry __pause_3insn_patch,
+ __pause_3insn_patch_end;
+
+-extern void __init per_cpu_patch(void);
+ extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ struct sun4v_1insn_patch_entry *);
+ extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ struct sun4v_2insn_patch_entry *);
+-extern void __init sun4v_patch(void);
+-extern void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 452f04f..3d61fca 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -427,6 +427,12 @@ sun4v_chip_type:
+ cmp %g2, '5'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA5, %g4
++ cmp %g2, '6'
++ be,pt %xcc, 5f
++ mov SUN4V_CHIP_SPARC_M6, %g4
++ cmp %g2, '7'
++ be,pt %xcc, 5f
++ mov SUN4V_CHIP_SPARC_M7, %g4
+ ba,pt %xcc, 49f
+ nop
+
+@@ -585,6 +591,12 @@ niagara_tlb_fixup:
+ cmp %g1, SUN4V_CHIP_NIAGARA5
+ be,pt %xcc, niagara4_patch
+ nop
++ cmp %g1, SUN4V_CHIP_SPARC_M6
++ be,pt %xcc, niagara4_patch
++ nop
++ cmp %g1, SUN4V_CHIP_SPARC_M7
++ be,pt %xcc, niagara4_patch
++ nop
+
+ call generic_patch_copyops
+ nop
+@@ -660,14 +672,12 @@ tlb_fixup_done:
+ sethi %hi(init_thread_union), %g6
+ or %g6, %lo(init_thread_union), %g6
+ ldx [%g6 + TI_TASK], %g4
+- mov %sp, %l6
+
+ wr %g0, ASI_P, %asi
+ mov 1, %g1
+ sllx %g1, THREAD_SHIFT, %g1
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ add %g6, %g1, %sp
+- mov 0, %fp
+
+ /* Set per-cpu pointer initially to zero, this makes
+ * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -694,44 +704,14 @@ tlb_fixup_done:
+ nop
+ #endif
+
+- mov %l6, %o1 ! OpenPROM stack
+ call prom_init
+ mov %l7, %o0 ! OpenPROM cif handler
+
+- /* Initialize current_thread_info()->cpu as early as possible.
+- * In order to do that accurately we have to patch up the get_cpuid()
+- * assembler sequences. And that, in turn, requires that we know
+- * if we are on a Starfire box or not. While we're here, patch up
+- * the sun4v sequences as well.
++ /* To create a one-register-window buffer between the kernel's
++ * initial stack and the last stack frame we use from the firmware,
++ * do the rest of the boot from a C helper function.
+ */
+- call check_if_starfire
+- nop
+- call per_cpu_patch
+- nop
+- call sun4v_patch
+- nop
+-
+-#ifdef CONFIG_SMP
+- call hard_smp_processor_id
+- nop
+- cmp %o0, NR_CPUS
+- blu,pt %xcc, 1f
+- nop
+- call boot_cpu_id_too_large
+- nop
+- /* Not reached... */
+-
+-1:
+-#else
+- mov 0, %o0
+-#endif
+- sth %o0, [%g6 + TI_CPU]
+-
+- call prom_init_report
+- nop
+-
+- /* Off we go.... */
+- call start_kernel
++ call start_early_boot
+ nop
+ /* Not reached... */
+
+diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
+index c0a2de0..5c55145 100644
+--- a/arch/sparc/kernel/hvapi.c
++++ b/arch/sparc/kernel/hvapi.c
+@@ -46,6 +46,7 @@ static struct api_info api_table[] = {
+ { .group = HV_GRP_VF_CPU, },
+ { .group = HV_GRP_KT_CPU, },
+ { .group = HV_GRP_VT_CPU, },
++ { .group = HV_GRP_T5_CPU, },
+ { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ };
+
+diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
+index f3ab509..caedf83 100644
+--- a/arch/sparc/kernel/hvcalls.S
++++ b/arch/sparc/kernel/hvcalls.S
+@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg)
+ retl
+ nop
+ ENDPROC(sun4v_vt_set_perfreg)
++
++ENTRY(sun4v_t5_get_perfreg)
++ mov %o1, %o4
++ mov HV_FAST_T5_GET_PERFREG, %o5
++ ta HV_FAST_TRAP
++ stx %o1, [%o4]
++ retl
++ nop
++ENDPROC(sun4v_t5_get_perfreg)
++
++ENTRY(sun4v_t5_set_perfreg)
++ mov HV_FAST_T5_SET_PERFREG, %o5
++ ta HV_FAST_TRAP
++ retl
++ nop
++ENDPROC(sun4v_t5_set_perfreg)
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index b7ddcdd..cdbfec2 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -109,7 +109,6 @@ hv_cpu_startup:
+ sllx %g5, THREAD_SHIFT, %g5
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ add %g6, %g5, %sp
+- mov 0, %fp
+
+ call init_irqwork_curcpu
+ nop
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index e7e215d..c2d81ad 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ }
+
+ order = get_order(len_total);
+- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
++ va = __get_free_pages(gfp, order);
++ if (va == 0)
+ goto err_nopages;
+
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
+@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ }
+
+ order = get_order(len_total);
+- va = (void *) __get_free_pages(GFP_KERNEL, order);
++ va = (void *) __get_free_pages(gfp, order);
+ if (va == NULL) {
+ printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+ goto err_nopages;
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index 666193f..4033c23 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -47,8 +47,6 @@
+ #include "cpumap.h"
+ #include "kstack.h"
+
+-#define NUM_IVECS (IMAP_INR + 1)
+-
+ struct ino_bucket *ivector_table;
+ unsigned long ivector_table_pa;
+
+@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
+
+ #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
+
+-static struct {
+- unsigned int dev_handle;
+- unsigned int dev_ino;
+- unsigned int in_use;
+-} irq_table[NR_IRQS];
+-static DEFINE_SPINLOCK(irq_alloc_lock);
++static unsigned long hvirq_major __initdata;
++static int __init early_hvirq_major(char *p)
++{
++ int rc = kstrtoul(p, 10, &hvirq_major);
++
++ return rc;
++}
++early_param("hvirq", early_hvirq_major);
++
++static int hv_irq_version;
++
++/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
++ * based interfaces, but:
++ *
++ * 1) Several OSs, Solaris and Linux included, use them even when only
++ * negotiating version 1.0 (or failing to negotiate at all). So the
++ * hypervisor has a workaround that provides the VIRQ interfaces even
++ * when only version 1.0 of the API is in use.
++ *
++ * 2) Second, and more importantly, with major version 2.0 these VIRQ
++ * interfaces were only actually hooked up for LDC interrupts, even
++ * though the Hypervisor specification clearly stated:
++ *
++ * The new interrupt API functions will be available to a guest
++ * when it negotiates version 2.0 in the interrupt API group 0x2. When
++ * a guest negotiates version 2.0, all interrupt sources will only
++ * support using the cookie interface, and any attempt to use the
++ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
++ * ENOTSUPPORTED error being returned.
++ *
++ * with an emphasis on "all interrupt sources".
++ *
++ * To correct this, major version 3.0 was created which does actually
++ * support VIRQs for all interrupt sources (not just LDC devices). So
++ * if we want to move completely over to the cookie based VIRQs we must
++ * negotiate major version 3.0 or later of HV_GRP_INTR.
++ */
++static bool sun4v_cookie_only_virqs(void)
++{
++ if (hv_irq_version >= 3)
++ return true;
++ return false;
++}
+
+-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++static void __init irq_init_hv(void)
+ {
+- unsigned long flags;
+- unsigned char ent;
++ unsigned long hv_error, major, minor = 0;
++
++ if (tlb_type != hypervisor)
++ return;
+
+- BUILD_BUG_ON(NR_IRQS >= 256);
++ if (hvirq_major)
++ major = hvirq_major;
++ else
++ major = 3;
+
+- spin_lock_irqsave(&irq_alloc_lock, flags);
++ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
++ if (!hv_error)
++ hv_irq_version = major;
++ else
++ hv_irq_version = 1;
+
+- for (ent = 1; ent < NR_IRQS; ent++) {
+- if (!irq_table[ent].in_use)
++ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
++ hv_irq_version,
++ sun4v_cookie_only_virqs() ? "enabled" : "disabled");
++}
++
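++/* Illustrative note: the "hvirq=" early parameter parsed above can pin
++ * the HV_GRP_INTR major version.  Booting with "hvirq=2", for example,
++ * keeps the legacy sysino path, since cookie-only operation requires
++ * major 3 or later; if registration fails, hv_irq_version falls back
++ * to 1.
++ */
++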
++/* This function is for the timer interrupt. */
++int __init arch_probe_nr_irqs(void)
++{
++ return 1;
++}
++
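++/* Returning 1 here preallocates only the timer IRQ descriptor; device
++ * interrupts are allocated on demand through irq_alloc() and
++ * __irq_alloc_descs() below.
++ */
++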
++#define DEFAULT_NUM_IVECS (0xfffU)
++static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
++#define NUM_IVECS (nr_ivec)
++
++static unsigned int __init size_nr_ivec(void)
++{
++ if (tlb_type == hypervisor) {
++ switch (sun4v_chip_type) {
++ /* Athena's devhandle|devino is large. */
++ case SUN4V_CHIP_SPARC64X:
++ nr_ivec = 0xffff;
+ break;
++ }
+ }
+- if (ent >= NR_IRQS) {
+- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
+- ent = 0;
+- } else {
+- irq_table[ent].dev_handle = dev_handle;
+- irq_table[ent].dev_ino = dev_ino;
+- irq_table[ent].in_use = 1;
+- }
++ return nr_ivec;
++}
++
++struct irq_handler_data {
++ union {
++ struct {
++ unsigned int dev_handle;
++ unsigned int dev_ino;
++ };
++ unsigned long sysino;
++ };
++ struct ino_bucket bucket;
++ unsigned long iclr;
++ unsigned long imap;
++};
++
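++/* A rough reading of the union above: the cookie based VIRQ path keeps
++ * the (dev_handle, dev_ino) pair, while the legacy path keeps the
++ * single sysino; the accessors below return whichever view the
++ * irq_chip in use expects.
++ */
++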
++static inline unsigned int irq_data_to_handle(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
++
++ return ihd->dev_handle;
++}
++
++static inline unsigned int irq_data_to_ino(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
+
+- spin_unlock_irqrestore(&irq_alloc_lock, flags);
++ return ihd->dev_ino;
++}
++
++static inline unsigned long irq_data_to_sysino(struct irq_data *data)
++{
++ struct irq_handler_data *ihd = data->handler_data;
+
+- return ent;
++ return ihd->sysino;
+ }
+
+-#ifdef CONFIG_PCI_MSI
+ void irq_free(unsigned int irq)
+ {
+- unsigned long flags;
++ void *data = irq_get_handler_data(irq);
+
+- if (irq >= NR_IRQS)
+- return;
++ kfree(data);
++ irq_set_handler_data(irq, NULL);
++ irq_free_descs(irq, 1);
++}
+
+- spin_lock_irqsave(&irq_alloc_lock, flags);
++unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
++{
++ int irq;
+
+- irq_table[irq].in_use = 0;
++ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
++ if (irq <= 0)
++ goto out;
+
+- spin_unlock_irqrestore(&irq_alloc_lock, flags);
++ return irq;
++out:
++ return 0;
++}
++
++static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
++{
++ unsigned long hv_err, cookie;
++ struct ino_bucket *bucket;
++ unsigned int irq = 0U;
++
++ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
++ if (hv_err) {
++ pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
++ goto out;
++ }
++
++ if (cookie & (1UL << 63UL)) {
++ cookie = ~cookie;
++ bucket = (struct ino_bucket *) __va(cookie);
++ irq = bucket->__irq;
++ }
++out:
++ return irq;
++}
++
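++/* Worked example of the cookie encoding (see cookie_assign() below):
++ * the cookie is the bitwise NOT of the bucket's physical address, so a
++ * bucket PA of 0x0000000012345678 yields 0xffffffffedcba987, which has
++ * bit 63 set; inverting it again recovers the bucket and its __irq.
++ */
++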
++static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
++{
++ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++ struct ino_bucket *bucket;
++ unsigned int irq;
++
++ bucket = &ivector_table[sysino];
++ irq = bucket_get_irq(__pa(bucket));
++
++ return irq;
++}
++
++void ack_bad_irq(unsigned int irq)
++{
++ pr_crit("BAD IRQ ack %d\n", irq);
++}
++
++void irq_install_pre_handler(int irq,
++ void (*func)(unsigned int, void *, void *),
++ void *arg1, void *arg2)
++{
++ pr_warn("IRQ pre handler NOT supported.\n");
+ }
+-#endif
+
+ /*
+ * /proc/interrupts printing:
+@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
+ return tid;
+ }
+
+-struct irq_handler_data {
+- unsigned long iclr;
+- unsigned long imap;
+-
+- void (*pre_handler)(unsigned int, void *, void *);
+- void *arg1;
+- void *arg2;
+-};
+-
+ #ifdef CONFIG_SMP
+ static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
+ {
+@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
+
+ static void sun4v_irq_enable(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_settarget(ino, cpuid);
+@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
+ static int sun4v_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask);
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_settarget(ino, cpuid);
+@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
+
+ static void sun4v_irq_disable(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
+
+ static void sun4v_irq_eoi(struct irq_data *data)
+ {
+- unsigned int ino = irq_table[data->irq].dev_ino;
++ unsigned int ino = irq_data_to_sysino(data);
+ int err;
+
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
+
+ static void sun4v_virq_enable(struct irq_data *data)
+ {
+- unsigned long cpuid, dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
++ unsigned long cpuid;
+ int err;
+
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
+ static int sun4v_virt_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+ {
+- unsigned long cpuid, dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
++ unsigned long cpuid;
+ int err;
+
+ cpuid = irq_choose_cpu(data->irq, mask);
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
+
+ static void sun4v_virq_disable(struct irq_data *data)
+ {
+- unsigned long dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
+ int err;
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+ HV_INTR_DISABLED);
+@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
+
+ static void sun4v_virq_eoi(struct irq_data *data)
+ {
+- unsigned long dev_handle, dev_ino;
++ unsigned long dev_handle = irq_data_to_handle(data);
++ unsigned long dev_ino = irq_data_to_ino(data);
+ int err;
+
+- dev_handle = irq_table[data->irq].dev_handle;
+- dev_ino = irq_table[data->irq].dev_ino;
+-
+ err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
+ .flags = IRQCHIP_EOI_IF_HANDLED,
+ };
+
+-static void pre_flow_handler(struct irq_data *d)
+-{
+- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+- unsigned int ino = irq_table[d->irq].dev_ino;
+-
+- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
+-}
+-
+-void irq_install_pre_handler(int irq,
+- void (*func)(unsigned int, void *, void *),
+- void *arg1, void *arg2)
+-{
+- struct irq_handler_data *handler_data = irq_get_handler_data(irq);
+-
+- handler_data->pre_handler = func;
+- handler_data->arg1 = arg1;
+- handler_data->arg2 = arg2;
+-
+- __irq_set_preflow_handler(irq, pre_flow_handler);
+-}
+-
+ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+ {
+- struct ino_bucket *bucket;
+ struct irq_handler_data *handler_data;
++ struct ino_bucket *bucket;
+ unsigned int irq;
+ int ino;
+
+@@ -537,119 +641,166 @@ out:
+ return irq;
+ }
+
+-static unsigned int sun4v_build_common(unsigned long sysino,
+- struct irq_chip *chip)
++static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
++ void (*handler_data_init)(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino),
++ struct irq_chip *chip)
+ {
+- struct ino_bucket *bucket;
+- struct irq_handler_data *handler_data;
++ struct irq_handler_data *data;
+ unsigned int irq;
+
+- BUG_ON(tlb_type != hypervisor);
++ irq = irq_alloc(devhandle, devino);
++ if (!irq)
++ goto out;
+
+- bucket = &ivector_table[sysino];
+- irq = bucket_get_irq(__pa(bucket));
+- if (!irq) {
+- irq = irq_alloc(0, sysino);
+- bucket_set_irq(__pa(bucket), irq);
+- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
+- "IVEC");
++ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
++ if (unlikely(!data)) {
++ pr_err("IRQ handler data allocation failed.\n");
++ irq_free(irq);
++ irq = 0;
++ goto out;
+ }
+
+- handler_data = irq_get_handler_data(irq);
+- if (unlikely(handler_data))
+- goto out;
++ irq_set_handler_data(irq, data);
++ handler_data_init(data, devhandle, devino);
++ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
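++ /* IMAP/ICLR handling is done by hypervisor calls on sun4v, not by
++ * direct register accesses, so poison both fields to catch any
++ * accidental use.
++ */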
++ data->imap = ~0UL;
++ data->iclr = ~0UL;
++out:
++ return irq;
++}
+
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+- if (unlikely(!handler_data)) {
+- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
+- prom_halt();
+- }
+- irq_set_handler_data(irq, handler_data);
++static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
++ unsigned int devino)
++{
++ struct irq_handler_data *ihd = irq_get_handler_data(irq);
++ unsigned long hv_error, cookie;
+
+- /* Catch accidental accesses to these things. IMAP/ICLR handling
+- * is done by hypervisor calls on sun4v platforms, not by direct
+- * register accesses.
++ /* handler_irq needs to find the irq. The cookie is seen as signed
++ * in sun4v_dev_mondo and, being negative, is treated as a
++ * non-ivector_table delivery.
+ */
+- handler_data->imap = ~0UL;
+- handler_data->iclr = ~0UL;
++ ihd->bucket.__irq = irq;
++ cookie = ~__pa(&ihd->bucket);
+
+-out:
+- return irq;
++ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
++ if (hv_error)
++ pr_err("HV vintr set cookie failed = %ld\n", hv_error);
++
++ return hv_error;
+ }
+
+-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
++static void cookie_handler_data(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino)
+ {
+- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
++ data->dev_handle = devhandle;
++ data->dev_ino = devino;
++}
+
+- return sun4v_build_common(sysino, &sun4v_irq);
++static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
++ struct irq_chip *chip)
++{
++ unsigned long hv_error;
++ unsigned int irq;
++
++ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
++
++ hv_error = cookie_assign(irq, devhandle, devino);
++ if (hv_error) {
++ irq_free(irq);
++ irq = 0;
++ }
++
++ return irq;
+ }
+
+-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
+ {
+- struct irq_handler_data *handler_data;
+- unsigned long hv_err, cookie;
+- struct ino_bucket *bucket;
+ unsigned int irq;
+
+- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
+- if (unlikely(!bucket))
+- return 0;
++ irq = cookie_exists(devhandle, devino);
++ if (irq)
++ goto out;
+
+- /* The only reference we store to the IRQ bucket is
+- * by physical address which kmemleak can't see, tell
+- * it that this object explicitly is not a leak and
+- * should be scanned.
+- */
+- kmemleak_not_leak(bucket);
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+
+- __flush_dcache_range((unsigned long) bucket,
+- ((unsigned long) bucket +
+- sizeof(struct ino_bucket)));
++out:
++ return irq;
++}
+
+- irq = irq_alloc(devhandle, devino);
++static void sysino_set_bucket(unsigned int irq)
++{
++ struct irq_handler_data *ihd = irq_get_handler_data(irq);
++ struct ino_bucket *bucket;
++ unsigned long sysino;
++
++ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
++ BUG_ON(sysino >= nr_ivec);
++ bucket = &ivector_table[sysino];
+ bucket_set_irq(__pa(bucket), irq);
++}
+
+- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
+- "IVEC");
++static void sysino_handler_data(struct irq_handler_data *data,
++ u32 devhandle, unsigned int devino)
++{
++ unsigned long sysino;
+
+- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+- if (unlikely(!handler_data))
+- return 0;
++ sysino = sun4v_devino_to_sysino(devhandle, devino);
++ data->sysino = sysino;
++}
+
+- /* In order to make the LDC channel startup sequence easier,
+- * especially wrt. locking, we do not let request_irq() enable
+- * the interrupt.
+- */
+- irq_set_status_flags(irq, IRQ_NOAUTOEN);
+- irq_set_handler_data(irq, handler_data);
++static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
++ struct irq_chip *chip)
++{
++ unsigned int irq;
+
+- /* Catch accidental accesses to these things. IMAP/ICLR handling
+- * is done by hypervisor calls on sun4v platforms, not by direct
+- * register accesses.
+- */
+- handler_data->imap = ~0UL;
+- handler_data->iclr = ~0UL;
++ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
++ if (!irq)
++ goto out;
+
+- cookie = ~__pa(bucket);
+- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+- if (hv_err) {
+- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+- "err=%lu\n", devhandle, devino, hv_err);
+- prom_halt();
+- }
++ sysino_set_bucket(irq);
++out:
++ return irq;
++}
+
++static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
++{
++ int irq;
++
++ irq = sysino_exists(devhandle, devino);
++ if (irq)
++ goto out;
++
++ irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
++out:
+ return irq;
+ }
+
+-void ack_bad_irq(unsigned int irq)
++unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+ {
+- unsigned int ino = irq_table[irq].dev_ino;
++ unsigned int irq;
+
+- if (!ino)
+- ino = 0xdeadbeef;
++ if (sun4v_cookie_only_virqs())
++ irq = sun4v_build_cookie(devhandle, devino);
++ else
++ irq = sun4v_build_sysino(devhandle, devino);
+
+- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+- ino, irq);
++ return irq;
++}
++
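++/* In short: with HV_GRP_INTR major 3 or later every device interrupt
++ * takes the cookie/VIRQ path and no ivector_table is required;
++ * otherwise the older devino-to-sysino translation is kept.
++ */
++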
++unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
++{
++ int irq;
++
++ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
++ if (!irq)
++ goto out;
++
++ /* Borrowed from the original function: do not let request_irq()
++ * enable the interrupt, which makes the LDC channel startup
++ * sequence easier, especially wrt. locking.
++ */
++ irq_set_status_flags(irq, IRQ_NOAUTOEN);
++
++out:
++ return irq;
+ }
+
+ void *hardirq_stack[NR_CPUS];
+@@ -720,9 +871,12 @@ void fixup_irqs(void)
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+- struct irq_data *data = irq_desc_get_irq_data(desc);
++ struct irq_data *data;
+ unsigned long flags;
+
++ if (!desc)
++ continue;
++ data = irq_desc_get_irq_data(desc);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->action && !irqd_is_per_cpu(data)) {
+ if (data->chip->irq_set_affinity)
+@@ -922,16 +1076,22 @@ static struct irqaction timer_irq_action = {
+ .name = "timer",
+ };
+
+-/* Only invoked on boot processor. */
+-void __init init_IRQ(void)
++static void __init irq_ivector_init(void)
+ {
+- unsigned long size;
++ unsigned long size, order;
++ unsigned int ivecs;
+
+- map_prom_timers();
+- kill_prom_timer();
++ /* If we are doing cookie only VIRQs then we do not need the ivector
++ * table to process interrupts.
++ */
++ if (sun4v_cookie_only_virqs())
++ return;
+
+- size = sizeof(struct ino_bucket) * NUM_IVECS;
+- ivector_table = kzalloc(size, GFP_KERNEL);
++ ivecs = size_nr_ivec();
++ size = sizeof(struct ino_bucket) * ivecs;
++ order = get_order(size);
++ ivector_table = (struct ino_bucket *)
++ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!ivector_table) {
+ prom_printf("Fatal error, cannot allocate ivector_table\n");
+ prom_halt();
+@@ -940,6 +1100,15 @@ void __init init_IRQ(void)
+ ((unsigned long) ivector_table) + size);
+
+ ivector_table_pa = __pa(ivector_table);
++}
++
++/* Only invoked on boot processor. */
++void __init init_IRQ(void)
++{
++ irq_init_hv();
++ irq_ivector_init();
++ map_prom_timers();
++ kill_prom_timer();
+
+ if (tlb_type == hypervisor)
+ sun4v_init_mondo_queues();
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index 605d492..ef0d8e9 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
+
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+- /* Load and check PTE. */
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5
+- mov 1, %g7
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7
+- brgez,a,pn %g5, kvmap_itlb_longpath
+- TSB_STORE(%g1, %g7)
+-
+ TSB_WRITE(%g1, %g5, %g6)
+
+ /* fallthrough to TLB load */
+@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
+ ba,pt %xcc, kvmap_dtlb_load
+ nop
+
++kvmap_linear_early:
++ sethi %hi(kern_linear_pte_xor), %g7
++ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
++ ba,pt %xcc, kvmap_dtlb_tsb4m_load
++ xor %g2, %g4, %g5
++
+ .align 32
+ kvmap_dtlb_tsb4m_load:
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+@@ -146,105 +144,17 @@ kvmap_dtlb_4v:
+ /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
+ KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+ #endif
+- /* TSB entry address left in %g1, lookup linear PTE.
+- * Must preserve %g1 and %g6 (TAG).
+- */
+-kvmap_dtlb_tsb4m_miss:
+- /* Clear the PAGE_OFFSET top virtual bits, shift
+- * down to get PFN, and make sure PFN is in range.
+- */
+-661: sllx %g4, 0, %g5
+- .section .page_offset_shift_patch, "ax"
+- .word 661b
+- .previous
+-
+- /* Check to see if we know about valid memory at the 4MB
+- * chunk this physical address will reside within.
++ /* Linear mapping TSB lookup failed. Fallthrough to kernel
++ * page table based lookup.
+ */
+-661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2
+- .section .page_offset_shift_patch, "ax"
+- .word 661b
+- .previous
+-
+- brnz,pn %g2, kvmap_dtlb_longpath
+- nop
+-
+- /* This unconditional branch and delay-slot nop gets patched
+- * by the sethi sequence once the bitmap is properly setup.
+- */
+- .globl valid_addr_bitmap_insn
+-valid_addr_bitmap_insn:
+- ba,pt %xcc, 2f
+- nop
+- .subsection 2
+- .globl valid_addr_bitmap_patch
+-valid_addr_bitmap_patch:
+- sethi %hi(sparc64_valid_addr_bitmap), %g7
+- or %g7, %lo(sparc64_valid_addr_bitmap), %g7
+- .previous
+-
+-661: srlx %g5, ILOG2_4MB, %g2
+- .section .page_offset_shift_patch, "ax"
+- .word 661b
+- .previous
+-
+- srlx %g2, 6, %g5
+- and %g2, 63, %g2
+- sllx %g5, 3, %g5
+- ldx [%g7 + %g5], %g5
+- mov 1, %g7
+- sllx %g7, %g2, %g7
+- andcc %g5, %g7, %g0
+- be,pn %xcc, kvmap_dtlb_longpath
+-
+-2: sethi %hi(kpte_linear_bitmap), %g2
+-
+- /* Get the 256MB physical address index. */
+-661: sllx %g4, 0, %g5
+- .section .page_offset_shift_patch, "ax"
+- .word 661b
+- .previous
+-
+- or %g2, %lo(kpte_linear_bitmap), %g2
+-
+-661: srlx %g5, ILOG2_256MB, %g5
+- .section .page_offset_shift_patch, "ax"
+- .word 661b
+- .previous
+-
+- and %g5, (32 - 1), %g7
+-
+- /* Divide by 32 to get the offset into the bitmask. */
+- srlx %g5, 5, %g5
+- add %g7, %g7, %g7
+- sllx %g5, 3, %g5
+-
+- /* kern_linear_pte_xor[(mask >> shift) & 3)] */
+- ldx [%g2 + %g5], %g2
+- srlx %g2, %g7, %g7
+- sethi %hi(kern_linear_pte_xor), %g5
+- and %g7, 3, %g7
+- or %g5, %lo(kern_linear_pte_xor), %g5
+- sllx %g7, 3, %g7
+- ldx [%g5 + %g7], %g2
+-
+ .globl kvmap_linear_patch
+ kvmap_linear_patch:
+- ba,pt %xcc, kvmap_dtlb_tsb4m_load
+- xor %g2, %g4, %g5
++ ba,a,pt %xcc, kvmap_linear_early
+
+ kvmap_dtlb_vmalloc_addr:
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+
+ TSB_LOCK_TAG(%g1, %g2, %g7)
+-
+- /* Load and check PTE. */
+- ldxa [%g5] ASI_PHYS_USE_EC, %g5
+- mov 1, %g7
+- sllx %g7, TSB_TAG_INVALID_BIT, %g7
+- brgez,a,pn %g5, kvmap_dtlb_longpath
+- TSB_STORE(%g1, %g7)
+-
+ TSB_WRITE(%g1, %g5, %g6)
+
+ /* fallthrough to TLB load */
+@@ -276,13 +186,8 @@ kvmap_dtlb_load:
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ kvmap_vmemmap:
+- sub %g4, %g5, %g5
+- srlx %g5, ILOG2_4MB, %g5
+- sethi %hi(vmemmap_table), %g1
+- sllx %g5, 3, %g5
+- or %g1, %lo(vmemmap_table), %g1
+- ba,pt %xcc, kvmap_dtlb_load
+- ldx [%g1 + %g5], %g5
++ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
++ ba,a,pt %xcc, kvmap_dtlb_load
+ #endif
+
+ kvmap_dtlb_nonlinear:
+@@ -294,8 +199,8 @@ kvmap_dtlb_nonlinear:
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Do not use the TSB for vmemmap. */
+- mov (VMEMMAP_BASE >> 40), %g5
+- sllx %g5, 40, %g5
++ sethi %hi(VMEMMAP_BASE), %g5
++ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
+ cmp %g4,%g5
+ bgeu,pn %xcc, kvmap_vmemmap
+ nop
+@@ -307,8 +212,8 @@ kvmap_dtlb_tsbmiss:
+ sethi %hi(MODULES_VADDR), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_dtlb_longpath
+- mov (VMALLOC_END >> 40), %g5
+- sllx %g5, 40, %g5
++ sethi %hi(VMALLOC_END), %g5
++ ldx [%g5 + %lo(VMALLOC_END)], %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, kvmap_dtlb_longpath
+ nop
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 66dacd5..27bb554 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp)
+
+ struct ldc_channel *ldc_alloc(unsigned long id,
+ const struct ldc_channel_config *cfgp,
+- void *event_arg)
++ void *event_arg,
++ const char *name)
+ {
+ struct ldc_channel *lp;
+ const struct ldc_mode_ops *mops;
+@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+ err = -EINVAL;
+ if (!cfgp)
+ goto out_err;
++ if (!name)
++ goto out_err;
+
+ switch (cfgp->mode) {
+ case LDC_MODE_RAW:
+@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
+
+ INIT_HLIST_HEAD(&lp->mh_list);
+
++ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
++ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
++
++ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
++ lp->rx_irq_name, lp);
++ if (err)
++ goto out_free_txq;
++
++ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
++ lp->tx_irq_name, lp);
++ if (err) {
++ free_irq(lp->cfg.rx_irq, lp);
++ goto out_free_txq;
++ }
++
+ return lp;
+
+ out_free_txq:
+@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free);
+ * state. This does not initiate a handshake, ldc_connect() does
+ * that.
+ */
+-int ldc_bind(struct ldc_channel *lp, const char *name)
++int ldc_bind(struct ldc_channel *lp)
+ {
+ unsigned long hv_err, flags;
+ int err = -EINVAL;
+
+- if (!name ||
+- (lp->state != LDC_STATE_INIT))
++ if (lp->state != LDC_STATE_INIT)
+ return -EINVAL;
+
+- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+-
+- err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
+- lp->rx_irq_name, lp);
+- if (err)
+- return err;
+-
+- err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
+- lp->tx_irq_name, lp);
+- if (err) {
+- free_irq(lp->cfg.rx_irq, lp);
+- return err;
+- }
+-
+-
+ spin_lock_irqsave(&lp->lock, flags);
+
+ enable_irq(lp->cfg.rx_irq);
+diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
+index 6479256..fce8ab1 100644
+--- a/arch/sparc/kernel/nmi.c
++++ b/arch/sparc/kernel/nmi.c
+@@ -141,7 +141,6 @@ static inline unsigned int get_nmi_count(int cpu)
+
+ static __init void nmi_cpu_busy(void *data)
+ {
+- local_irq_enable_in_hardirq();
+ while (endflag == 0)
+ mb();
+ }
+diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
+index 269af58..7e967c8 100644
+--- a/arch/sparc/kernel/pcr.c
++++ b/arch/sparc/kernel/pcr.c
+@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = {
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+ };
+
++static u64 n5_pcr_read(unsigned long reg_num)
++{
++ unsigned long val;
++
++ (void) sun4v_t5_get_perfreg(reg_num, &val);
++
++ return val;
++}
++
++static void n5_pcr_write(unsigned long reg_num, u64 val)
++{
++ (void) sun4v_t5_set_perfreg(reg_num, val);
++}
++
++static const struct pcr_ops n5_pcr_ops = {
++ .read_pcr = n5_pcr_read,
++ .write_pcr = n5_pcr_write,
++ .read_pic = n4_pic_read,
++ .write_pic = n4_pic_write,
++ .nmi_picl_value = n4_picl_value,
++ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
++ PCR_N4_UTRACE | PCR_N4_TOE |
++ (26 << PCR_N4_SL_SHIFT)),
++ .pcr_nmi_disable = PCR_N4_PICNPT,
++};
++
++
+ static unsigned long perf_hsvc_group;
+ static unsigned long perf_hsvc_major;
+ static unsigned long perf_hsvc_minor;
+
+ static int __init register_perf_hsvc(void)
+ {
++ unsigned long hverror;
++
+ if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void)
+ perf_hsvc_group = HV_GRP_VT_CPU;
+ break;
+
++ case SUN4V_CHIP_NIAGARA5:
++ perf_hsvc_group = HV_GRP_T5_CPU;
++ break;
++
+ default:
+ return -ENODEV;
+ }
+@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void)
+
+ perf_hsvc_major = 1;
+ perf_hsvc_minor = 0;
+- if (sun4v_hvapi_register(perf_hsvc_group,
+- perf_hsvc_major,
+- &perf_hsvc_minor)) {
+- printk("perfmon: Could not register hvapi.\n");
++ hverror = sun4v_hvapi_register(perf_hsvc_group,
++ perf_hsvc_major,
++ &perf_hsvc_minor);
++ if (hverror) {
++ pr_err("perfmon: Could not register hvapi(0x%lx).\n",
++ hverror);
+ return -ENODEV;
+ }
+ }
+@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void)
+ pcr_ops = &n4_pcr_ops;
+ break;
+
++ case SUN4V_CHIP_NIAGARA5:
++ pcr_ops = &n5_pcr_ops;
++ break;
++
+ default:
+ ret = -ENODEV;
+ break;
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index b5c38fa..617b9fe 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -1662,7 +1662,8 @@ static bool __init supported_pmu(void)
+ sparc_pmu = &niagara2_pmu;
+ return true;
+ }
+- if (!strcmp(sparc_pmu_type, "niagara4")) {
++ if (!strcmp(sparc_pmu_type, "niagara4") ||
++ !strcmp(sparc_pmu_type, "niagara5")) {
+ sparc_pmu = &niagara4_pmu;
+ return true;
+ }
+@@ -1671,9 +1672,12 @@ static bool __init supported_pmu(void)
+
+ int __init init_hw_perf_events(void)
+ {
++ int err;
++
+ pr_info("Performance events: ");
+
+- if (!supported_pmu()) {
++ err = pcr_arch_init();
++ if (err || !supported_pmu()) {
+ pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
+ return 0;
+ }
+@@ -1685,7 +1689,7 @@ int __init init_hw_perf_events(void)
+
+ return 0;
+ }
+-early_initcall(init_hw_perf_events);
++pure_initcall(init_hw_perf_events);
+
+ void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index d7b4967..c6f7113 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -306,6 +306,9 @@ static void __global_pmu_self(int this_cpu)
+ struct global_pmu_snapshot *pp;
+ int i, num;
+
++ if (!pcr_ops)
++ return;
++
+ pp = &global_cpu_snapshot[this_cpu].pmu;
+
+ num = 1;
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3fdb455..61a5198 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+
+ #include <asm/io.h>
+ #include <asm/processor.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ struct cpuid_patch_entry *p;
+ unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ }
+ }
+
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ extern void sun4v_hvapi_init(void);
+
+@@ -335,14 +336,25 @@ static void __init pause_patch(void)
+ }
+ }
+
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+- cpu, NR_CPUS);
+- prom_halt();
++ int cpu;
++
++ check_if_starfire();
++ per_cpu_patch();
++ sun4v_patch();
++
++ cpu = hard_smp_processor_id();
++ if (cpu >= NR_CPUS) {
++ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++ cpu, NR_CPUS);
++ prom_halt();
++ }
++ current_thread_info()->cpu = cpu;
++
++ prom_init_report();
++ start_kernel();
+ }
+-#endif
+
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
+@@ -500,12 +512,16 @@ static void __init init_sparc64_elf_hwcap(void)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= HWCAP_SPARC_BLKINIT;
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= HWCAP_SPARC_N2;
+ }
+@@ -533,6 +549,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
+ AV_SPARC_ASI_BLK_INIT |
+@@ -540,6 +558,8 @@ static void __init init_sparc64_elf_hwcap(void)
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
++ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
+ cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+ AV_SPARC_FMAF);
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 8416d7f..50c3dd03 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1395,7 +1395,6 @@ void __cpu_die(unsigned int cpu)
+
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+- pcr_arch_init();
+ }
+
+ void smp_send_reschedule(int cpu)
+@@ -1480,6 +1479,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
+ pud_t *pud;
+ pmd_t *pmd;
+
++ if (pgd_none(*pgd)) {
++ pud_t *new;
++
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++ pgd_populate(&init_mm, pgd, new);
++ }
++
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
+index e0c09bf8..6179e19 100644
+--- a/arch/sparc/kernel/sun4v_tlb_miss.S
++++ b/arch/sparc/kernel/sun4v_tlb_miss.S
+@@ -195,6 +195,11 @@ sun4v_tsb_miss_common:
+ ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
+
+ sun4v_itlb_error:
++ rdpr %tl, %g1
++ cmp %g1, 1
++ ble,pt %icc, sun4v_bad_ra
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
++
+ sethi %hi(sun4v_err_itlb_vaddr), %g1
+ stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
+ sethi %hi(sun4v_err_itlb_ctx), %g1
+@@ -206,15 +211,10 @@ sun4v_itlb_error:
+ sethi %hi(sun4v_err_itlb_error), %g1
+ stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
+
++ sethi %hi(1f), %g7
+ rdpr %tl, %g4
+- cmp %g4, 1
+- ble,pt %icc, 1f
+- sethi %hi(2f), %g7
+ ba,pt %xcc, etraptl1
+- or %g7, %lo(2f), %g7
+-
+-1: ba,pt %xcc, etrap
+-2: or %g7, %lo(2b), %g7
++1: or %g7, %lo(1f), %g7
+ mov %l4, %o1
+ call sun4v_itlb_error_report
+ add %sp, PTREGS_OFF, %o0
+@@ -222,6 +222,11 @@ sun4v_itlb_error:
+ /* NOTREACHED */
+
+ sun4v_dtlb_error:
++ rdpr %tl, %g1
++ cmp %g1, 1
++ ble,pt %icc, sun4v_bad_ra
++ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
++
+ sethi %hi(sun4v_err_dtlb_vaddr), %g1
+ stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
+ sethi %hi(sun4v_err_dtlb_ctx), %g1
+@@ -233,21 +238,23 @@ sun4v_dtlb_error:
+ sethi %hi(sun4v_err_dtlb_error), %g1
+ stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
+
++ sethi %hi(1f), %g7
+ rdpr %tl, %g4
+- cmp %g4, 1
+- ble,pt %icc, 1f
+- sethi %hi(2f), %g7
+ ba,pt %xcc, etraptl1
+- or %g7, %lo(2f), %g7
+-
+-1: ba,pt %xcc, etrap
+-2: or %g7, %lo(2b), %g7
++1: or %g7, %lo(1f), %g7
+ mov %l4, %o1
+ call sun4v_dtlb_error_report
+ add %sp, PTREGS_OFF, %o0
+
+ /* NOTREACHED */
+
++sun4v_bad_ra:
++ or %g0, %g4, %g5
++ ba,pt %xcc, sparc64_realfault_common
++ or %g1, %g0, %g4
++
++ /* NOTREACHED */
++
+ /* Instruction Access Exception, tl0. */
+ sun4v_iacc:
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index 737f8cb..88ede1d 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -109,10 +109,13 @@ startup_continue:
+ brnz,pn %g1, 1b
+ nop
+
+- sethi %hi(p1275buf), %g2
+- or %g2, %lo(p1275buf), %g2
+- ldx [%g2 + 0x10], %l2
+- add %l2, -(192 + 128), %sp
++ /* Get onto temporary stack which will be in the locked
++ * kernel image.
++ */
++ sethi %hi(tramp_stack), %g1
++ or %g1, %lo(tramp_stack), %g1
++ add %g1, TRAMP_STACK_SIZE, %g1
++ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ flushw
+
+ /* Setup the loop variables:
+@@ -394,7 +397,6 @@ after_lock_tlb:
+ sllx %g5, THREAD_SHIFT, %g5
+ sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ add %g6, %g5, %sp
+- mov 0, %fp
+
+ rdpr %pstate, %o1
+ or %o1, PSTATE_IE, %o1
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 4ced92f..25d0c7e 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2102,6 +2102,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ atomic_inc(&sun4v_nonresum_oflow_cnt);
+ }
+
++static void sun4v_tlb_error(struct pt_regs *regs)
++{
++ die_if_kernel("TLB/TSB error", regs);
++}
++
+ unsigned long sun4v_err_itlb_vaddr;
+ unsigned long sun4v_err_itlb_ctx;
+ unsigned long sun4v_err_itlb_pte;
+@@ -2109,8 +2114,7 @@ unsigned long sun4v_err_itlb_error;
+
+ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ {
+- if (tl > 1)
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+@@ -2123,7 +2127,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+ sun4v_err_itlb_pte, sun4v_err_itlb_error);
+
+- prom_halt();
++ sun4v_tlb_error(regs);
+ }
+
+ unsigned long sun4v_err_dtlb_vaddr;
+@@ -2133,8 +2137,7 @@ unsigned long sun4v_err_dtlb_error;
+
+ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ {
+- if (tl > 1)
+- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
++ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+@@ -2147,7 +2150,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+ sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+ sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+
+- prom_halt();
++ sun4v_tlb_error(regs);
+ }
+
+ void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index 14158d4..be98685 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -162,10 +162,10 @@ tsb_miss_page_table_walk_sun4v_fastpath:
+ nop
+ .previous
+
+- rdpr %tl, %g3
+- cmp %g3, 1
++ rdpr %tl, %g7
++ cmp %g7, 1
+ bne,pn %xcc, winfix_trampoline
+- nop
++ mov %g3, %g4
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call hugetlb_setup
+diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
+index f8e7dd5..9c5fbd0 100644
+--- a/arch/sparc/kernel/viohs.c
++++ b/arch/sparc/kernel/viohs.c
+@@ -714,7 +714,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
+ cfg.tx_irq = vio->vdev->tx_irq;
+ cfg.rx_irq = vio->vdev->rx_irq;
+
+- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
++ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
+ if (IS_ERR(lp))
+ return PTR_ERR(lp);
+
+@@ -746,7 +746,7 @@ void vio_port_up(struct vio_driver_state *vio)
+
+ err = 0;
+ if (state == LDC_STATE_INIT) {
+- err = ldc_bind(vio->lp, vio->name);
++ err = ldc_bind(vio->lp);
+ if (err)
+ printk(KERN_WARNING "%s: Port %lu bind failed, "
+ "err=%d\n",
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index 932ff90..0924305 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -35,8 +35,9 @@ jiffies = jiffies_64;
+
+ SECTIONS
+ {
+- /* swapper_low_pmd_dir is sparc64 only */
+- swapper_low_pmd_dir = 0x0000000000402000;
++#ifdef CONFIG_SPARC64
++ swapper_pg_dir = 0x0000000000402000;
++#endif
+ . = INITIAL_ADDRESS;
+ .text TEXTSTART :
+ {
+@@ -122,11 +123,6 @@ SECTIONS
+ *(.swapper_4m_tsb_phys_patch)
+ __swapper_4m_tsb_phys_patch_end = .;
+ }
+- .page_offset_shift_patch : {
+- __page_offset_shift_patch = .;
+- *(.page_offset_shift_patch)
+- __page_offset_shift_patch_end = .;
+- }
+ .popc_3insn_patch : {
+ __popc_3insn_patch = .;
+ *(.popc_3insn_patch)
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 9cf2ee0..140527a 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -41,6 +41,10 @@
+ #endif
+ #endif
+
++#if !defined(EX_LD) && !defined(EX_ST)
++#define NON_USER_COPY
++#endif
++
+ #ifndef EX_LD
+ #define EX_LD(x) x
+ #endif
+@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
+ mov EX_RETVAL(%o3), %o0
+
+ .Llarge_src_unaligned:
++#ifdef NON_USER_COPY
++ VISEntryHalfFast(.Lmedium_vis_entry_fail)
++#else
++ VISEntryHalf
++#endif
+ andn %o2, 0x3f, %o4
+ sub %o2, %o4, %o2
+- VISEntryHalf
+ alignaddr %o1, %g0, %g1
+ add %o1, %o4, %o1
+ EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
+@@ -240,6 +248,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
+ nop
+ ba,a,pt %icc, .Lmedium_unaligned
+
++#ifdef NON_USER_COPY
++.Lmedium_vis_entry_fail:
++ or %o0, %o1, %g2
++#endif
+ .Lmedium:
+ LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
+ andcc %g2, 0x7, %g0
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index 99c017b..f75e690 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -3,8 +3,9 @@
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+- * Returns 0, if ok, and number of bytes not yet set if exception
+- * occurs and we were called as clear_user.
++ * Calls to memset return the initial %o0. Calls to bzero return 0 if
++ * ok, and the number of bytes not yet set if an exception occurs and
++ * we were called as clear_user.
+ */
+
+ #include <asm/ptrace.h>
+@@ -65,6 +66,8 @@ __bzero_begin:
+ .globl __memset_start, __memset_end
+ __memset_start:
+ memset:
++ mov %o0, %g1
++ mov 1, %g4
+ and %o1, 0xff, %g3
+ sll %g3, 8, %g2
+ or %g3, %g2, %g3
+@@ -89,6 +92,7 @@ memset:
+ sub %o0, %o2, %o0
+
+ __bzero:
++ clr %g4
+ mov %g0, %g3
+ 1:
+ cmp %o1, 7
+@@ -151,8 +155,8 @@ __bzero:
+ bne,a 8f
+ EX(stb %g3, [%o0], and %o1, 1)
+ 8:
+- retl
+- clr %o0
++ b 0f
++ nop
+ 7:
+ be 13b
+ orcc %o1, 0, %g0
+@@ -164,6 +168,12 @@ __bzero:
+ bne 8b
+ EX(stb %g3, [%o0 - 1], add %o1, 1)
+ 0:
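++ /* %g4 was set to 1 by memset and cleared by __bzero: pick the
++ * matching return value (the original %o0 saved in %g1, or 0).
++ */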
++ andcc %g4, 1, %g0
++ be 5f
++ nop
++ retl
++ mov %g1, %o0
++5:
+ retl
+ clr %o0
+ __memset_end:
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 4ced3fc..45a413e 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -348,6 +348,9 @@ retry:
+ down_read(&mm->mmap_sem);
+ }
+
++ if (fault_code & FAULT_CODE_BAD_RA)
++ goto do_sigbus;
++
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 1aed043..ae6ce38 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ return 1;
+ }
+
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++ struct page **pages)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long addr, len, end;
++ unsigned long next, flags;
++ pgd_t *pgdp;
++ int nr = 0;
++
++ start &= PAGE_MASK;
++ addr = start;
++ len = (unsigned long) nr_pages << PAGE_SHIFT;
++ end = start + len;
++
++ local_irq_save(flags);
++ pgdp = pgd_offset(mm, addr);
++ do {
++ pgd_t pgd = *pgdp;
++
++ next = pgd_addr_end(addr, end);
++ if (pgd_none(pgd))
++ break;
++ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++ break;
++ } while (pgdp++, addr = next, addr != end);
++ local_irq_restore(flags);
++
++ return nr;
++}
++
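++/* Note: disabling local interrupts above is what makes this lockless
++ * walk safe -- a best-effort reading of the usual
++ * get_user_pages_fast() scheme, where page table pages cannot be
++ * freed out from under us while the IPI-driven remote TLB flush that
++ * teardown requires is held off.
++ */
++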
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+ {
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 9686224..34506f2 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -73,7 +73,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
+ * 'cpu' properties, but we need to have this table setup before the
+ * MDESC is initialized.
+ */
+-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+
+ #ifndef CONFIG_DEBUG_PAGEALLOC
+ /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
+@@ -82,10 +81,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ */
+ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+ #endif
++extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+ static unsigned long cpu_pgsz_mask;
+
+-#define MAX_BANKS 32
++#define MAX_BANKS 1024
+
+ static struct linux_prom64_registers pavail[MAX_BANKS];
+ static int pavail_ents;
+@@ -163,10 +163,6 @@ static void __init read_obp_memory(const char *property,
+ cmp_p64, NULL);
+ }
+
+-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
+- sizeof(unsigned long)];
+-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+-
+ /* Kernel physical address base and size in bytes. */
+ unsigned long kern_base __read_mostly;
+ unsigned long kern_size __read_mostly;
+@@ -838,7 +834,10 @@ static int find_node(unsigned long addr)
+ if ((addr & p->mask) == p->val)
+ return i;
+ }
+- return -1;
++ /* The following condition has been observed on LDOM guests. */
++ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
++ " rule. Some physical memory will be owned by node 0.");
++ return 0;
+ }
+
+ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+@@ -1360,9 +1359,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
+ static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+ static int pall_ents __initdata;
+
+-#ifdef CONFIG_DEBUG_PAGEALLOC
++static unsigned long max_phys_bits = 40;
++
++bool kern_addr_valid(unsigned long addr)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if ((long)addr < 0L) {
++ unsigned long pa = __pa(addr);
++
++ if ((addr >> max_phys_bits) != 0UL)
++ return false;
++
++ return pfn_valid(pa >> PAGE_SHIFT);
++ }
++
++ if (addr >= (unsigned long) KERNBASE &&
++ addr < (unsigned long)&_end)
++ return true;
++
++ pgd = pgd_offset_k(addr);
++ if (pgd_none(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, addr);
++ if (pud_none(*pud))
++ return 0;
++
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
++ pmd = pmd_offset(pud, addr);
++ if (pmd_none(*pmd))
++ return 0;
++
++ if (pmd_large(*pmd))
++ return pfn_valid(pmd_pfn(*pmd));
++
++ pte = pte_offset_kernel(pmd, addr);
++ if (pte_none(*pte))
++ return 0;
++
++ return pfn_valid(pte_pfn(*pte));
++}
++EXPORT_SYMBOL(kern_addr_valid);
++
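++/* Sketch of the address classes handled above: "negative" addresses
++ * ((long)addr < 0L) live in the linear mapping and are validated
++ * against max_phys_bits; addresses inside the kernel image are always
++ * valid; everything else is resolved by a normal page table walk.
++ */
++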
++static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
++ unsigned long vend,
++ pud_t *pud)
++{
++ const unsigned long mask16gb = (1UL << 34) - 1UL;
++ u64 pte_val = vstart;
++
++ /* Each PUD is 8GB */
++ if ((vstart & mask16gb) ||
++ (vend - vstart <= mask16gb)) {
++ pte_val ^= kern_linear_pte_xor[2];
++ pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
++
++ return vstart + PUD_SIZE;
++ }
++
++ pte_val ^= kern_linear_pte_xor[3];
++ pte_val |= _PAGE_PUD_HUGE;
++
++ vend = vstart + mask16gb + 1UL;
++ while (vstart < vend) {
++ pud_val(*pud) = pte_val;
++
++ pte_val += PUD_SIZE;
++ vstart += PUD_SIZE;
++ pud++;
++ }
++ return vstart;
++}
++
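++/* Reading of the size selection above: a 16GB-aligned span of at
++ * least 16GB is mapped with 16GB PTEs (kern_linear_pte_xor[3],
++ * written as two 8GB PUD entries); anything smaller falls back to a
++ * single 8GB PUD entry using kern_linear_pte_xor[2].
++ */
++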
++static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
++ bool guard)
++{
++ if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
++ return true;
++
++ return false;
++}
++
++static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
++ unsigned long vend,
++ pmd_t *pmd)
++{
++ const unsigned long mask256mb = (1UL << 28) - 1UL;
++ const unsigned long mask2gb = (1UL << 31) - 1UL;
++ u64 pte_val = vstart;
++
++ /* Each PMD is 8MB */
++ if ((vstart & mask256mb) ||
++ (vend - vstart <= mask256mb)) {
++ pte_val ^= kern_linear_pte_xor[0];
++ pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
++
++ return vstart + PMD_SIZE;
++ }
++
++ if ((vstart & mask2gb) ||
++ (vend - vstart <= mask2gb)) {
++ pte_val ^= kern_linear_pte_xor[1];
++ pte_val |= _PAGE_PMD_HUGE;
++ vend = vstart + mask256mb + 1UL;
++ } else {
++ pte_val ^= kern_linear_pte_xor[2];
++ pte_val |= _PAGE_PMD_HUGE;
++ vend = vstart + mask2gb + 1UL;
++ }
++
++ while (vstart < vend) {
++ pmd_val(*pmd) = pte_val;
++
++ pte_val += PMD_SIZE;
++ vstart += PMD_SIZE;
++ pmd++;
++ }
++
++ return vstart;
++}
++
++static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
++ bool guard)
++{
++ if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
++ return true;
++
++ return false;
++}
++
+ static unsigned long __ref kernel_map_range(unsigned long pstart,
+- unsigned long pend, pgprot_t prot)
++ unsigned long pend, pgprot_t prot,
++ bool use_huge)
+ {
+ unsigned long vstart = PAGE_OFFSET + pstart;
+ unsigned long vend = PAGE_OFFSET + pend;
+@@ -1381,19 +1515,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ pmd_t *pmd;
+ pte_t *pte;
+
++ if (pgd_none(*pgd)) {
++ pud_t *new;
++
++ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
++ alloc_bytes += PAGE_SIZE;
++ pgd_populate(&init_mm, pgd, new);
++ }
+ pud = pud_offset(pgd, vstart);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
++ if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
++ vstart = kernel_map_hugepud(vstart, vend, pud);
++ continue;
++ }
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, vstart);
+- if (!pmd_present(*pmd)) {
++ if (pmd_none(*pmd)) {
+ pte_t *new;
+
++ if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
++ vstart = kernel_map_hugepmd(vstart, vend, pmd);
++ continue;
++ }
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pmd_populate_kernel(&init_mm, pmd, new);
+@@ -1416,100 +1565,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
+ return alloc_bytes;
+ }
+
+-extern unsigned int kvmap_linear_patch[1];
+-#endif /* CONFIG_DEBUG_PAGEALLOC */
+-
+-static void __init kpte_set_val(unsigned long index, unsigned long val)
++static void __init flush_all_kernel_tsbs(void)
+ {
+- unsigned long *ptr = kpte_linear_bitmap;
+-
+- val <<= ((index % (BITS_PER_LONG / 2)) * 2);
+- ptr += (index / (BITS_PER_LONG / 2));
+-
+- *ptr |= val;
+-}
+-
+-static const unsigned long kpte_shift_min = 28; /* 256MB */
+-static const unsigned long kpte_shift_max = 34; /* 16GB */
+-static const unsigned long kpte_shift_incr = 3;
+-
+-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
+- unsigned long shift)
+-{
+- unsigned long size = (1UL << shift);
+- unsigned long mask = (size - 1UL);
+- unsigned long remains = end - start;
+- unsigned long val;
+-
+- if (remains < size || (start & mask))
+- return start;
+-
+- /* VAL maps:
+- *
+- * shift 28 --> kern_linear_pte_xor index 1
+- * shift 31 --> kern_linear_pte_xor index 2
+- * shift 34 --> kern_linear_pte_xor index 3
+- */
+- val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
+-
+- remains &= ~mask;
+- if (shift != kpte_shift_max)
+- remains = size;
+-
+- while (remains) {
+- unsigned long index = start >> kpte_shift_min;
++ int i;
+
+- kpte_set_val(index, val);
++ for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
++ struct tsb *ent = &swapper_tsb[i];
+
+- start += 1UL << kpte_shift_min;
+- remains -= 1UL << kpte_shift_min;
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
++#ifndef CONFIG_DEBUG_PAGEALLOC
++ for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
++ struct tsb *ent = &swapper_4m_tsb[i];
+
+- return start;
+-}
+-
+-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+-{
+- unsigned long smallest_size, smallest_mask;
+- unsigned long s;
+-
+- smallest_size = (1UL << kpte_shift_min);
+- smallest_mask = (smallest_size - 1UL);
+-
+- while (start < end) {
+- unsigned long orig_start = start;
+-
+- for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
+- start = kpte_mark_using_shift(start, end, s);
+-
+- if (start != orig_start)
+- break;
+- }
+-
+- if (start == orig_start)
+- start = (start + smallest_size) & ~smallest_mask;
++ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+ }
++#endif
+ }
+
+-static void __init init_kpte_bitmap(void)
+-{
+- unsigned long i;
+-
+- for (i = 0; i < pall_ents; i++) {
+- unsigned long phys_start, phys_end;
+-
+- phys_start = pall[i].phys_addr;
+- phys_end = phys_start + pall[i].reg_size;
+-
+- mark_kpte_bitmap(phys_start, phys_end);
+- }
+-}
++extern unsigned int kvmap_linear_patch[1];
+
+ static void __init kernel_physical_mapping_init(void)
+ {
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ unsigned long i, mem_alloced = 0UL;
++ bool use_huge = true;
+
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ use_huge = false;
++#endif
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+@@ -1517,7 +1600,7 @@ static void __init kernel_physical_mapping_init(void)
+ phys_end = phys_start + pall[i].reg_size;
+
+ mem_alloced += kernel_map_range(phys_start, phys_end,
+- PAGE_KERNEL);
++ PAGE_KERNEL, use_huge);
+ }
+
+ printk("Allocated %ld bytes for kernel page tables.\n",
+@@ -1526,8 +1609,9 @@ static void __init kernel_physical_mapping_init(void)
+ kvmap_linear_patch[0] = 0x01000000; /* nop */
+ flushi(&kvmap_linear_patch[0]);
+
++ flush_all_kernel_tsbs();
++
+ __flush_tlb_all();
+-#endif
+ }
+
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+@@ -1537,7 +1621,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
+ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+ kernel_map_range(phys_start, phys_end,
+- (enable ? PAGE_KERNEL : __pgprot(0)));
++ (enable ? PAGE_KERNEL : __pgprot(0)), false);
+
+ flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
+ PAGE_OFFSET + phys_end);
+@@ -1565,76 +1649,56 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
+ unsigned long PAGE_OFFSET;
+ EXPORT_SYMBOL(PAGE_OFFSET);
+
+-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
+-{
+- unsigned long final_shift;
+- unsigned int val = *insn;
+- unsigned int cnt;
+-
+- /* We are patching in ilog2(max_supported_phys_address), and
+- * we are doing so in a manner similar to a relocation addend.
+- * That is, we are adding the shift value to whatever value
+- * is in the shift instruction count field already.
+- */
+- cnt = (val & 0x3f);
+- val &= ~0x3f;
+-
+- /* If we are trying to shift >= 64 bits, clear the destination
+- * register. This can happen when phys_bits ends up being equal
+- * to MAX_PHYS_ADDRESS_BITS.
+- */
+- final_shift = (cnt + (64 - phys_bits));
+- if (final_shift >= 64) {
+- unsigned int rd = (val >> 25) & 0x1f;
+-
+- val = 0x80100000 | (rd << 25);
+- } else {
+- val |= final_shift;
+- }
+- *insn = val;
+-
+- __asm__ __volatile__("flush %0"
+- : /* no outputs */
+- : "r" (insn));
+-}
+-
+-static void __init page_offset_shift_patch(unsigned long phys_bits)
+-{
+- extern unsigned int __page_offset_shift_patch;
+- extern unsigned int __page_offset_shift_patch_end;
+- unsigned int *p;
+-
+- p = &__page_offset_shift_patch;
+- while (p < &__page_offset_shift_patch_end) {
+- unsigned int *insn = (unsigned int *)(unsigned long)*p;
++unsigned long VMALLOC_END = 0x0000010000000000UL;
++EXPORT_SYMBOL(VMALLOC_END);
+
+- page_offset_shift_patch_one(insn, phys_bits);
+-
+- p++;
+- }
+-}
++unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
++unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
+
+ static void __init setup_page_offset(void)
+ {
+- unsigned long max_phys_bits = 40;
+-
+ if (tlb_type == cheetah || tlb_type == cheetah_plus) {
++ /* Cheetah/Panther support a full 64-bit virtual
++ * address, so we can use all that our page tables
++ * support.
++ */
++ sparc64_va_hole_top = 0xfff0000000000000UL;
++ sparc64_va_hole_bottom = 0x0010000000000000UL;
++
+ max_phys_bits = 42;
+ } else if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ case SUN4V_CHIP_NIAGARA2:
++ /* T1 and T2 support 48-bit virtual addresses. */
++ sparc64_va_hole_top = 0xffff800000000000UL;
++ sparc64_va_hole_bottom = 0x0000800000000000UL;
++
+ max_phys_bits = 39;
+ break;
+ case SUN4V_CHIP_NIAGARA3:
++ /* T3 supports 48-bit virtual addresses. */
++ sparc64_va_hole_top = 0xffff800000000000UL;
++ sparc64_va_hole_bottom = 0x0000800000000000UL;
++
+ max_phys_bits = 43;
+ break;
+ case SUN4V_CHIP_NIAGARA4:
+ case SUN4V_CHIP_NIAGARA5:
+ case SUN4V_CHIP_SPARC64X:
+- default:
++ case SUN4V_CHIP_SPARC_M6:
++ /* T4 and later support 52-bit virtual addresses. */
++ sparc64_va_hole_top = 0xfff8000000000000UL;
++ sparc64_va_hole_bottom = 0x0008000000000000UL;
+ max_phys_bits = 47;
+ break;
++ case SUN4V_CHIP_SPARC_M7:
++ default:
++ /* M7 and later support 52-bit virtual addresses. */
++ sparc64_va_hole_top = 0xfff8000000000000UL;
++ sparc64_va_hole_bottom = 0x0008000000000000UL;
++ max_phys_bits = 49;
++ break;
+ }
+ }
+
+@@ -1644,12 +1708,16 @@ static void __init setup_page_offset(void)
+ prom_halt();
+ }
+
+- PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
++ PAGE_OFFSET = sparc64_va_hole_top;
++ VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
++ (sparc64_va_hole_bottom >> 2));
+
+- pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
++ pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
+ PAGE_OFFSET, max_phys_bits);
+-
+- page_offset_shift_patch(max_phys_bits);
++ pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
++ VMALLOC_START, VMALLOC_END);
++ pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
++ VMEMMAP_BASE, VMEMMAP_BASE << 1);
+ }
+
+ static void __init tsb_phys_patch(void)
+@@ -1694,21 +1762,42 @@ static void __init tsb_phys_patch(void)
+ #define NUM_KTSB_DESCR 1
+ #endif
+ static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
+-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
++
++/* The swapper TSBs are loaded with a base sequence of:
++ *
++ * sethi %uhi(SYMBOL), REG1
++ * sethi %hi(SYMBOL), REG2
++ * or REG1, %ulo(SYMBOL), REG1
++ * or REG2, %lo(SYMBOL), REG2
++ * sllx REG1, 32, REG1
++ * or REG1, REG2, REG1
++ *
++ * When we use physical addressing for the TSB accesses, we patch the
++ * first four instructions in the above sequence.
++ */
+
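++/* As an illustrative example: for pa = 0x0000000123456000, high_bits
++ * is 0x00000001 and low_bits is 0x23456000; the two sethi
++ * instructions are patched with bits 31:10 of each half (0x0 and
++ * 0x8d158) and the two or instructions with bits 9:0 (0x1 and 0x0).
++ */
++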
+ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+ {
+- pa >>= KTSB_PHYS_SHIFT;
++ unsigned long high_bits, low_bits;
++
++ high_bits = (pa >> 32) & 0xffffffff;
++ low_bits = (pa >> 0) & 0xffffffff;
+
+ while (start < end) {
+ unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+- ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
++ ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
+ __asm__ __volatile__("flush %0" : : "r" (ia));
+
+- ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
++ ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
+ __asm__ __volatile__("flush %0" : : "r" (ia + 1));
+
++ ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
++ __asm__ __volatile__("flush %0" : : "r" (ia + 2));
++
++ ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
++ __asm__ __volatile__("flush %0" : : "r" (ia + 3));
++
+ start++;
+ }
+ }
+@@ -1847,7 +1936,6 @@ static void __init sun4v_linear_pte_xor_finalize(void)
+ /* paging_init() sets up the page tables */
+
+ static unsigned long last_valid_pfn;
+-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+ static void sun4u_pgprot_init(void);
+ static void sun4v_pgprot_init(void);
+@@ -1950,16 +2038,10 @@ void __init paging_init(void)
+ */
+ init_mm.pgd += ((shift) / (sizeof(pgd_t)));
+
+- memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
++ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+- /* Now can init the kernel/bad page tables. */
+- pud_set(pud_offset(&swapper_pg_dir[0], 0),
+- swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
+-
+ inherit_prom_mappings();
+
+- init_kpte_bitmap();
+-
+ /* Ok, we can use our TLB miss and window trap handlers safely. */
+ setup_tba();
+
+@@ -2066,70 +2148,6 @@ int page_in_phys_avail(unsigned long paddr)
+ return 0;
+ }
+
+-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+-static int pavail_rescan_ents __initdata;
+-
+-/* Certain OBP calls, such as fetching "available" properties, can
+- * claim physical memory. So, along with initializing the valid
+- * address bitmap, what we do here is refetch the physical available
+- * memory list again, and make sure it provides at least as much
+- * memory as 'pavail' does.
+- */
+-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
+-{
+- int i;
+-
+- read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
+-
+- for (i = 0; i < pavail_ents; i++) {
+- unsigned long old_start, old_end;
+-
+- old_start = pavail[i].phys_addr;
+- old_end = old_start + pavail[i].reg_size;
+- while (old_start < old_end) {
+- int n;
+-
+- for (n = 0; n < pavail_rescan_ents; n++) {
+- unsigned long new_start, new_end;
+-
+- new_start = pavail_rescan[n].phys_addr;
+- new_end = new_start +
+- pavail_rescan[n].reg_size;
+-
+- if (new_start <= old_start &&
+- new_end >= (old_start + PAGE_SIZE)) {
+- set_bit(old_start >> ILOG2_4MB, bitmap);
+- goto do_next_page;
+- }
+- }
+-
+- prom_printf("mem_init: Lost memory in pavail\n");
+- prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
+- pavail[i].phys_addr,
+- pavail[i].reg_size);
+- prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
+- pavail_rescan[i].phys_addr,
+- pavail_rescan[i].reg_size);
+- prom_printf("mem_init: Cannot continue, aborting.\n");
+- prom_halt();
+-
+- do_next_page:
+- old_start += PAGE_SIZE;
+- }
+- }
+-}
+-
+-static void __init patch_tlb_miss_handler_bitmap(void)
+-{
+- extern unsigned int valid_addr_bitmap_insn[];
+- extern unsigned int valid_addr_bitmap_patch[];
+-
+- valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
+- mb();
+- valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
+- flushi(&valid_addr_bitmap_insn[0]);
+-}
+-
+ static void __init register_page_bootmem_info(void)
+ {
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+@@ -2142,18 +2160,6 @@ static void __init register_page_bootmem_info(void)
+ }
+ void __init mem_init(void)
+ {
+- unsigned long addr, last;
+-
+- addr = PAGE_OFFSET + kern_base;
+- last = PAGE_ALIGN(kern_size) + addr;
+- while (addr < last) {
+- set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+- addr += PAGE_SIZE;
+- }
+-
+- setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
+- patch_tlb_miss_handler_bitmap();
+-
+ high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+
+ register_page_bootmem_info();
+@@ -2243,18 +2249,9 @@ unsigned long _PAGE_CACHE __read_mostly;
+ EXPORT_SYMBOL(_PAGE_CACHE);
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+-unsigned long vmemmap_table[VMEMMAP_SIZE];
+-
+-static long __meminitdata addr_start, addr_end;
+-static int __meminitdata node_start;
+-
+ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ int node)
+ {
+- unsigned long phys_start = (vstart - VMEMMAP_BASE);
+- unsigned long phys_end = (vend - VMEMMAP_BASE);
+- unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+- unsigned long end = VMEMMAP_ALIGN(phys_end);
+ unsigned long pte_base;
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+@@ -2265,47 +2262,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+- for (; addr < end; addr += VMEMMAP_CHUNK) {
+- unsigned long *vmem_pp =
+- vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+- void *block;
++ pte_base |= _PAGE_PMD_HUGE;
+
+- if (!(*vmem_pp & _PAGE_VALID)) {
+- block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
+- if (!block)
++ vstart = vstart & PMD_MASK;
++ vend = ALIGN(vend, PMD_SIZE);
++ for (; vstart < vend; vstart += PMD_SIZE) {
++ pgd_t *pgd = pgd_offset_k(vstart);
++ unsigned long pte;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (pgd_none(*pgd)) {
++ pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
++
++ if (!new)
+ return -ENOMEM;
++ pgd_populate(&init_mm, pgd, new);
++ }
+
+- *vmem_pp = pte_base | __pa(block);
++ pud = pud_offset(pgd, vstart);
++ if (pud_none(*pud)) {
++ pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+
+- /* check to see if we have contiguous blocks */
+- if (addr_end != addr || node_start != node) {
+- if (addr_start)
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+- addr_start, addr_end-1, node_start);
+- addr_start = addr;
+- node_start = node;
+- }
+- addr_end = addr + VMEMMAP_CHUNK;
++ if (!new)
++ return -ENOMEM;
++ pud_populate(&init_mm, pud, new);
+ }
+- }
+- return 0;
+-}
+
+-void __meminit vmemmap_populate_print_last(void)
+-{
+- if (addr_start) {
+- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+- addr_start, addr_end-1, node_start);
+- addr_start = 0;
+- addr_end = 0;
+- node_start = 0;
++ pmd = pmd_offset(pud, vstart);
++
++ pte = pmd_val(*pmd);
++ if (!(pte & _PAGE_VALID)) {
++ void *block = vmemmap_alloc_block(PMD_SIZE, node);
++
++ if (!block)
++ return -ENOMEM;
++
++ pmd_val(*pmd) = pte_base | __pa(block);
++ }
+ }
++
++ return 0;
+ }
+
+ void vmemmap_free(unsigned long start, unsigned long end)
+ {
+ }
+-
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ static void prot_init_common(unsigned long page_none,
+@@ -2717,8 +2719,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+ }
+ if (end > HI_OBP_ADDRESS) {
+- flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
+- do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
++ flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
++ do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
+ }
+ } else {
+ flush_tsb_kernel_range(start, end);
+diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
+index 5d3782de..ac49119 100644
+--- a/arch/sparc/mm/init_64.h
++++ b/arch/sparc/mm/init_64.h
+@@ -8,15 +8,8 @@
+ */
+
+ #define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
+-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
+-#define KPTE_BITMAP_BYTES \
+- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
+-#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
+-#define VALID_ADDR_BITMAP_BYTES \
+- ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
+
+ extern unsigned long kern_linear_pte_xor[4];
+-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+ extern unsigned int sparc64_highest_unlocked_tlb_ent;
+ extern unsigned long sparc64_kern_pri_context;
+ extern unsigned long sparc64_kern_pri_nuc_bits;
+@@ -38,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
+
+ extern void prom_world(int enter);
+
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+-#define VMEMMAP_CHUNK_SHIFT 22
+-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
+-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
+-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+-
+-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+- sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
+-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
+-#endif
+-
+ #endif /* _SPARC64_MM_INIT_H */
+diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S
+index 7994216..d7d9017 100644
+--- a/arch/sparc/power/hibernate_asm.S
++++ b/arch/sparc/power/hibernate_asm.S
+@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume)
+ nop
+
+ /* Write PAGE_OFFSET to %g7 */
+- sethi %uhi(PAGE_OFFSET), %g7
+- sllx %g7, 32, %g7
++ sethi %hi(PAGE_OFFSET), %g7
++ ldx [%g7 + %lo(PAGE_OFFSET)], %g7
+
+ setuw (PAGE_SIZE-8), %g3
+
+diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
+index ab9ccc6..7149e77 100644
+--- a/arch/sparc/prom/bootstr_64.c
++++ b/arch/sparc/prom/bootstr_64.c
+@@ -14,7 +14,10 @@
+ * the .bss section or it will break things.
+ */
+
+-#define BARG_LEN 256
++/* We limit BARG_LEN to 1024 because this is the size of the
++ * 'barg_out' command line buffer in the SILO bootloader.
++ */
++#define BARG_LEN 1024
+ struct {
+ int bootstr_len;
+ int bootstr_valid;
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b..8050f38 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ .text
+ .globl prom_cif_direct
+ prom_cif_direct:
++ save %sp, -192, %sp
+ sethi %hi(p1275buf), %o1
+ or %o1, %lo(p1275buf), %o1
+- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
+- save %o2, -192, %sp
+- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
++ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
+ mov %g4, %l0
+ mov %g5, %l1
+ mov %g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db75..110b0d7 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+ * It gets passed the pointer to the PROM vector.
+ */
+
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ phandle node;
+
+- prom_cif_init(cif_handler, cif_stack);
++ prom_cif_init(cif_handler);
+
+ prom_chosen_node = prom_finddevice(prom_chosen_path);
+ if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index e58b817..545d8bb 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -9,6 +9,7 @@
+ #include <linux/smp.h>
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
++#include <linux/irqflags.h>
+
+ #include <asm/openprom.h>
+ #include <asm/oplib.h>
+@@ -19,7 +20,6 @@
+ struct {
+ long prom_callback; /* 0x00 */
+ void (*prom_cif_handler)(long *); /* 0x08 */
+- unsigned long prom_cif_stack; /* 0x10 */
+ } p1275buf;
+
+ extern void prom_world(int);
+@@ -36,8 +36,8 @@ void p1275_cmd_direct(unsigned long *args)
+ {
+ unsigned long flags;
+
+- raw_local_save_flags(flags);
+- raw_local_irq_restore((unsigned long)PIL_NMI);
++ local_save_flags(flags);
++ local_irq_restore((unsigned long)PIL_NMI);
+ raw_spin_lock(&prom_entry_lock);
+
+ prom_world(1);
+@@ -45,11 +45,10 @@ void p1275_cmd_direct(unsigned long *args)
+ prom_world(0);
+
+ raw_spin_unlock(&prom_entry_lock);
+- raw_local_irq_restore(flags);
++ local_irq_restore(flags);
+ }
+
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index d71d5ac..ac63ea4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -480,6 +480,7 @@ struct kvm_vcpu_arch {
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
++ u64 mmio_gen;
+
+ struct kvm_pmu pmu;
+
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 5cd9bfa..c1a07d3 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -153,6 +153,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ setup_clear_cpu_cap(X86_FEATURE_ERMS);
+ }
+ }
++
++ /*
++ * Intel Quark Core DevMan_001.pdf section 6.4.11
++ * "The operating system also is required to invalidate (i.e., flush)
++ * the TLB when any changes are made to any of the page table entries.
++ * The operating system must reload CR3 to cause the TLB to be flushed"
++ *
++ * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
++	 * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
++ * to be modified
++ */
++ if (c->x86 == 5 && c->x86_model == 9) {
++ pr_info("Disabling PGE capability bit\n");
++ setup_clear_cpu_cap(X86_FEATURE_PGE);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
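Why clearing the feature bit is enough (a sketch, not the kernel's literal code): __flush_tlb_all() dispatches on whether global pages are advertised, toggling CR4.PGE when they are and reloading CR3 otherwise, which is the path Quark requires. The helper below uses stand-in names for illustration:

#include <stdbool.h>
#include <stdio.h>

static void flush_tlb_all_sketch(bool has_pge)
{
	if (has_pge)
		puts("toggle CR4.PGE (flushes global mappings)");
	else
		puts("reload CR3 (the only safe flush on Quark)");
}

int main(void)
{
	/* after setup_clear_cpu_cap(X86_FEATURE_PGE), has_pge is false */
	flush_tlb_all_sketch(false);
	return 0;
}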
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 9b53135..49088b8 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -198,16 +198,20 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+ /*
+- * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number,
+- * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation
+- * number.
++ * the low bit of the generation number is always presumed to be zero.
++ * This disables mmio caching during memslot updates. The concept is
++ * similar to a seqcount but instead of retrying the access we just punt
++ * and ignore the cache.
++ *
++ * spte bits 3-11 are used as bits 1-9 of the generation number,
++ * the bits 52-61 are used as bits 10-19 of the generation number.
+ */
+-#define MMIO_SPTE_GEN_LOW_SHIFT 3
++#define MMIO_SPTE_GEN_LOW_SHIFT 2
+ #define MMIO_SPTE_GEN_HIGH_SHIFT 52
+
+-#define MMIO_GEN_SHIFT 19
+-#define MMIO_GEN_LOW_SHIFT 9
+-#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 1)
++#define MMIO_GEN_SHIFT 20
++#define MMIO_GEN_LOW_SHIFT 10
++#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2)
+ #define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1)
+ #define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1)
+
+@@ -3157,7 +3161,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+
+- vcpu_clear_mmio_info(vcpu, ~0ul);
++ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+ hpa_t root = vcpu->arch.mmu.root_hpa;
+@@ -4379,7 +4383,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+ * The very rare case: if the generation-number is round,
+ * zap all shadow pages.
+ */
+- if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
++ if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+ printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ }
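The bit layout described above can be exercised in isolation. The sketch below uses the patch's constants to pack a 20-bit generation whose low bit is always zero into spte bits 3-11 and 52-61 and unpack it again; it is illustrative only, not the kernel's exact helpers:

#include <stdint.h>
#include <stdio.h>

#define MMIO_SPTE_GEN_LOW_SHIFT		2
#define MMIO_SPTE_GEN_HIGH_SHIFT	52
#define MMIO_GEN_LOW_SHIFT		10
#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 2)

static uint64_t generation_mmio_spte_mask(unsigned int gen)
{
	uint64_t mask;

	/* bit 0 of the generation is never stored; memslot updates flip it
	 * so cached MMIO sptes go stale for free during the update */
	mask  = (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

static unsigned int get_mmio_spte_generation(uint64_t spte)
{
	unsigned int gen;

	gen  = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
	gen |= ((spte >> MMIO_SPTE_GEN_HIGH_SHIFT) & 0x3ff) << MMIO_GEN_LOW_SHIFT;
	return gen;
}

int main(void)
{
	unsigned int gen = 0x12344;	/* any 20-bit value with bit 0 clear */
	uint64_t spte = generation_mmio_spte_mask(gen);

	printf("gen %#x round-trips to %#x\n", gen, get_mmio_spte_generation(spte));
	return 0;
}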
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 8da5823..21ea4fc 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
++ vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
++}
++
++static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
+ }
+
+ /*
+- * Clear the mmio cache info for the given gva,
+- * specially, if gva is ~0ul, we clear all mmio cache info.
++ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
++ * clear all mmio cache info.
+ */
++#define MMIO_GVA_ANY (~(gva_t)0)
++
+ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+- if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
++ if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ return;
+
+ vcpu->arch.mmio_gva = 0;
+@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+
+ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+ {
+- if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
++ vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ return true;
+
+ return false;
+@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+
+ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+ {
+- if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
++ if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
++ vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ return true;
+
+ return false;
+diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
+index 3c562f5..e1bce26 100644
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+ tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
+ xor_src_cnt, unmap->len,
+ dma_flags);
+- src_list[0] = tmp;
+-
+
+ if (unlikely(!tx))
+ async_tx_quiesce(&submit->depend_tx);
+@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
+ xor_src_cnt, unmap->len,
+ dma_flags);
+ }
++ src_list[0] = tmp;
+
+ dma_set_unmap(tx, unmap);
+ async_tx_submit(chan, tx, submit);
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index c30df50e..2495ee5 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -1081,6 +1081,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ if (!firmware_p)
+ return -EINVAL;
+
++ if (!name || name[0] == '\0')
++ return -EINVAL;
++
+ ret = _request_firmware_prepare(&fw, name, device);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index c5471cd..d39fd61 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -473,6 +473,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ {
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
++ const char *devname = "dummy";
+
+ /* If we don't have the debugfs root yet, postpone init */
+ if (!regmap_debugfs_root) {
+@@ -491,12 +492,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ INIT_LIST_HEAD(&map->debugfs_off_cache);
+ mutex_init(&map->cache_lock);
+
++ if (map->dev)
++ devname = dev_name(map->dev);
++
+ if (name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+- dev_name(map->dev), name);
++ devname, name);
+ name = map->debugfs_name;
+ } else {
+- name = dev_name(map->dev);
++ name = devname;
+ }
+
+ map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 2ea056c..f6cff3b 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1308,7 +1308,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
+ }
+
+ #ifdef LOG_DEVICE
+- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x <= %x\n", reg, val);
+ #endif
+
+@@ -1557,6 +1557,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ } else {
+ void *wval;
+
++ if (!val_count)
++ return -EINVAL;
++
+ wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ if (!wval) {
+ ret = -ENOMEM;
+@@ -1739,7 +1742,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
+ ret = map->reg_read(context, reg, val);
+ if (ret == 0) {
+ #ifdef LOG_DEVICE
+- if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
++ if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x => %x\n", reg, *val);
+ #endif
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1c7b504..e00c3f8 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -309,6 +309,9 @@ static void btusb_intr_complete(struct urb *urb)
+ BT_ERR("%s corrupted event packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++		/* Avoid a failed suspend when usb_kill_urb() runs */
++ return;
+ }
+
+ if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+@@ -397,6 +400,9 @@ static void btusb_bulk_complete(struct urb *urb)
+ BT_ERR("%s corrupted ACL packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
++ } else if (urb->status == -ENOENT) {
++		/* Avoid a failed suspend when usb_kill_urb() runs */
++ return;
+ }
+
+ if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+@@ -491,6 +497,9 @@ static void btusb_isoc_complete(struct urb *urb)
+ hdev->stat.err_rx++;
+ }
+ }
++ } else if (urb->status == -ENOENT) {
++		/* Avoid a failed suspend when usb_kill_urb() runs */
++ return;
+ }
+
+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index e36a024..5651992 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ break;
+
+ to_remove--;
+- seq = (seq - 1) % 8;
++ seq = (seq - 1) & 0x07;
+ }
+
+ if (seq != h5->rx_ack)
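The modulo-to-mask change above is not cosmetic: seq is a small unsigned value promoted to int, so when it is 0 the expression (seq - 1) % 8 yields -1 under C's signed remainder, while (seq - 1) & 0x07 wraps to 7 as the sliding-window protocol expects. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	int seq = 0;

	printf("modulo: %d\n", (seq - 1) % 8);    /* -1: wrong wrap */
	printf("mask:   %d\n", (seq - 1) & 0x07); /*  7: correct wrap */
	return 0;
}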
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 69ea36f..e99e71a 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -164,8 +164,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ ret = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel));
+
+- if (ret != 0)
++ if (ret != 0) {
++ err = ret;
+ goto error1;
++ }
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+@@ -362,7 +364,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 next_gpadl_handle;
+ unsigned long flags;
+ int ret = 0;
+- int t;
+
+ next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
+ atomic_inc(&vmbus_connection.next_gpadl_handle);
+@@ -409,9 +410,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+
+ }
+ }
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- BUG_ON(t == 0);
+-
++ wait_for_completion(&msginfo->waitevent);
+
+ /* At this point, we received the gpadl created msg */
+ *gpadl_handle = gpadlmsg->gpadl;
+@@ -434,7 +433,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ struct vmbus_channel_gpadl_teardown *msg;
+ struct vmbus_channel_msginfo *info;
+ unsigned long flags;
+- int ret, t;
++ int ret;
+
+ info = kmalloc(sizeof(*info) +
+ sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
+@@ -456,11 +455,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_gpadl_teardown));
+
+- BUG_ON(ret != 0);
+- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
+- BUG_ON(t == 0);
++ if (ret)
++ goto post_msg_err;
++
++ wait_for_completion(&info->waitevent);
+
+- /* Received a torndown response */
++post_msg_err:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -470,7 +470,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ }
+ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
+
+-static void vmbus_close_internal(struct vmbus_channel *channel)
++static int vmbus_close_internal(struct vmbus_channel *channel)
+ {
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+@@ -492,11 +492,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+
+ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
+
+- BUG_ON(ret != 0);
++ if (ret) {
++ pr_err("Close failed: close post msg return is %d\n", ret);
++ /*
++ * If we failed to post the close msg,
++ * it is perhaps better to leak memory.
++ */
++ return ret;
++ }
++
+ /* Tear down the gpadl for the channel's ring buffer */
+- if (channel->ringbuffer_gpadlhandle)
+- vmbus_teardown_gpadl(channel,
+- channel->ringbuffer_gpadlhandle);
++ if (channel->ringbuffer_gpadlhandle) {
++ ret = vmbus_teardown_gpadl(channel,
++ channel->ringbuffer_gpadlhandle);
++ if (ret) {
++ pr_err("Close failed: teardown gpadl return %d\n", ret);
++ /*
++ * If we failed to teardown gpadl,
++ * it is perhaps better to leak memory.
++ */
++ return ret;
++ }
++ }
+
+ /* Cleanup the ring buffers for this channel */
+ hv_ringbuffer_cleanup(&channel->outbound);
+@@ -505,7 +522,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
+ free_pages((unsigned long)channel->ringbuffer_pages,
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+
+-
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index ce5a9f2..d8fd95c 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -408,10 +408,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
+ * insufficient resources. Retry the operation a couple of
+ * times before giving up.
+ */
+- while (retries < 3) {
+- ret = hv_post_message(conn_id, 1, buffer, buflen);
+- if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
++ while (retries < 10) {
++ ret = hv_post_message(conn_id, 1, buffer, buflen);
++
++ switch (ret) {
++ case HV_STATUS_INSUFFICIENT_BUFFERS:
++ ret = -ENOMEM;
++ case -ENOMEM:
++ break;
++ case HV_STATUS_SUCCESS:
+ return ret;
++ default:
++			pr_err("hv_post_message() failed; error code: %d\n", ret);
++ return -EINVAL;
++ }
++
+ retries++;
+ msleep(100);
+ }
+diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
+index 5653e50..424f51d 100644
+--- a/drivers/message/fusion/mptspi.c
++++ b/drivers/message/fusion/mptspi.c
+@@ -1422,6 +1422,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto out_mptspi_probe;
+ }
+
++	/* VMware emulation doesn't properly implement WRITE_SAME
++ */
++ if (pdev->subsystem_vendor == 0x15AD)
++ sh->no_write_same = 1;
++
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 4bc7d62..9a07bba 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
+
+ dev_dbg(dev, "Device probe\n");
+
+- strncpy(id.name, dev_name(dev), sizeof(id.name));
++ strlcpy(id.name, dev_name(dev), sizeof(id.name));
+
+ return driver->probe(device, &id);
+ }
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index df1f5e7..1ac33d9 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -272,6 +272,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+@@ -315,6 +317,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index a394a9a..7cf6081 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -2039,7 +2039,7 @@ struct mac_iveiv_entry {
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
+ */
+-#define BBP1_TX_POWER_CTRL FIELD8(0x07)
++#define BBP1_TX_POWER_CTRL FIELD8(0x03)
+ #define BBP1_TX_ANTENNA FIELD8(0x18)
+
+ /*
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 483d9ad..9773667 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -855,7 +855,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ rangesz = pna + na + ns;
+ nranges = rlen / sizeof(__be32) / rangesz;
+
+- for (i = 0; i < nranges; i++) {
++ for (i = 0; i < nranges; i++, range += rangesz) {
+ u32 flags = of_read_number(range, 1);
+ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+@@ -865,14 +865,14 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ rtype = IORESOURCE_IO;
+ else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ rtype = IORESOURCE_MEM;
++ else
++ continue;
+
+ if (slot == PCI_SLOT(devfn) && type == rtype) {
+ *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ return 0;
+ }
+-
+- range += rangesz;
+ }
+
+ return -ENOENT;
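The real fix above is structural: the cursor advance moved from the bottom of the loop body into the for-statement, so the newly added continue for unhandled flag types can no longer skip it and re-parse the same range forever. Schematically (a generic sketch, not the driver's code):

#include <stdio.h>

int main(void)
{
	int ranges[] = { 1, 0, 2 };	/* 0 stands for an unhandled type */
	int i;

	for (i = 0; i < 3; i++) {	/* advance happens in the for-step */
		if (ranges[i] == 0)
			continue;	/* safe: cannot stall the walk */
		printf("range type %d\n", ranges[i]);
	}
	return 0;
}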
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 276ef9c..39a207a 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ {
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
++ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 813f437..6e8776b 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -24,6 +24,7 @@
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/ktime.h>
++#include <linux/mm.h>
+ #include <asm/dma.h> /* isa_dma_bridge_buggy */
+ #include "pci.h"
+
+@@ -287,6 +288,25 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+
++/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
++static void quirk_extend_bar_to_page(struct pci_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++ struct resource *r = &dev->resource[i];
++
++ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
++ r->end = PAGE_SIZE - 1;
++ r->start = 0;
++ r->flags |= IORESOURCE_UNSET;
++ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
++ i, r);
++ }
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
++
+ /*
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
+diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
+index b2fcac7..5bb9406 100644
+--- a/drivers/scsi/be2iscsi/be_mgmt.c
++++ b/drivers/scsi/be2iscsi/be_mgmt.c
+@@ -897,17 +897,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
+
+ if (ip_action == IP_ACTION_ADD) {
+ memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+- ip_param->len);
++ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ if (subnet_param)
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+- subnet_param->value, subnet_param->len);
++ subnet_param->value,
++ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ } else {
+ memcpy(req->ip_params.ip_record.ip_addr.addr,
+- if_info->ip_addr.addr, ip_param->len);
++ if_info->ip_addr.addr,
++ sizeof(req->ip_params.ip_record.ip_addr.addr));
+
+ memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+- if_info->ip_addr.subnet_mask, ip_param->len);
++ if_info->ip_addr.subnet_mask,
++ sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
+ }
+
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+@@ -935,7 +938,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+ req->action = gtway_action;
+ req->ip_addr.ip_type = BE2_IPV4;
+
+- memcpy(req->ip_addr.addr, gt_addr, param_len);
++ memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+
+ return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ }
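Each copy above is now bounded by the size of the destination field instead of a caller-supplied length, the standard guard against overrunning a fixed-size record. A minimal sketch of the pattern, with a hypothetical struct standing in for the firmware request:

#include <string.h>

struct ip_record {			/* hypothetical stand-in */
	unsigned char addr[16];
};

static void set_addr(struct ip_record *rec, const void *src)
{
	/* bound the copy by the destination, never by a wire-supplied len */
	memcpy(rec->addr, src, sizeof(rec->addr));
}

int main(void)
{
	struct ip_record rec;
	unsigned char wire[64] = { 0 };	/* possibly oversized input */

	set_addr(&rec, wire);
	return 0;
}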
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 83cb612..23c1b0c 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3039,10 +3039,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
+ }
+
+ static void
+-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
++qla2x00_clear_drv_active(struct qla_hw_data *ha)
+ {
+- struct qla_hw_data *ha = vha->hw;
+-
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(ha);
+@@ -3111,7 +3109,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
+
+ scsi_host_put(base_vha->host);
+
+- qla2x00_clear_drv_active(base_vha);
++ qla2x00_clear_drv_active(ha);
+
+ qla2x00_unmap_iobases(ha);
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 0cb7307..2f264ac 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1382,12 +1382,10 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+ {
+- struct qla_hw_data *ha = vha->hw;
+- device_reg_t __iomem *reg = ha->iobase;
+ uint32_t cnt;
+
+ if (vha->req->cnt < (req_cnt + 2)) {
+- cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
++ cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ "Request ring circled: cnt=%d, vha->->ring_index=%d, "
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 6d207af..a4c45ea 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -89,7 +89,13 @@ err_exit:
+
+ static void mid_spi_dma_exit(struct dw_spi *dws)
+ {
++ if (!dws->dma_inited)
++ return;
++
++ dmaengine_terminate_all(dws->txchan);
+ dma_release_channel(dws->txchan);
++
++ dmaengine_terminate_all(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+ }
+
+@@ -136,7 +142,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ txconf.dst_addr_width = dws->dma_width;
+ txconf.device_fc = false;
+
+ txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+@@ -159,7 +165,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ rxconf.src_addr_width = dws->dma_width;
+ rxconf.device_fc = false;
+
+ rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index db8434d..f4e68b3 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -260,8 +260,16 @@ serial_omap_baud_is_mode16(struct uart_port *port, unsigned int baud)
+ {
+ unsigned int n13 = port->uartclk / (13 * baud);
+ unsigned int n16 = port->uartclk / (16 * baud);
+- int baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
+- int baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
++ int baudAbsDiff13;
++ int baudAbsDiff16;
++
++ if (n13 == 0)
++ n13 = 1;
++ if (n16 == 0)
++ n16 = 1;
++
++ baudAbsDiff13 = baud - (port->uartclk / (13 * n13));
++ baudAbsDiff16 = baud - (port->uartclk / (16 * n16));
+ if (baudAbsDiff13 < 0)
+ baudAbsDiff13 = -baudAbsDiff13;
+ if (baudAbsDiff16 < 0)
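The guard above exists because at high requested baud rates the integer divisor computes to zero, and the subsequent uartclk / (13 * n13) would divide by zero. Worked numbers, as a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 48000000, baud = 4000000;
	unsigned int n13 = uartclk / (13 * baud);	/* 0 at this rate */
	unsigned int n16 = uartclk / (16 * baud);	/* 0 at this rate */

	if (n13 == 0)
		n13 = 1;
	if (n16 == 0)
		n16 = 1;

	printf("diff13=%d diff16=%d\n",
	       (int)(baud - uartclk / (13 * n13)),
	       (int)(baud - uartclk / (16 * n16)));
	return 0;
}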
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index 8154165..fd13ef0 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -445,7 +445,7 @@ config USB_GOKU
+ gadget drivers to also be dynamically linked.
+
+ config USB_EG20T
+- tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
++ tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
+ depends on PCI
+ help
+ This is a USB device driver for EG20T PCH.
+@@ -466,6 +466,7 @@ config USB_EG20T
+ ML7213/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7831 is completely compatible for Intel EG20T PCH.
+
++	  This driver can also be used with Intel's Quark X1000 SoC platform.
+ #
+ # LAST -- dummy/emulated controller
+ #
+diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
+index eb8c3be..460d953 100644
+--- a/drivers/usb/gadget/pch_udc.c
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -343,6 +343,7 @@ struct pch_vbus_gpio_data {
+ * @setup_data: Received setup data
+ * @phys_addr: of device memory
+ * @base_addr: for mapped device memory
++ * @bar: Indicates which PCI BAR for USB regs
+ * @irq: IRQ line for the device
+ * @cfg_data: current cfg, intf, and alt in use
+ * @vbus_gpio:	GPIO information for detecting VBUS
+@@ -370,14 +371,17 @@ struct pch_udc_dev {
+ struct usb_ctrlrequest setup_data;
+ unsigned long phys_addr;
+ void __iomem *base_addr;
++ unsigned bar;
+ unsigned irq;
+ struct pch_udc_cfg_data cfg_data;
+ struct pch_vbus_gpio_data vbus_gpio;
+ };
+ #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
+
++#define PCH_UDC_PCI_BAR_QUARK_X1000 0
+ #define PCH_UDC_PCI_BAR 1
+ #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
+ #define PCI_VENDOR_ID_ROHM 0x10DB
+ #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
+ #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
+@@ -3076,7 +3080,7 @@ static void pch_udc_remove(struct pci_dev *pdev)
+ iounmap(dev->base_addr);
+ if (dev->mem_region)
+ release_mem_region(dev->phys_addr,
+- pci_resource_len(pdev, PCH_UDC_PCI_BAR));
++ pci_resource_len(pdev, dev->bar));
+ if (dev->active)
+ pci_disable_device(pdev);
+ kfree(dev);
+@@ -3144,9 +3148,15 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ dev->active = 1;
+ pci_set_drvdata(pdev, dev);
+
++ /* Determine BAR based on PCI ID */
++ if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
++ dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
++ else
++ dev->bar = PCH_UDC_PCI_BAR;
++
+ /* PCI resource allocation */
+- resource = pci_resource_start(pdev, 1);
+- len = pci_resource_len(pdev, 1);
++ resource = pci_resource_start(pdev, dev->bar);
++ len = pci_resource_len(pdev, dev->bar);
+
+ if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
+ dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
+@@ -3212,6 +3222,12 @@ finished:
+
+ static const struct pci_device_id pch_udc_pcidev_id[] = {
+ {
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
++ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
++ .class_mask = 0xffffffff,
++ },
++ {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0165b86..a9a881e 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2510,23 +2510,28 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_map *em = NULL;
+ struct extent_state *cached_state = NULL;
+- u64 lockstart = *offset;
+- u64 lockend = i_size_read(inode);
+- u64 start = *offset;
+- u64 len = i_size_read(inode);
++ u64 lockstart;
++ u64 lockend;
++ u64 start;
++ u64 len;
+ int ret = 0;
+
+- lockend = max_t(u64, root->sectorsize, lockend);
++ if (inode->i_size == 0)
++ return -ENXIO;
++
++ /*
++	 * *offset can be negative; in this case we start finding DATA/HOLE from
++ * the very start of the file.
++ */
++ start = max_t(loff_t, 0, *offset);
++
++ lockstart = round_down(start, root->sectorsize);
++ lockend = round_up(i_size_read(inode), root->sectorsize);
+ if (lockend <= lockstart)
+ lockend = lockstart + root->sectorsize;
+-
+ lockend--;
+ len = lockend - lockstart + 1;
+
+- len = max_t(u64, len, root->sectorsize);
+- if (inode->i_size == 0)
+- return -ENXIO;
+-
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
+ &cached_state);
+
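Worked numbers for the range computed above, assuming a 4096-byte sectorsize and an i_size of 10000: a negative *offset clamps to zero, lockstart rounds down to a sector boundary, and lockend rounds i_size up before the final decrement. A standalone sketch:

#include <stdio.h>

#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	long offset = -42, i_size = 10000, sectorsize = 4096;
	long start = offset > 0 ? offset : 0;
	long lockstart = ROUND_DOWN(start, sectorsize);
	long lockend = ROUND_UP(i_size, sectorsize);

	if (lockend <= lockstart)
		lockend = lockstart + sectorsize;
	lockend--;

	printf("lock range [%ld, %ld], len %ld\n",
	       lockstart, lockend, lockend - lockstart + 1);
	return 0;
}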
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c69c763..d68a725 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3596,7 +3596,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ * without delay
+ */
+ if (!btrfs_is_free_space_inode(inode)
+- && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
++ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
++ && !root->fs_info->log_root_recovering) {
+ btrfs_update_root_times(trans, root);
+
+ ret = btrfs_delayed_update_inode(trans, root, inode);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index a6d8efa..0b72006 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -302,6 +302,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
+ goto out_drop;
+
+ } else {
++ ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
++ if (ret && ret != -ENODATA)
++ goto out_drop;
+ ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
+ }
+
+@@ -4750,6 +4753,12 @@ long btrfs_ioctl(struct file *file, unsigned int
+ if (ret)
+ return ret;
+ ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
++ /*
++ * The transaction thread may want to do more work,
++		 * namely it pokes the cleaner kthread that will start
++ * processing uncleaned subvols.
++ */
++ wake_up_process(root->fs_info->transaction_kthread);
+ return ret;
+ }
+ case BTRFS_IOC_START_SYNC:
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 07b3b36..01f977e 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -736,7 +736,8 @@ again:
+ err = ret;
+ goto out;
+ }
+- BUG_ON(!ret || !path1->slots[0]);
++ ASSERT(ret);
++ ASSERT(path1->slots[0]);
+
+ path1->slots[0]--;
+
+@@ -746,10 +747,10 @@ again:
+ * the backref was added previously when processing
+ * backref of type BTRFS_TREE_BLOCK_REF_KEY
+ */
+- BUG_ON(!list_is_singular(&cur->upper));
++ ASSERT(list_is_singular(&cur->upper));
+ edge = list_entry(cur->upper.next, struct backref_edge,
+ list[LOWER]);
+- BUG_ON(!list_empty(&edge->list[UPPER]));
++ ASSERT(list_empty(&edge->list[UPPER]));
+ exist = edge->node[UPPER];
+ /*
+ * add the upper level block to pending list if we need
+@@ -831,7 +832,7 @@ again:
+ cur->cowonly = 1;
+ }
+ #else
+- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
++ ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
+ if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ #endif
+ if (key.objectid == key.offset) {
+@@ -840,7 +841,7 @@ again:
+ * backref of this type.
+ */
+ root = find_reloc_root(rc, cur->bytenr);
+- BUG_ON(!root);
++ ASSERT(root);
+ cur->root = root;
+ break;
+ }
+@@ -868,7 +869,7 @@ again:
+ } else {
+ upper = rb_entry(rb_node, struct backref_node,
+ rb_node);
+- BUG_ON(!upper->checked);
++ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ }
+ list_add_tail(&edge->list[LOWER], &cur->upper);
+@@ -892,7 +893,7 @@ again:
+
+ if (btrfs_root_level(&root->root_item) == cur->level) {
+ /* tree root */
+- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++ ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ cur->bytenr);
+ if (should_ignore_root(root))
+ list_add(&cur->list, &useless);
+@@ -927,7 +928,7 @@ again:
+ need_check = true;
+ for (; level < BTRFS_MAX_LEVEL; level++) {
+ if (!path2->nodes[level]) {
+- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
++ ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ lower->bytenr);
+ if (should_ignore_root(root))
+ list_add(&lower->list, &useless);
+@@ -976,12 +977,15 @@ again:
+ need_check = false;
+ list_add_tail(&edge->list[UPPER],
+ &list);
+- } else
++ } else {
++ if (upper->checked)
++ need_check = true;
+ INIT_LIST_HEAD(&edge->list[UPPER]);
++ }
+ } else {
+ upper = rb_entry(rb_node, struct backref_node,
+ rb_node);
+- BUG_ON(!upper->checked);
++ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ if (!upper->owner)
+ upper->owner = btrfs_header_owner(eb);
+@@ -1025,7 +1029,7 @@ next:
+ * everything goes well, connect backref nodes and insert backref nodes
+ * into the cache.
+ */
+- BUG_ON(!node->checked);
++ ASSERT(node->checked);
+ cowonly = node->cowonly;
+ if (!cowonly) {
+ rb_node = tree_insert(&cache->rb_root, node->bytenr,
+@@ -1061,8 +1065,21 @@ next:
+ continue;
+ }
+
+- BUG_ON(!upper->checked);
+- BUG_ON(cowonly != upper->cowonly);
++ if (!upper->checked) {
++ /*
++ * Still want to blow up for developers since this is a
++ * logic bug.
++ */
++ ASSERT(0);
++ err = -EINVAL;
++ goto out;
++ }
++ if (cowonly != upper->cowonly) {
++ ASSERT(0);
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (!cowonly) {
+ rb_node = tree_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+@@ -1085,7 +1102,7 @@ next:
+ while (!list_empty(&useless)) {
+ upper = list_entry(useless.next, struct backref_node, list);
+ list_del_init(&upper->list);
+- BUG_ON(!list_empty(&upper->upper));
++ ASSERT(list_empty(&upper->upper));
+ if (upper == node)
+ node = NULL;
+ if (upper->lowest) {
+@@ -1118,29 +1135,45 @@ out:
+ if (err) {
+ while (!list_empty(&useless)) {
+ lower = list_entry(useless.next,
+- struct backref_node, upper);
+- list_del_init(&lower->upper);
++ struct backref_node, list);
++ list_del_init(&lower->list);
+ }
+- upper = node;
+- INIT_LIST_HEAD(&list);
+- while (upper) {
+- if (RB_EMPTY_NODE(&upper->rb_node)) {
+- list_splice_tail(&upper->upper, &list);
+- free_backref_node(cache, upper);
+- }
+-
+- if (list_empty(&list))
+- break;
+-
+- edge = list_entry(list.next, struct backref_edge,
+- list[LOWER]);
++ while (!list_empty(&list)) {
++ edge = list_first_entry(&list, struct backref_edge,
++ list[UPPER]);
++ list_del(&edge->list[UPPER]);
+ list_del(&edge->list[LOWER]);
++ lower = edge->node[LOWER];
+ upper = edge->node[UPPER];
+ free_backref_edge(cache, edge);
++
++ /*
++ * Lower is no longer linked to any upper backref nodes
++ * and isn't in the cache, we can free it ourselves.
++ */
++ if (list_empty(&lower->upper) &&
++ RB_EMPTY_NODE(&lower->rb_node))
++ list_add(&lower->list, &useless);
++
++ if (!RB_EMPTY_NODE(&upper->rb_node))
++ continue;
++
++			/* Add this guy's upper edges to the list to process */
++ list_for_each_entry(edge, &upper->upper, list[LOWER])
++ list_add_tail(&edge->list[UPPER], &list);
++ if (list_empty(&upper->upper))
++ list_add(&upper->list, &useless);
++ }
++
++ while (!list_empty(&useless)) {
++ lower = list_entry(useless.next,
++ struct backref_node, list);
++ list_del_init(&lower->list);
++ free_backref_node(cache, lower);
+ }
+ return ERR_PTR(err);
+ }
+- BUG_ON(node && node->detached);
++ ASSERT(!node || !node->detached);
+ return node;
+ }
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a65ed4c..20d7935 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -4728,7 +4728,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+
+ if (S_ISREG(sctx->cur_inode_mode)) {
+ if (need_send_hole(sctx)) {
+- if (sctx->cur_inode_last_extent == (u64)-1) {
++ if (sctx->cur_inode_last_extent == (u64)-1 ||
++ sctx->cur_inode_last_extent <
++ sctx->cur_inode_size) {
+ ret = get_last_extent(sctx, (u64)-1);
+ if (ret)
+ goto out;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index b05bf58..a0b65a0 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -592,7 +592,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ if (transid <= root->fs_info->last_trans_committed)
+ goto out;
+
+- ret = -EINVAL;
+ /* find specified transaction */
+ spin_lock(&root->fs_info->trans_lock);
+ list_for_each_entry(t, &root->fs_info->trans_list, list) {
+@@ -608,9 +607,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+ }
+ }
+ spin_unlock(&root->fs_info->trans_lock);
+- /* The specified transaction doesn't exist */
+- if (!cur_trans)
++
++ /*
++ * The specified transaction doesn't exist, or we
++ * raced with btrfs_commit_transaction
++ */
++ if (!cur_trans) {
++ if (transid > root->fs_info->last_trans_committed)
++ ret = -EINVAL;
+ goto out;
++ }
+ } else {
+ /* find newest transaction that is committing | committed */
+ spin_lock(&root->fs_info->trans_lock);
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index b167ca4..a85ceb7 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -1039,7 +1039,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ }
+
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+- if (!rc)
++ if (!rc && dentry->d_inode)
+ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ out:
+ return rc;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 75536db..c7d4a0a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1365,6 +1365,8 @@ static int do_umount(struct mount *mnt, int flags)
+ * Special case for "unmounting" root ...
+ * we just try to remount it readonly.
+ */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+ down_write(&sb->s_umount);
+ if (!(sb->s_flags & MS_RDONLY))
+ retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2e9662e..da657b7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7242,7 +7242,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+ int ret = 0;
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+- return 0;
++ return -EAGAIN;
+ task = _nfs41_proc_sequence(clp, cred, false);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
+index 1720d32..e1ba58c 100644
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
+ }
+ nfs_expire_all_delegations(clp);
+ } else {
++ int ret;
++
+ /* Queue an asynchronous RENEW. */
+- ops->sched_state_renewal(clp, cred, renew_flags);
++ ret = ops->sched_state_renewal(clp, cred, renew_flags);
+ put_rpccred(cred);
+- goto out_exp;
++ switch (ret) {
++ default:
++ goto out_exp;
++ case -EAGAIN:
++ case -ENOMEM:
++ break;
++ }
+ }
+ } else {
+ dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 27f5f85..b4f177f 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1732,7 +1732,8 @@ restart:
+ if (status < 0) {
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+- return nfs4_recovery_handle_error(clp, status);
++ status = nfs4_recovery_handle_error(clp, status);
++ return (status != 0) ? status : -EAGAIN;
+ }
+
+ nfs4_put_state_owner(sp);
+@@ -1741,7 +1742,7 @@ restart:
+ spin_unlock(&clp->cl_lock);
+ }
+ rcu_read_unlock();
+- return status;
++ return 0;
+ }
+
+ static int nfs4_check_lease(struct nfs_client *clp)
+@@ -1788,7 +1789,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+ break;
+ case -NFS4ERR_STALE_CLIENTID:
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+- nfs4_state_clear_reclaim_reboot(clp);
+ nfs4_state_start_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_CLID_INUSE:
+@@ -2370,6 +2370,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ status = nfs4_check_lease(clp);
+ if (status < 0)
+ goto out_error;
++ continue;
+ }
+
+ if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
+@@ -2391,14 +2392,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ section = "reclaim reboot";
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->reboot_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+- continue;
+- nfs4_state_end_reclaim_reboot(clp);
+- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
++ nfs4_state_end_reclaim_reboot(clp);
+ }
+
+ /* Now recover expired state... */
+@@ -2406,9 +2404,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ section = "reclaim nograce";
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->nograce_recovery_ops);
+- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
++ if (status == -EAGAIN)
+ continue;
+ if (status < 0)
+ goto out_error;
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 287a22c..de6323e 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -71,7 +71,7 @@ static int create_fd(struct fsnotify_group *group,
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+- client_fd = get_unused_fd();
++ client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
+ if (client_fd < 0)
+ return client_fd;
+
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 5d2518b..0461fbe 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -434,10 +434,22 @@ xfs_start_page_writeback(
+ {
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
+- if (clear_dirty)
++
++ /*
++ * if the page was not fully cleaned, we need to ensure that the higher
++ * layers come back to it correctly. That means we need to keep the page
++ * dirty, and for WB_SYNC_ALL writeback we need to ensure the
++ * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
++ * write this page in this writeback sweep will be made.
++ */
++ if (clear_dirty) {
+ clear_page_dirty_for_io(page);
+- set_page_writeback(page);
++ set_page_writeback(page);
++ } else
++ set_page_writeback_keepwrite(page);
++
+ unlock_page(page);
++
+ /* If no buffers on the page are to be written, finish it here */
+ if (!buffers)
+ end_page_writeback(page);
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+new file mode 100644
+index 0000000..cdd1cc2
+--- /dev/null
++++ b/include/linux/compiler-gcc5.h
+@@ -0,0 +1,66 @@
++#ifndef __LINUX_COMPILER_H
++#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
++#endif
++
++#define __used __attribute__((__used__))
++#define __must_check __attribute__((warn_unused_result))
++#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
++
++/* Mark functions as cold. gcc will assume any path leading to a call
++ to them will be unlikely. This means a lot of manual unlikely()s
++ are unnecessary now for any paths leading to the usual suspects
++ like BUG(), printk(), panic() etc. [but let's keep them for now for
++ older compilers]
++
++ Early snapshots of gcc 4.3 don't support this and we can't detect this
++ in the preprocessor, but we can live with this because they're unreleased.
++ Maketime probing would be overkill here.
++
++ gcc also has a __attribute__((__hot__)) to move hot functions into
++ a special section, but I don't see any sense in this right now in
++ the kernel context */
++#define __cold __attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++
++/*
++ * Mark a position in code as unreachable. This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased. Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone __attribute__((__noclone__))
++
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible __attribute__((externally_visible))
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 97fbecd..057c1d8 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2551,6 +2551,7 @@
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
+ #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
+ #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index d7ca410..218b058 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1876,11 +1876,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
+-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
++/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
++ * __GFP_FS is also cleared as it implies __GFP_IO.
++ */
+ static inline gfp_t memalloc_noio_flags(gfp_t flags)
+ {
+ if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+- flags &= ~__GFP_IO;
++ flags &= ~(__GFP_IO | __GFP_FS);
+ return flags;
+ }
+
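
The one-line sched.h fix above matters because __GFP_FS implies __GFP_IO: filesystem reclaim can itself submit I/O, so masking only __GFP_IO would let a PF_MEMALLOC_NOIO task recurse into the very path it is trying to avoid. A userspace sketch of the masking logic; the flag values are illustrative, not the kernel's real GFP bits:

#include <assert.h>

/* Illustrative flag bits -- not the kernel's actual GFP values. */
#define GFP_IO 0x40u
#define GFP_FS 0x80u

/* FS reclaim implies I/O, so a NOIO context must drop both bits. */
static unsigned int noio_flags(unsigned int flags, int task_noio)
{
	if (task_noio)
		flags &= ~(GFP_IO | GFP_FS);
	return flags;
}

int main(void)
{
	assert(noio_flags(GFP_IO | GFP_FS, 1) == 0);
	assert(noio_flags(GFP_IO | GFP_FS, 0) == (GFP_IO | GFP_FS));
	return 0;
}
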
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 0b0dc02..fda2950 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -329,6 +329,8 @@ static void get_futex_key_refs(union futex_key *key)
+ case FUT_OFF_MMSHARED:
+ futex_get_mm(key); /* implies MB (B) */
+ break;
++ default:
++ smp_mb(); /* explicit MB (B) */
+ }
+ }
+
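
The futex hunk adds a default: arm so that private futexes, which take no inode or mm reference, still get the full memory barrier that the refcount operations in the other arms provide implicitly (the "implies MB (B)" pairing noted in the comments). A hedged C11 sketch of the same pattern, with illustrative names only:

#include <stdatomic.h>

/* Illustrative stand-in for get_futex_key_refs(): every arm must
 * order memory, including the private case that takes no reference. */
enum key_type { KEY_INODE, KEY_MM, KEY_PRIVATE };

static void get_key_refs(enum key_type t, atomic_int *inode_ref,
			 atomic_int *mm_ref)
{
	switch (t) {
	case KEY_INODE:
		atomic_fetch_add(inode_ref, 1);            /* implies a full barrier */
		break;
	case KEY_MM:
		atomic_fetch_add(mm_ref, 1);               /* implies a full barrier */
		break;
	default:                                           /* private futex */
		atomic_thread_fence(memory_order_seq_cst); /* explicit barrier */
		break;
	}
}
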
+diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
+index 8563081..a1c387f 100644
+--- a/lib/lzo/lzo1x_decompress_safe.c
++++ b/lib/lzo/lzo1x_decompress_safe.c
+@@ -19,31 +19,21 @@
+ #include <linux/lzo.h>
+ #include "lzodefs.h"
+
+-#define HAVE_IP(t, x) \
+- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
++#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
++#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
++#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
++#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
++#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+
+-#define HAVE_OP(t, x) \
+- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+- (((t + x) >= t) && ((t + x) >= x)))
+-
+-#define NEED_IP(t, x) \
+- do { \
+- if (!HAVE_IP(t, x)) \
+- goto input_overrun; \
+- } while (0)
+-
+-#define NEED_OP(t, x) \
+- do { \
+- if (!HAVE_OP(t, x)) \
+- goto output_overrun; \
+- } while (0)
+-
+-#define TEST_LB(m_pos) \
+- do { \
+- if ((m_pos) < out) \
+- goto lookbehind_overrun; \
+- } while (0)
++/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
++ * count without overflowing an integer. The multiply will overflow when
++ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
++ * depending on the base count. Since the base count is taken from a u8
++ * and a few bits, it is safe to assume that it will always be lower than
++ * or equal to 2*255, thus we can always prevent any overflow by accepting
++ * two fewer 255 steps. See Documentation/lzo.txt for more information.
++ */
++#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
+
+ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 15 + *ip++;
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 15 + *ip++;
+ }
+ t += 3;
+ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
++ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+@@ -101,8 +98,8 @@ copy_literal_run:
+ } else
+ #endif
+ {
+- NEED_OP(t, 0);
+- NEED_IP(t, 3);
++ NEED_OP(t);
++ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+@@ -115,7 +112,7 @@ copy_literal_run:
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+- NEED_OP(2, 0);
++ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -136,13 +133,20 @@ copy_literal_run:
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 31 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 31 + *ip++;
++ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+@@ -154,13 +158,20 @@ copy_literal_run:
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
++ size_t offset;
++ const unsigned char *ip_last = ip;
++
+ while (unlikely(*ip == 0)) {
+- t += 255;
+ ip++;
+- NEED_IP(1, 0);
++ NEED_IP(1);
+ }
+- t += 7 + *ip++;
+- NEED_IP(2, 0);
++ offset = ip - ip_last;
++ if (unlikely(offset > MAX_255_COUNT))
++ return LZO_E_ERROR;
++
++ offset = (offset << 8) - offset;
++ t += offset + 7 + *ip++;
++ NEED_IP(2);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+@@ -174,7 +185,7 @@ copy_literal_run:
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+- if (likely(HAVE_OP(t, 15))) {
++ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+@@ -184,7 +195,7 @@ copy_literal_run:
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+- if (HAVE_IP(6, 0)) {
++ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+@@ -192,7 +203,7 @@ copy_literal_run:
+ continue;
+ }
+ } else {
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+@@ -201,7 +212,7 @@ copy_literal_run:
+ #endif
+ {
+ unsigned char *oe = op + t;
+- NEED_OP(t, 0);
++ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+@@ -214,15 +225,15 @@ match_next:
+ state = next;
+ t = next;
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
++ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+ #endif
+ {
+- NEED_IP(t, 3);
+- NEED_OP(t, 0);
++ NEED_IP(t + 3);
++ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
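
The LZO rework above replaces the old "t += 255" accumulation, which a crafted stream of zero bytes could use to wrap t, with a count-then-multiply scheme: zeros are counted first, the run is rejected if it exceeds MAX_255_COUNT, and "(offset << 8) - offset" performs the single multiply by 255. A standalone sketch of that overflow-safe decode, assuming the same bounds as the MAX_255_COUNT comment; helper names are hypothetical:

#include <stddef.h>

#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)

/* Returns the decoded run length, or 0 on a malformed/oversized run.
 * 'in' points at the first length byte, 'end' one past the buffer.
 * Mirrors the patched loop: count zeros first, multiply once, and
 * bail out before size_t arithmetic can wrap. */
static size_t decode_run(const unsigned char **in, const unsigned char *end,
			 size_t base)
{
	const unsigned char *ip = *in, *start = ip;
	size_t zeros;

	while (ip < end && *ip == 0)
		ip++;
	if (ip == end)
		return 0;                     /* input overrun */
	zeros = (size_t)(ip - start);
	if (zeros > MAX_255_COUNT)
		return 0;                     /* run would overflow */
	*in = ip + 1;
	return zeros * 255 + base + *ip;      /* cannot wrap: see bound above */
}
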
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 6afa3b4..0007c9e 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2608,12 +2608,8 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
+
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+
+- pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
+-
+- pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+-
+ sdu_len = len;
+- pdu_len -= L2CAP_SDULEN_SIZE;
++ pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
+
+ while (len > 0) {
+ if (len <= pdu_len)
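
The l2cap change sizes LE fragments from the remote MPS alone instead of starting from the ACL-MTU-derived value; deriving the length from the connection MTU could produce PDUs larger than the peer's advertised MPS. A small sketch of the resulting segmentation arithmetic, with an illustrative SDULEN_SIZE and a hypothetical helper:

#include <stdio.h>

#define SDULEN_SIZE 2  /* illustrative, mirrors L2CAP_SDULEN_SIZE */

/* First PDU carries the 2-byte SDU length; later ones use the full MPS. */
static void count_le_pdus(size_t len, size_t remote_mps)
{
	size_t pdu_len = remote_mps - SDULEN_SIZE, n = 1;

	len = (len > pdu_len) ? len - pdu_len : 0;
	for (; len; n++)
		len = (len > remote_mps) ? len - remote_mps : 0;
	printf("%zu PDUs\n", n);
}

int main(void)
{
	count_le_pdus(100, 23);  /* 21 + 23 + 23 + 23 + 10 bytes -> 5 PDUs */
	return 0;
}
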
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 734e946..6df1b25 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -194,8 +194,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ goto out;
+
+ cause = "missing-hash";
+- status =
+- (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
++ status = INTEGRITY_NOLABEL;
++ if (inode->i_size == 0) {
++ iint->flags |= IMA_NEW_FILE;
++ status = INTEGRITY_PASS;
++ }
+ goto out;
+ }
+
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 76d8aad..9f70efd 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -131,11 +131,13 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ return;
+
+ mutex_lock(&inode->i_mutex);
+- if (atomic_read(&inode->i_writecount) == 1 &&
+- iint->version != inode->i_version) {
+- iint->flags &= ~IMA_DONE_MASK;
+- if (iint->flags & IMA_APPRAISE)
+- ima_update_xattr(iint, file);
++ if (atomic_read(&inode->i_writecount) == 1) {
++ if ((iint->version != inode->i_version) ||
++ (iint->flags & IMA_NEW_FILE)) {
++ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
++ if (iint->flags & IMA_APPRAISE)
++ ima_update_xattr(iint, file);
++ }
+ }
+ mutex_unlock(&inode->i_mutex);
+ }
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 33c0a70..2f8715d 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -31,6 +31,7 @@
+ #define IMA_DIGSIG 0x01000000
+ #define IMA_DIGSIG_REQUIRED 0x02000000
+ #define IMA_PERMIT_DIRECTIO 0x04000000
++#define IMA_NEW_FILE 0x08000000
+
+ #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_APPRAISE_SUBMASK)
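
The three IMA hunks work together: appraisal tags an empty, unlabeled file with the new IMA_NEW_FILE flag instead of letting it pass forever, and the last-writer hook now resets state when either the inode version changed or that flag is set, so the file is re-appraised once it gains content. A userspace sketch of the flag lifecycle; the flag values and struct are illustrative, not the kernel's:

#include <assert.h>

/* Illustrative flags -- values don't match the kernel's. */
#define F_APPRAISED 0x1u
#define F_NEW_FILE  0x2u

struct iint { unsigned flags; unsigned long version; };

/* Last-writer check: reset state if content changed OR the file was
 * appraised while still empty (F_NEW_FILE). */
static int must_reappraise(struct iint *i, unsigned long inode_version)
{
	if (i->version != inode_version || (i->flags & F_NEW_FILE)) {
		i->flags &= ~(F_APPRAISED | F_NEW_FILE);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct iint i = { F_APPRAISED | F_NEW_FILE, 1 };
	assert(must_reappraise(&i, 1));   /* new-file flag forces it */
	assert(!must_reappraise(&i, 1));  /* steady state afterwards */
	return 0;
}
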
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 01a5e05..566b0f6 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3189,7 +3189,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
+
+ #ifndef ARCH_HAS_DMA_MMAP_COHERENT
+ /* This should be defined / handled globally! */
+-#ifdef CONFIG_ARM
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+ #endif
+ #endif
+diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
+index cae3659..0a34b5f 100644
+--- a/sound/pci/emu10k1/emu10k1_callback.c
++++ b/sound/pci/emu10k1/emu10k1_callback.c
+@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
+ * get more voice for pcm
+ *
+ * terminate the most inactive voice and hand it over as a PCM voice.
++ *
++ * voice_lock is already held.
+ */
+ int
+ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ struct snd_emux *emu;
+ struct snd_emux_voice *vp;
+ struct best_voice best[V_END];
+- unsigned long flags;
+ int i;
+
+ emu = hw->synth;
+
+- spin_lock_irqsave(&emu->voice_lock, flags);
+ lookup_voices(emu, hw, best, 1); /* no OFF voices */
+ for (i = 0; i < V_END; i++) {
+ if (best[i].voice >= 0) {
+@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
+ vp->emu->num_voices--;
+ vp->ch = -1;
+ vp->state = SNDRV_EMUX_ST_OFF;
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+ return ch;
+ }
+ }
+- spin_unlock_irqrestore(&emu->voice_lock, flags);
+
+ /* not found */
+ return -ENOMEM;
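
The emu10k1 fix removes a nested spin_lock_irqsave(): every caller already holds voice_lock, so re-acquiring it would deadlock, and the new comment records the caller-holds-lock contract. In-kernel code often asserts such contracts with lockdep_assert_held(); a userspace approximation with a non-recursive pthread mutex, noting that the trylock check only proves the lock is taken, not who holds it:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t voice_lock = PTHREAD_MUTEX_INITIALIZER;

/* Contract: voice_lock must already be held by the caller.  On a
 * non-recursive mutex, trylock returns EBUSY if the lock is taken,
 * which approximates the kernel's lockdep_assert_held() check. */
static int get_voice_locked(void)
{
	assert(pthread_mutex_trylock(&voice_lock) == EBUSY);
	/* ... look up the most inactive voice and steal it ... */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&voice_lock);
	get_voice_locked();
	pthread_mutex_unlock(&voice_lock);
	return 0;
}
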
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d135c90..8253b48 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1557,19 +1557,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ }
+ }
+
+- if (pin_eld->eld_valid && !eld->eld_valid) {
+- update_eld = true;
++ if (pin_eld->eld_valid != eld->eld_valid)
+ eld_changed = true;
+- }
++
++ if (pin_eld->eld_valid && !eld->eld_valid)
++ update_eld = true;
++
+ if (update_eld) {
+ bool old_eld_valid = pin_eld->eld_valid;
+ pin_eld->eld_valid = eld->eld_valid;
+- eld_changed = pin_eld->eld_size != eld->eld_size ||
++ if (pin_eld->eld_size != eld->eld_size ||
+ memcmp(pin_eld->eld_buffer, eld->eld_buffer,
+- eld->eld_size) != 0;
+- if (eld_changed)
++ eld->eld_size) != 0) {
+ memcpy(pin_eld->eld_buffer, eld->eld_buffer,
+ eld->eld_size);
++ eld_changed = true;
++ }
+ pin_eld->eld_size = eld->eld_size;
+ pin_eld->info = eld->info;
+
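
The ELD rework separates two questions the old code conflated: did the ELD's validity flip, and did the ELD bytes change? Either condition now raises eld_changed, while the buffer is copied only when the contents really differ. A simplified model of that intent, not the driver's exact control flow; the destination buffer is assumed large enough:

#include <stdbool.h>
#include <string.h>

/* 'changed' if validity flipped or a valid ELD's bytes differ;
 * copy only when the bytes differ.  buf must hold new_size bytes. */
static bool eld_update(bool *valid, unsigned char *buf, size_t *size,
		       bool new_valid, const unsigned char *new_buf,
		       size_t new_size)
{
	bool changed = (*valid != new_valid);

	if (new_valid &&
	    (*size != new_size || memcmp(buf, new_buf, new_size) != 0)) {
		memcpy(buf, new_buf, new_size);
		*size = new_size;
		changed = true;
	}
	*valid = new_valid;
	return changed;
}
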
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5d0058b..4c826a4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2926,6 +2926,9 @@ static void alc283_shutup(struct hda_codec *codec)
+
+ alc_write_coef_idx(codec, 0x43, 0x9004);
+
++ /* depop hp during suspend */
++ alc_write_coef_idx(codec, 0x06, 0x2100);
++
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 223c47b..c657752 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL),
+ }
+ },
+ {
++ USB_DEVICE(0x0499, 0x1509),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ /* .vendor_name = "Yamaha", */
++ /* .product_name = "Steinberg UR22", */
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 3,
++ .type = QUIRK_MIDI_YAMAHA
++ },
++ {
++ .ifnum = 4,
++ .type = QUIRK_IGNORE_INTERFACE
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
++{
+ USB_DEVICE(0x0499, 0x150a),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Yamaha", */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 03a0381..6611253 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -52,6 +52,7 @@
+
+ #include <asm/processor.h>
+ #include <asm/io.h>
++#include <asm/ioctl.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+
+@@ -95,8 +96,6 @@ static int hardware_enable_all(void);
+ static void hardware_disable_all(void);
+
+ static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+-static void update_memslots(struct kvm_memslots *slots,
+- struct kvm_memory_slot *new, u64 last_generation);
+
+ static void kvm_release_pfn_dirty(pfn_t pfn);
+ static void mark_page_dirty_in_slot(struct kvm *kvm,
+@@ -682,8 +681,7 @@ static void sort_memslots(struct kvm_memslots *slots)
+ }
+
+ static void update_memslots(struct kvm_memslots *slots,
+- struct kvm_memory_slot *new,
+- u64 last_generation)
++ struct kvm_memory_slot *new)
+ {
+ if (new) {
+ int id = new->id;
+@@ -694,8 +692,6 @@ static void update_memslots(struct kvm_memslots *slots,
+ if (new->npages != npages)
+ sort_memslots(slots);
+ }
+-
+- slots->generation = last_generation + 1;
+ }
+
+ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
+@@ -717,10 +713,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
+ {
+ struct kvm_memslots *old_memslots = kvm->memslots;
+
+- update_memslots(slots, new, kvm->memslots->generation);
++ /*
++ * Set the low bit in the generation, which disables SPTE caching
++ * until the end of synchronize_srcu_expedited.
++ */
++ WARN_ON(old_memslots->generation & 1);
++ slots->generation = old_memslots->generation + 1;
++
++ update_memslots(slots, new);
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+
++ /*
++ * Increment the new memslot generation a second time. This prevents
++ * vm exits that race with memslot updates from caching a memslot
++ * generation that will (potentially) be valid forever.
++ */
++ slots->generation++;
++
+ kvm_arch_memslots_updated(kvm);
+
+ return old_memslots;
+@@ -1970,6 +1980,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
+ if (vcpu->kvm->mm != current->mm)
+ return -EIO;
+
++ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
++ return -EINVAL;
++
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
+ /*
+ * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
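
Two hardening ideas appear in the kvm_main.c hunks: vcpu ioctls are rejected early unless their type is KVMIO, and the memslot generation is now bumped twice, so it stays odd for the whole synchronize_srcu_expedited() window and racing vcpus can never cache a generation that will later look valid. A sketch of that odd/even protocol with illustrative names:

#include <assert.h>

/* Odd/even update protocol: generation is odd while an update is in
 * flight, even when stable, so a tag taken mid-update never matches. */
struct slots { unsigned long generation; };

static void update_begin(struct slots *s)
{
	assert((s->generation & 1) == 0);  /* no nested updates */
	s->generation++;                   /* odd: caching disabled */
}

static void update_end(struct slots *s)
{
	s->generation++;                   /* even again, != any odd tag */
}

int main(void)
{
	struct slots s = { 0 };
	update_begin(&s);
	unsigned long cached = s.generation;  /* odd tag taken mid-update */
	update_end(&s);
	assert(cached != s.generation);       /* stale cache can't match */
	return 0;
}
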
diff --git a/3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch b/3.14.23/4420_grsecurity-3.0-3.14.23-201410312212.patch
index 9bb50c5..2b0f9bd 100644
--- a/3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch
+++ b/3.14.23/4420_grsecurity-3.0-3.14.23-201410312212.patch
@@ -235,7 +235,7 @@ index b89a739..e289b9b 100644
+zconf.lex.c
zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 7116fda..d8ed6e8 100644
+index 7116fda..2f71588 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1084,6 +1084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
@@ -260,7 +260,7 @@ index 7116fda..d8ed6e8 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2347,6 +2355,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2347,6 +2355,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -270,8 +270,13 @@ index 7116fda..d8ed6e8 100644
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
-+ 0/1 to disable/enable slab object sanitization (enabled by
-+ default).
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility, 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
@@ -287,7 +292,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
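
The kernel-parameters hunk above upgrades pax_sanitize_slab from a boolean to a three-way option while keeping 0/1 as compatibility aliases. A hedged sketch of how the documented mapping could be parsed; the real grsecurity parser (an early_param() hook in-kernel) may differ in detail:

#include <stdio.h>
#include <string.h>

enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

/* Maps the documented values, including the legacy 0/1 aliases. */
static int parse_sanitize(const char *s, enum sanitize_mode *mode)
{
	if (!strcmp(s, "0") || !strcmp(s, "off"))
		*mode = SANITIZE_OFF;
	else if (!strcmp(s, "1") || !strcmp(s, "fast"))
		*mode = SANITIZE_FAST;
	else if (!strcmp(s, "full"))
		*mode = SANITIZE_FULL;
	else
		return -1;
	return 0;
}

int main(void)
{
	enum sanitize_mode m;
	if (parse_sanitize("fast", &m) == 0)
		printf("mode=%d\n", m);
	return 0;
}
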
diff --git a/Makefile b/Makefile
-index a59980e..46601e4 100644
+index 135a04a..79b5e32 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -892,10 +897,10 @@ index 4733d32..b142a40 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 62d2cb5..0d7f7f5 100644
+index 62d2cb5..26e43ca 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
-@@ -18,17 +18,35 @@
+@@ -18,17 +18,41 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
@@ -907,6 +912,12 @@ index 62d2cb5..0d7f7f5 100644
#ifdef __KERNEL__
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
++#endif
++
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
@@ -931,7 +942,7 @@ index 62d2cb5..0d7f7f5 100644
#if __LINUX_ARM_ARCH__ >= 6
-@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
+@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
@@ -940,7 +951,7 @@ index 62d2cb5..0d7f7f5 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -968,7 +979,7 @@ index 62d2cb5..0d7f7f5 100644
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -62,6 +116,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
smp_mb();
__asm__ __volatile__("@ atomic_add_return\n"
@@ -978,7 +989,7 @@ index 62d2cb5..0d7f7f5 100644
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1011,7 +1022,7 @@ index 62d2cb5..0d7f7f5 100644
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)
+@@ -83,6 +173,36 @@ static inline void atomic_sub(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
@@ -1020,7 +1031,7 @@ index 62d2cb5..0d7f7f5 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1048,7 +1059,7 @@ index 62d2cb5..0d7f7f5 100644
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+@@ -101,11 +221,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
smp_mb();
__asm__ __volatile__("@ atomic_sub_return\n"
@@ -1060,7 +1071,7 @@ index 62d2cb5..0d7f7f5 100644
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1076,7 +1087,7 @@ index 62d2cb5..0d7f7f5 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "Ir" (i)
: "cc");
-@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+@@ -138,6 +272,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
@@ -1105,7 +1116,7 @@ index 62d2cb5..0d7f7f5 100644
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
-@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -156,7 +312,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
@@ -1123,7 +1134,7 @@ index 62d2cb5..0d7f7f5 100644
static inline int atomic_sub_return(int i, atomic_t *v)
{
-@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+@@ -171,6 +337,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
@@ -1134,7 +1145,7 @@ index 62d2cb5..0d7f7f5 100644
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
-@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -186,9 +356,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
@@ -1153,7 +1164,7 @@ index 62d2cb5..0d7f7f5 100644
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
-@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -201,11 +380,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc(v) atomic_add(1, v)
@@ -1181,7 +1192,7 @@ index 62d2cb5..0d7f7f5 100644
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -221,6 +410,14 @@ typedef struct {
+@@ -221,6 +416,14 @@ typedef struct {
long long counter;
} atomic64_t;
@@ -1196,7 +1207,7 @@ index 62d2cb5..0d7f7f5 100644
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -237,6 +440,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1216,7 +1227,7 @@ index 62d2cb5..0d7f7f5 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
-@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -245,6 +461,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
);
}
@@ -1232,7 +1243,7 @@ index 62d2cb5..0d7f7f5 100644
#else
static inline long long atomic64_read(const atomic64_t *v)
{
-@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -259,6 +484,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1252,7 +1263,7 @@ index 62d2cb5..0d7f7f5 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
-@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -273,6 +511,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
: "cc");
}
@@ -1274,7 +1285,7 @@ index 62d2cb5..0d7f7f5 100644
#endif
static inline void atomic64_add(long long i, atomic64_t *v)
-@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
+@@ -284,6 +537,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
@@ -1282,7 +1293,7 @@ index 62d2cb5..0d7f7f5 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1312,7 +1323,7 @@ index 62d2cb5..0d7f7f5 100644
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+@@ -303,6 +587,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
@@ -1322,7 +1333,7 @@ index 62d2cb5..0d7f7f5 100644
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1357,7 +1368,7 @@ index 62d2cb5..0d7f7f5 100644
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
+@@ -325,6 +647,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
@@ -1365,7 +1376,7 @@ index 62d2cb5..0d7f7f5 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1395,7 +1406,7 @@ index 62d2cb5..0d7f7f5 100644
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -344,10 +691,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+@@ -344,10 +697,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
@@ -1406,7 +1417,7 @@ index 62d2cb5..0d7f7f5 100644
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1422,7 +1433,7 @@ index 62d2cb5..0d7f7f5 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (i)
: "cc");
-@@ -382,6 +744,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+@@ -382,6 +750,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
return oldval;
}
@@ -1454,7 +1465,7 @@ index 62d2cb5..0d7f7f5 100644
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
long long result;
-@@ -406,20 +793,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+@@ -406,20 +799,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
@@ -1475,7 +1486,7 @@ index 62d2cb5..0d7f7f5 100644
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1495,7 +1506,7 @@ index 62d2cb5..0d7f7f5 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -442,13 +843,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -442,13 +849,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1507,7 +1518,7 @@ index 62d2cb5..0d7f7f5 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1524,7 +1535,7 @@ index 62d2cb5..0d7f7f5 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -461,10 +874,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -461,10 +880,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
@@ -3932,7 +3943,7 @@ index 6eb97b3..ac509f6 100644
atomic64_set(&mm->context.id, asid);
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index eb8830a..5360ce7 100644
+index eb8830a..e8ff52e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,7 @@
@@ -4046,7 +4057,7 @@ index eb8830a..5360ce7 100644
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
-@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
ifsr_info[nr].name = name;
}
@@ -4126,9 +4137,15 @@ index eb8830a..5360ce7 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++ unsigned short bkpt;
++
++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
+ unsigned int bkpt;
+
+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
+ current->thread.error_code = ifsr;
+ current->thread.trap_no = 0;
+ pax_report_refcount_overflow(regs);
@@ -9862,20 +9879,6 @@ index 370ca1e..d4f4a98 100644
extern unsigned long sparc64_elf_hwcap;
#define ELF_HWCAP sparc64_elf_hwcap
-diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
-index a12dbe3..0337e85 100644
---- a/arch/sparc/include/asm/oplib_64.h
-+++ b/arch/sparc/include/asm/oplib_64.h
-@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
- /* You must call prom_init() before using any of the library services,
- * preferably as early as possible. Pass it the romvec pointer.
- */
--extern void prom_init(void *cif_handler, void *cif_stack);
-+void prom_init(void *cif_handler);
-+void prom_init_report(void);
-
- /* Boot argument acquisition, returns the boot command line string. */
- extern char *prom_getbootargs(void);
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 9b1c36d..209298b 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
@@ -9889,16 +9892,16 @@ index 9b1c36d..209298b 100644
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index bcfe063..b333142 100644
+index 2c8d41f..06b1206 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
}
- #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
+ #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
index 59ba6f6..4518128 100644
@@ -9968,21 +9971,6 @@ index 79da178..c2eede8 100644
#define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
SRMMU_DIRTY | SRMMU_REF)
-diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
-index 5e35e05..b1a29e9 100644
---- a/arch/sparc/include/asm/setup.h
-+++ b/arch/sparc/include/asm/setup.h
-@@ -24,6 +24,10 @@ static inline int con_is_present(void)
- }
- #endif
-
-+#ifdef CONFIG_SPARC64
-+void __init start_early_boot(void);
-+#endif
-+
- extern void sun_do_break(void);
- extern int stop_a_enabled;
- extern int scons_pwroff;
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 9689176..63c18ea 100644
--- a/arch/sparc/include/asm/spinlock_64.h
@@ -10098,22 +10086,19 @@ index 96efa7a..16858bf 100644
/*
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index a5f01ac..a8811dd 100644
+index cc6275c..7eb8e21 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -63,7 +63,10 @@ struct thread_info {
+@@ -63,6 +63,8 @@ struct thread_info {
struct pt_regs *kern_una_regs;
unsigned int kern_una_insn;
-- unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ unsigned long lowest_stack;
+
-+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
-+ __attribute__ ((aligned(64)));
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
+ __attribute__ ((aligned(64)));
};
-
- #endif /* !(__ASSEMBLY__) */
-@@ -188,12 +191,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
/* flag bit 4 is available */
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
@@ -10128,7 +10113,7 @@ index a5f01ac..a8811dd 100644
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
-@@ -213,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
@@ -10272,108 +10257,6 @@ index d15cc17..d0ae796 100644
extra-y := head_$(BITS).o
-diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
-index 140966f..620009d 100644
---- a/arch/sparc/kernel/entry.h
-+++ b/arch/sparc/kernel/entry.h
-@@ -66,13 +66,10 @@ struct pause_patch_entry {
- extern struct pause_patch_entry __pause_3insn_patch,
- __pause_3insn_patch_end;
-
--extern void __init per_cpu_patch(void);
--extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-- struct sun4v_1insn_patch_entry *);
--extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-- struct sun4v_2insn_patch_entry *);
--extern void __init sun4v_patch(void);
--extern void __init boot_cpu_id_too_large(int cpu);
-+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-+ struct sun4v_1insn_patch_entry *);
-+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-+ struct sun4v_2insn_patch_entry *);
- extern unsigned int dcache_parity_tl1_occurred;
- extern unsigned int icache_parity_tl1_occurred;
-
-diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
-index 452f04fe..fbea0ac 100644
---- a/arch/sparc/kernel/head_64.S
-+++ b/arch/sparc/kernel/head_64.S
-@@ -660,14 +660,12 @@ tlb_fixup_done:
- sethi %hi(init_thread_union), %g6
- or %g6, %lo(init_thread_union), %g6
- ldx [%g6 + TI_TASK], %g4
-- mov %sp, %l6
-
- wr %g0, ASI_P, %asi
- mov 1, %g1
- sllx %g1, THREAD_SHIFT, %g1
- sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
- add %g6, %g1, %sp
-- mov 0, %fp
-
- /* Set per-cpu pointer initially to zero, this makes
- * the boot-cpu use the in-kernel-image per-cpu areas
-@@ -694,44 +692,14 @@ tlb_fixup_done:
- nop
- #endif
-
-- mov %l6, %o1 ! OpenPROM stack
- call prom_init
- mov %l7, %o0 ! OpenPROM cif handler
-
-- /* Initialize current_thread_info()->cpu as early as possible.
-- * In order to do that accurately we have to patch up the get_cpuid()
-- * assembler sequences. And that, in turn, requires that we know
-- * if we are on a Starfire box or not. While we're here, patch up
-- * the sun4v sequences as well.
-+ /* To create a one-register-window buffer between the kernel's
-+ * initial stack and the last stack frame we use from the firmware,
-+ * do the rest of the boot from a C helper function.
- */
-- call check_if_starfire
-- nop
-- call per_cpu_patch
-- nop
-- call sun4v_patch
-- nop
--
--#ifdef CONFIG_SMP
-- call hard_smp_processor_id
-- nop
-- cmp %o0, NR_CPUS
-- blu,pt %xcc, 1f
-- nop
-- call boot_cpu_id_too_large
-- nop
-- /* Not reached... */
--
--1:
--#else
-- mov 0, %o0
--#endif
-- sth %o0, [%g6 + TI_CPU]
--
-- call prom_init_report
-- nop
--
-- /* Off we go.... */
-- call start_kernel
-+ call start_early_boot
- nop
- /* Not reached... */
-
-diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
-index b7ddcdd..cdbfec2 100644
---- a/arch/sparc/kernel/hvtramp.S
-+++ b/arch/sparc/kernel/hvtramp.S
-@@ -109,7 +109,6 @@ hv_cpu_startup:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- call init_irqwork_curcpu
- nop
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 510baec..9ff2607 100644
--- a/arch/sparc/kernel/process_32.c
@@ -10405,7 +10288,7 @@ index 510baec..9ff2607 100644
} while (++count < 16);
printk("\n");
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
-index d7b4967..2edf827 100644
+index c6f7113..9299700 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
@@ -10496,70 +10379,8 @@ index c13c9f2..d572c34 100644
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
-index 3fdb455..949f773 100644
---- a/arch/sparc/kernel/setup_64.c
-+++ b/arch/sparc/kernel/setup_64.c
-@@ -30,6 +30,7 @@
- #include <linux/cpu.h>
- #include <linux/initrd.h>
- #include <linux/module.h>
-+#include <linux/start_kernel.h>
-
- #include <asm/io.h>
- #include <asm/processor.h>
-@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
-
- static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
--void __init per_cpu_patch(void)
-+static void __init per_cpu_patch(void)
- {
- struct cpuid_patch_entry *p;
- unsigned long ver;
-@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
- }
- }
-
--void __init sun4v_patch(void)
-+static void __init sun4v_patch(void)
- {
- extern void sun4v_hvapi_init(void);
-
-@@ -335,14 +336,25 @@ static void __init pause_patch(void)
- }
- }
-
--#ifdef CONFIG_SMP
--void __init boot_cpu_id_too_large(int cpu)
-+void __init start_early_boot(void)
- {
-- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-- cpu, NR_CPUS);
-- prom_halt();
-+ int cpu;
-+
-+ check_if_starfire();
-+ per_cpu_patch();
-+ sun4v_patch();
-+
-+ cpu = hard_smp_processor_id();
-+ if (cpu >= NR_CPUS) {
-+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-+ cpu, NR_CPUS);
-+ prom_halt();
-+ }
-+ current_thread_info()->cpu = cpu;
-+
-+ prom_init_report();
-+ start_kernel();
- }
--#endif
-
- /* On Ultra, we support all of the v8 capabilities. */
- unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index 8416d7f..f83823c 100644
+index 50c3dd03..adff164 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -10835,36 +10656,6 @@ index 33a17e7..d87fb1f 100644
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
2:
-diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
-index 737f8cb..88ede1d 100644
---- a/arch/sparc/kernel/trampoline_64.S
-+++ b/arch/sparc/kernel/trampoline_64.S
-@@ -109,10 +109,13 @@ startup_continue:
- brnz,pn %g1, 1b
- nop
-
-- sethi %hi(p1275buf), %g2
-- or %g2, %lo(p1275buf), %g2
-- ldx [%g2 + 0x10], %l2
-- add %l2, -(192 + 128), %sp
-+ /* Get onto temporary stack which will be in the locked
-+ * kernel image.
-+ */
-+ sethi %hi(tramp_stack), %g1
-+ or %g1, %lo(tramp_stack), %g1
-+ add %g1, TRAMP_STACK_SIZE, %g1
-+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
- flushw
-
- /* Setup the loop variables:
-@@ -394,7 +397,6 @@ after_lock_tlb:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- rdpr %pstate, %o1
- or %o1, PSTATE_IE, %o1
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 6629829..036032d 100644
--- a/arch/sparc/kernel/traps_32.c
@@ -10899,7 +10690,7 @@ index 6629829..036032d 100644
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 4ced92f..965eeed 100644
+index 25d0c7e..b571456 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
@@ -11017,8 +10808,8 @@ index 4ced92f..965eeed 100644
+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
}
- unsigned long sun4v_err_itlb_vaddr;
-@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2118,9 +2129,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -11030,7 +10821,7 @@ index 4ced92f..965eeed 100644
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
-@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+@@ -2141,9 +2152,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -11042,7 +10833,7 @@ index 4ced92f..965eeed 100644
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
-@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
@@ -11058,7 +10849,7 @@ index 4ced92f..965eeed 100644
graph++;
}
}
-@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
return (struct reg_window *) (fp + STACK_BIAS);
}
@@ -11067,7 +10858,7 @@ index 4ced92f..965eeed 100644
void die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
-@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+@@ -2414,7 +2427,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
while (rw &&
count++ < 30 &&
kstack_valid(tp, (unsigned long) rw)) {
@@ -11076,7 +10867,7 @@ index 4ced92f..965eeed 100644
(void *) rw->ins[7]);
rw = kernel_stack_up(rw);
-@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+@@ -2427,8 +2440,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
@@ -11682,7 +11473,7 @@ index 59dbd46..1dd7f5e 100644
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 4ced3fc..234f1e4 100644
+index 45a413e..fff0231 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -22,6 +22,9 @@
@@ -12171,7 +11962,7 @@ index 4ced3fc..234f1e4 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
-@@ -352,6 +815,29 @@ retry:
+@@ -355,6 +818,29 @@ retry:
if (!vma)
goto bad_area;
@@ -12201,47 +11992,6 @@ index 4ced3fc..234f1e4 100644
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
-diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
-index 1aed043..ae6ce38 100644
---- a/arch/sparc/mm/gup.c
-+++ b/arch/sparc/mm/gup.c
-@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- return 1;
- }
-
-+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-+ struct page **pages)
-+{
-+ struct mm_struct *mm = current->mm;
-+ unsigned long addr, len, end;
-+ unsigned long next, flags;
-+ pgd_t *pgdp;
-+ int nr = 0;
-+
-+ start &= PAGE_MASK;
-+ addr = start;
-+ len = (unsigned long) nr_pages << PAGE_SHIFT;
-+ end = start + len;
-+
-+ local_irq_save(flags);
-+ pgdp = pgd_offset(mm, addr);
-+ do {
-+ pgd_t pgd = *pgdp;
-+
-+ next = pgd_addr_end(addr, end);
-+ if (pgd_none(pgd))
-+ break;
-+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
-+ break;
-+ } while (pgdp++, addr = next, addr != end);
-+ local_irq_restore(flags);
-+
-+ return nr;
-+}
-+
- int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
- {
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d329537..2c3746a 100644
--- a/arch/sparc/mm/hugetlbpage.c
@@ -12349,10 +12099,10 @@ index d329537..2c3746a 100644
pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 9686224..dfbdb10 100644
+index 34506f2..0621e68 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
-@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+@@ -184,9 +184,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12364,7 +12114,7 @@ index 9686224..dfbdb10 100644
#endif
#endif
-@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
+@@ -194,7 +194,7 @@ inline void flush_dcache_page_impl(struct page *page)
{
BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12373,7 +12123,7 @@ index 9686224..dfbdb10 100644
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
-@@ -470,10 +470,10 @@ void mmu_info(struct seq_file *m)
+@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
#ifdef CONFIG_DEBUG_DCFLUSH
seq_printf(m, "DCPageFlushes\t: %d\n",
@@ -12386,63 +12136,6 @@ index 9686224..dfbdb10 100644
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
-diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
-index 9c86b4b..8050f38 100644
---- a/arch/sparc/prom/cif.S
-+++ b/arch/sparc/prom/cif.S
-@@ -11,11 +11,10 @@
- .text
- .globl prom_cif_direct
- prom_cif_direct:
-+ save %sp, -192, %sp
- sethi %hi(p1275buf), %o1
- or %o1, %lo(p1275buf), %o1
-- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
-- save %o2, -192, %sp
-- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
-+ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
- mov %g4, %l0
- mov %g5, %l1
- mov %g6, %l3
-diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
-index d95db75..110b0d7 100644
---- a/arch/sparc/prom/init_64.c
-+++ b/arch/sparc/prom/init_64.c
-@@ -26,13 +26,13 @@ phandle prom_chosen_node;
- * It gets passed the pointer to the PROM vector.
- */
-
--extern void prom_cif_init(void *, void *);
-+extern void prom_cif_init(void *);
-
--void __init prom_init(void *cif_handler, void *cif_stack)
-+void __init prom_init(void *cif_handler)
- {
- phandle node;
-
-- prom_cif_init(cif_handler, cif_stack);
-+ prom_cif_init(cif_handler);
-
- prom_chosen_node = prom_finddevice(prom_chosen_path);
- if (!prom_chosen_node || (s32)prom_chosen_node == -1)
-diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
-index e58b817..c27c30e4 100644
---- a/arch/sparc/prom/p1275.c
-+++ b/arch/sparc/prom/p1275.c
-@@ -19,7 +19,6 @@
- struct {
- long prom_callback; /* 0x00 */
- void (*prom_cif_handler)(long *); /* 0x08 */
-- unsigned long prom_cif_stack; /* 0x10 */
- } p1275buf;
-
- extern void prom_world(int);
-@@ -51,5 +50,4 @@ void p1275_cmd_direct(unsigned long *args)
- void prom_cif_init(void *cif_handler, void *cif_stack)
- {
- p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
-- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
- }
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index b3692ce..e4517c9 100644
--- a/arch/tile/Kconfig
@@ -34445,7 +34138,7 @@ index 0149575..f746de8 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index af2d431..3cf24f0b 100644
+index af2d431..bc63cba 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
@@ -34737,7 +34430,7 @@ index af2d431..3cf24f0b 100644
fp->bpf_func = (void *)image;
}
out:
-@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
+@@ -782,10 +887,8 @@ static void bpf_jit_free_deferred(struct work_struct *work)
{
struct sk_filter *fp = container_of(work, struct sk_filter, work);
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
@@ -34745,7 +34438,6 @@ index af2d431..3cf24f0b 100644
- set_memory_rw(addr, header->pages);
- module_free(NULL, header);
-+ set_memory_rw(addr, 1);
+ module_free_exec(NULL, (void *)addr);
kfree(fp);
}
@@ -42599,12 +42291,12 @@ index cedc6da..2c3da2a 100644
if (atomic_read(&uhid->report_done))
goto unlock;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 69ea36f..8dbf4bb 100644
+index e99e71a..e4ae549 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
-@@ -364,8 +364,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ unsigned long flags;
int ret = 0;
- int t;
- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
- atomic_inc(&vmbus_connection.next_gpadl_handle);
@@ -49605,7 +49297,7 @@ index fb02fc2..83dc2c3 100644
kfree(msi_dev_attr);
++count;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index 276ef9c..1d33a36 100644
+index 39a207a..d1ec78a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1112,7 +1112,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -51373,7 +51065,7 @@ index 1f42662..bf9836c 100644
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index 83cb612..9b7b08c 100644
+index 23c1b0c..e8035ca 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1491,8 +1491,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
@@ -59165,10 +58857,10 @@ index f70119f..ab5894d 100644
spin_lock_init(&delayed_root->lock);
init_waitqueue_head(&delayed_root->wait);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index a6d8efa..2f062cf 100644
+index 0b72006..264c7de 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
-@@ -3491,9 +3491,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+@@ -3494,9 +3494,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
for (i = 0; i < num_types; i++) {
struct btrfs_space_info *tmp;
@@ -59181,7 +58873,7 @@ index a6d8efa..2f062cf 100644
info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
-@@ -3515,10 +3518,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+@@ -3518,10 +3521,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
@@ -60274,7 +59966,7 @@ index a93f7e6..d58bcbe 100644
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index 58d57da..f91b141 100644
+index 58d57da..a3f889f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -250,7 +250,7 @@ static void __d_free(struct rcu_head *head)
@@ -60351,7 +60043,15 @@ index 58d57da..f91b141 100644
d_lru_isolate(dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
-@@ -1268,7 +1268,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+@@ -1159,6 +1159,7 @@ out_unlock:
+ return;
+
+ rename_retry:
++ done_seqretry(&rename_lock, seq);
+ if (!retry)
+ return;
+ seq = 1;
+@@ -1268,7 +1269,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
* loop in shrink_dcache_parent() might not make any progress
* and loop forever.
*/
@@ -60360,7 +60060,7 @@ index 58d57da..f91b141 100644
dentry_lru_del(dentry);
} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
/*
-@@ -1322,11 +1322,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+@@ -1322,11 +1323,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
struct select_data *data = _data;
enum d_walk_ret ret = D_WALK_CONTINUE;
@@ -60374,7 +60074,7 @@ index 58d57da..f91b141 100644
goto out;
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
-@@ -1336,7 +1336,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+@@ -1336,7 +1337,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
@@ -60383,7 +60083,7 @@ index 58d57da..f91b141 100644
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
-@@ -1494,7 +1494,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1494,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
*/
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (name->len > DNAME_INLINE_LEN-1) {
@@ -60392,7 +60092,7 @@ index 58d57da..f91b141 100644
if (!dname) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
-@@ -1512,7 +1512,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1512,7 +1513,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
@@ -60401,7 +60101,7 @@ index 58d57da..f91b141 100644
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
-@@ -2275,7 +2275,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+@@ -2275,7 +2276,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
@@ -60410,7 +60110,7 @@ index 58d57da..f91b141 100644
found = dentry;
spin_unlock(&dentry->d_lock);
break;
-@@ -2374,7 +2374,7 @@ again:
+@@ -2374,7 +2375,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
@@ -60419,7 +60119,7 @@ index 58d57da..f91b141 100644
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -3313,7 +3313,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3313,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
@@ -60428,7 +60128,7 @@ index 58d57da..f91b141 100644
}
}
return D_WALK_CONTINUE;
-@@ -3429,7 +3429,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3429,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -60455,7 +60155,7 @@ index 1576195..49a19ae 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index b167ca4..a224e19 100644
+index a85ceb7..5097313b 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
@@ -64199,10 +63899,10 @@ index dd2f2c5..27e6c48 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 75536db..7ec079e 100644
+index c7d4a0a..93207ab 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -1369,6 +1369,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1371,6 +1371,9 @@ static int do_umount(struct mount *mnt, int flags)
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
up_write(&sb->s_umount);
@@ -64212,7 +63912,7 @@ index 75536db..7ec079e 100644
return retval;
}
-@@ -1391,6 +1394,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1393,6 +1396,9 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
@@ -64222,7 +63922,7 @@ index 75536db..7ec079e 100644
return retval;
}
-@@ -1410,7 +1416,7 @@ static inline bool may_mount(void)
+@@ -1412,7 +1418,7 @@ static inline bool may_mount(void)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
@@ -64231,7 +63931,7 @@ index 75536db..7ec079e 100644
{
struct path path;
struct mount *mnt;
-@@ -1452,7 +1458,7 @@ out:
+@@ -1454,7 +1460,7 @@ out:
/*
* The 2.0 compatible umount. No flags.
*/
@@ -64240,7 +63940,7 @@ index 75536db..7ec079e 100644
{
return sys_umount(name, 0);
}
-@@ -2501,6 +2507,16 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2503,6 +2509,16 @@ long do_mount(const char *dev_name, const char *dir_name,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
@@ -64257,7 +63957,7 @@ index 75536db..7ec079e 100644
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
-@@ -2515,6 +2531,9 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2517,6 +2533,9 @@ long do_mount(const char *dev_name, const char *dir_name,
dev_name, data_page);
dput_out:
path_put(&path);
@@ -64267,7 +63967,7 @@ index 75536db..7ec079e 100644
return retval;
}
-@@ -2532,7 +2551,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2534,7 +2553,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
* number incrementing at 10Ghz will take 12,427 years to wrap which
* is effectively never, so we can ignore the possibility.
*/
@@ -64276,7 +63976,7 @@ index 75536db..7ec079e 100644
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
-@@ -2547,7 +2566,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2549,7 +2568,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
kfree(new_ns);
return ERR_PTR(ret);
}
@@ -64285,7 +63985,7 @@ index 75536db..7ec079e 100644
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
-@@ -2557,7 +2576,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2559,7 +2578,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
@@ -64294,7 +63994,7 @@ index 75536db..7ec079e 100644
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
-@@ -2678,8 +2697,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2680,8 +2699,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
@@ -64305,7 +64005,7 @@ index 75536db..7ec079e 100644
{
int ret;
char *kernel_type;
-@@ -2792,6 +2811,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2794,6 +2813,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (error)
goto out2;
@@ -64317,7 +64017,7 @@ index 75536db..7ec079e 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -2829,6 +2853,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2831,6 +2855,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make sure we can reach put_old from new_root */
if (!is_path_reachable(old_mnt, old.dentry, &new))
goto out4;
@@ -64327,7 +64027,7 @@ index 75536db..7ec079e 100644
root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
detach_mnt(new_mnt, &parent_path);
-@@ -3060,7 +3087,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+@@ -3062,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -64570,7 +64270,7 @@ index a80a741..7b96e1b 100644
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
-index 287a22c..4e56e4e 100644
+index de6323e..4931489 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -64702,6 +64402,19 @@ index 0440134..d52c93a 100644
bail:
if (handle)
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index feed025f..cee9402 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -158,7 +158,7 @@ bail_add:
+ * NOTE: This dentry already has ->d_op set from
+ * ocfs2_get_parent() and ocfs2_get_dentry()
+ */
+- if (ret)
++ if (!IS_ERR_OR_NULL(ret))
+ dentry = ret;
+
+ status = ocfs2_dentry_attach_lock(dentry, inode,
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 553f53c..aaf5133 100644
--- a/fs/ocfs2/ocfs2.h
@@ -84262,7 +83975,7 @@ index a964f72..b475afb 100644
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index d7ca410..8b39a0c 100644
+index 218b058..1ce7ad0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -133,6 +133,7 @@ struct fs_struct;
@@ -84508,7 +84221,7 @@ index d7ca410..8b39a0c 100644
{
return tsk->pid;
}
-@@ -2013,6 +2127,25 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2015,6 +2129,25 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
@@ -84534,7 +84247,7 @@ index d7ca410..8b39a0c 100644
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
-@@ -2137,7 +2270,9 @@ void yield(void);
+@@ -2139,7 +2272,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -84544,7 +84257,7 @@ index d7ca410..8b39a0c 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2170,6 +2305,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2172,6 +2307,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -84552,7 +84265,7 @@ index d7ca410..8b39a0c 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2332,7 +2468,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2334,7 +2470,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -84561,7 +84274,7 @@ index d7ca410..8b39a0c 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2533,9 +2669,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2535,9 +2671,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -89096,7 +88809,7 @@ index e2c6853..9a6397e 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 0b0dc02..5f3eb62 100644
+index fda2950..5f3eb62 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -89125,16 +88838,7 @@ index 0b0dc02..5f3eb62 100644
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
-@@ -329,6 +330,8 @@ static void get_futex_key_refs(union futex_key *key)
- case FUT_OFF_MMSHARED:
- futex_get_mm(key); /* implies MB (B) */
- break;
-+ default:
-+ smp_mb(); /* explicit MB (B) */
- }
- }
-
-@@ -380,6 +383,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+@@ -382,6 +383,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
struct page *page, *page_head;
int err, ro = 0;
@@ -89146,7 +88850,7 @@ index 0b0dc02..5f3eb62 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -579,7 +587,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+@@ -581,7 +587,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
@@ -89155,7 +88859,7 @@ index 0b0dc02..5f3eb62 100644
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -3020,6 +3028,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3022,6 +3028,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -89163,7 +88867,7 @@ index 0b0dc02..5f3eb62 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -3031,8 +3040,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3033,8 +3040,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -93969,10 +93673,32 @@ index 114d1be..ab0350c 100644
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
diff --git a/lib/bitmap.c b/lib/bitmap.c
-index 06f7e4f..f3cf2b0 100644
+index 06f7e4f..9078e42 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
-@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++ dst[k] = lower >> rem;
++ if (rem)
++ dst[k] |= upper << (BITS_PER_LONG - rem);
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
++ dst[k + off] = upper << rem;
++ if (rem)
++ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
+@@ -422,7 +426,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
@@ -93981,7 +93707,7 @@ index 06f7e4f..f3cf2b0 100644
bitmap_zero(maskp, nmaskbits);
-@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
+@@ -507,7 +511,7 @@ int bitmap_parse_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -93990,7 +93716,7 @@ index 06f7e4f..f3cf2b0 100644
ulen, 1, maskp, nmaskbits);
}
-@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+@@ -598,7 +602,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
{
unsigned a, b;
int c, old_c, totaldigits;
@@ -93999,7 +93725,7 @@ index 06f7e4f..f3cf2b0 100644
int exp_digit, in_range;
totaldigits = c = 0;
-@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -698,7 +702,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -95359,7 +95085,7 @@ index 33365e9..2234ef9 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 492e36f..55613ed 100644
+index 492e36f..b153792 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -95409,7 +95135,19 @@ index 492e36f..55613ed 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -1636,12 +1642,6 @@ no_page_table:
+@@ -1137,8 +1143,10 @@ again:
+ if (unlikely(page_mapcount(page) < 0))
+ print_bad_pte(vma, addr, ptent, page);
+ force_flush = !__tlb_remove_page(tlb, page);
+- if (force_flush)
++ if (force_flush) {
++ addr += PAGE_SIZE;
+ break;
++ }
+ continue;
+ }
+ /*
+@@ -1636,12 +1644,6 @@ no_page_table:
return page;
}
@@ -95422,7 +95160,7 @@ index 492e36f..55613ed 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1728,10 +1730,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -95435,7 +95173,7 @@ index 492e36f..55613ed 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1780,7 +1782,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -95444,7 +95182,7 @@ index 492e36f..55613ed 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1809,11 +1811,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -95456,7 +95194,7 @@ index 492e36f..55613ed 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1893,7 +1888,7 @@ next_page:
+@@ -1893,7 +1890,7 @@ next_page:
start += page_increm * PAGE_SIZE;
nr_pages -= page_increm;
} while (nr_pages && start < vma->vm_end);
@@ -95465,7 +95203,7 @@ index 492e36f..55613ed 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2105,6 +2100,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2105,6 +2102,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -95476,7 +95214,7 @@ index 492e36f..55613ed 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2149,9 +2148,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2149,9 +2150,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -95498,7 +95236,7 @@ index 492e36f..55613ed 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2234,6 +2245,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2234,6 +2247,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -95506,7 +95244,7 @@ index 492e36f..55613ed 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2481,7 +2493,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2481,7 +2495,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -95517,7 +95255,7 @@ index 492e36f..55613ed 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2501,7 +2515,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2501,7 +2517,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -95528,7 +95266,7 @@ index 492e36f..55613ed 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2591,6 +2607,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2591,6 +2609,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -95715,7 +95453,7 @@ index 492e36f..55613ed 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2815,6 +3011,12 @@ gotten:
+@@ -2815,6 +3013,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -95728,7 +95466,7 @@ index 492e36f..55613ed 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2866,6 +3068,10 @@ gotten:
+@@ -2866,6 +3070,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -95739,7 +95477,7 @@ index 492e36f..55613ed 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3143,6 +3349,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3143,6 +3351,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -95751,7 +95489,7 @@ index 492e36f..55613ed 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -3166,6 +3377,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3166,6 +3379,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -95763,7 +95501,7 @@ index 492e36f..55613ed 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3185,40 +3401,6 @@ out_release:
+@@ -3185,40 +3403,6 @@ out_release:
}
/*
@@ -95804,7 +95542,7 @@ index 492e36f..55613ed 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3227,27 +3409,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3227,27 +3411,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -95837,7 +95575,7 @@ index 492e36f..55613ed 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3271,6 +3449,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3271,6 +3451,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -95849,7 +95587,7 @@ index 492e36f..55613ed 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3278,6 +3461,12 @@ setpte:
+@@ -3278,6 +3463,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -95862,7 +95600,7 @@ index 492e36f..55613ed 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3422,6 +3611,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3422,6 +3613,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -95875,7 +95613,7 @@ index 492e36f..55613ed 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3443,6 +3638,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3443,6 +3640,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -95890,7 +95628,7 @@ index 492e36f..55613ed 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3690,6 +3893,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3690,6 +3895,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -95903,7 +95641,7 @@ index 492e36f..55613ed 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3706,9 +3915,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3706,9 +3917,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -95945,7 +95683,7 @@ index 492e36f..55613ed 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3836,6 +4077,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3836,6 +4079,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -95969,7 +95707,7 @@ index 492e36f..55613ed 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3866,6 +4124,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3866,6 +4126,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -96000,7 +95738,7 @@ index 492e36f..55613ed 100644
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3879,7 +4161,7 @@ static int __init gate_vma_init(void)
+@@ -3879,7 +4163,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -96009,7 +95747,7 @@ index 492e36f..55613ed 100644
return 0;
}
-@@ -4013,8 +4295,8 @@ out:
+@@ -4013,8 +4297,8 @@ out:
return ret;
}
@@ -96020,7 +95758,7 @@ index 492e36f..55613ed 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4040,8 +4322,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -4040,8 +4324,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -96031,7 +95769,7 @@ index 492e36f..55613ed 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4049,7 +4331,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4049,7 +4333,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -96040,7 +95778,7 @@ index 492e36f..55613ed 100644
void *maddr;
struct page *page = NULL;
-@@ -4108,8 +4390,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4108,8 +4392,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -96051,7 +95789,7 @@ index 492e36f..55613ed 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4119,11 +4401,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4119,11 +4403,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -98268,7 +98006,7 @@ index f0d698b..7037c25 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 0b1c2a5..819c6bc 100644
+index 0b1c2a5..4deff8e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -98321,29 +98059,28 @@ index 0b1c2a5..819c6bc 100644
slab_early_init = 0;
-@@ -3477,6 +3481,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -3477,6 +3481,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab) {
-+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
-+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
++ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
++ STATS_INC_NOT_SANITIZED(cachep);
++ else {
++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+
-+ if (cachep->ctor)
-+ cachep->ctor(objp);
++ if (cachep->ctor)
++ cachep->ctor(objp);
+
-+ STATS_INC_SANITIZED(cachep);
-+ } else
-+ STATS_INC_NOT_SANITIZED(cachep);
++ STATS_INC_SANITIZED(cachep);
+ }
+#endif
+
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3705,6 +3724,7 @@ void kfree(const void *objp)
+@@ -3705,6 +3723,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -98351,7 +98088,7 @@ index 0b1c2a5..819c6bc 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4146,14 +4166,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4146,14 +4165,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -98378,7 +98115,7 @@ index 0b1c2a5..819c6bc 100644
#endif
}
-@@ -4374,13 +4402,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4374,13 +4401,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -98450,10 +98187,10 @@ index 0b1c2a5..819c6bc 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slab.h b/mm/slab.h
-index 8184a7c..ab27737 100644
+index 8184a7c..81ed62c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
+@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
@@ -98463,13 +98200,18 @@ index 8184a7c..ab27737 100644
+#else
+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
+#endif
-+extern bool pax_sanitize_slab;
++enum pax_sanitize_mode {
++ PAX_SANITIZE_SLAB_OFF = 0,
++ PAX_SANITIZE_SLAB_FAST,
++ PAX_SANITIZE_SLAB_FULL,
++};
++extern enum pax_sanitize_mode pax_sanitize_slab;
+#endif
+
unsigned long calculate_alignment(unsigned long flags,
unsigned long align, unsigned long size);
-@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
+@@ -67,7 +81,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
@@ -98479,7 +98221,7 @@ index 8184a7c..ab27737 100644
#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-@@ -257,6 +267,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+@@ -257,6 +272,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return s;
page = virt_to_head_page(x);
@@ -98490,10 +98232,10 @@ index 8184a7c..ab27737 100644
if (slab_equal_or_root(cachep, s))
return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index f149e67..b366f92 100644
+index f149e67..97a8b4b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
-@@ -23,11 +23,22 @@
+@@ -23,11 +23,35 @@
#include "slab.h"
@@ -98504,20 +98246,47 @@ index f149e67..b366f92 100644
struct kmem_cache *kmem_cache;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+bool pax_sanitize_slab __read_only = true;
++enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
+static int __init pax_sanitize_slab_setup(char *str)
+{
-+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
-+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
-+ return 1;
++ if (!str)
++ return 0;
++
++ if (!strcmp(str, "0") || !strcmp(str, "off")) {
++ pr_info("PaX slab sanitization: %s\n", "disabled");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
++ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
++ pr_info("PaX slab sanitization: %s\n", "fast");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
++ } else if (!strcmp(str, "full")) {
++ pr_info("PaX slab sanitization: %s\n", "full");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
++ } else
++ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
++
++ return 0;
+}
-+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
++early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
size_t size)
-@@ -225,7 +236,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+@@ -200,6 +224,13 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
+ */
+ flags &= CACHE_CREATE_MASK;
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_NO_SANITIZE;
++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++ flags &= ~SLAB_NO_SANITIZE;
++#endif
++
+ s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
+ if (s)
+ goto out_unlock;
+@@ -225,7 +256,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
if (err)
goto out_free_cache;
@@ -98526,7 +98295,7 @@ index f149e67..b366f92 100644
list_add(&s->list, &slab_caches);
memcg_register_cache(s);
-@@ -278,8 +289,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+@@ -278,8 +309,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
get_online_cpus();
mutex_lock(&slab_mutex);
@@ -98536,7 +98305,7 @@ index f149e67..b366f92 100644
list_del(&s->list);
if (!__kmem_cache_shutdown(s)) {
-@@ -326,7 +336,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+@@ -326,7 +356,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
name, size, err);
@@ -98545,7 +98314,7 @@ index f149e67..b366f92 100644
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
-@@ -339,7 +349,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+@@ -339,7 +369,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
create_boot_cache(s, name, size, flags);
list_add(&s->list, &slab_caches);
@@ -98554,7 +98323,7 @@ index f149e67..b366f92 100644
return s;
}
-@@ -351,6 +361,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+@@ -351,6 +381,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif
@@ -98566,7 +98335,7 @@ index f149e67..b366f92 100644
/*
* Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
-@@ -415,6 +430,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+@@ -415,6 +450,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
return kmalloc_dma_caches[index];
#endif
@@ -98580,7 +98349,7 @@ index f149e67..b366f92 100644
return kmalloc_caches[index];
}
-@@ -471,7 +493,7 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -471,7 +513,7 @@ void __init create_kmalloc_caches(unsigned long flags)
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
if (!kmalloc_caches[i]) {
kmalloc_caches[i] = create_kmalloc_cache(NULL,
@@ -98589,7 +98358,7 @@ index f149e67..b366f92 100644
}
/*
-@@ -480,10 +502,10 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -480,10 +522,10 @@ void __init create_kmalloc_caches(unsigned long flags)
* earlier power of two caches
*/
if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
@@ -98602,7 +98371,7 @@ index f149e67..b366f92 100644
}
/* Kmalloc array is now usable */
-@@ -516,6 +538,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -516,6 +558,23 @@ void __init create_kmalloc_caches(unsigned long flags)
}
}
#endif
@@ -98626,7 +98395,7 @@ index f149e67..b366f92 100644
}
#endif /* !CONFIG_SLOB */
-@@ -556,6 +595,9 @@ void print_slabinfo_header(struct seq_file *m)
+@@ -556,6 +615,9 @@ void print_slabinfo_header(struct seq_file *m)
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
@@ -98637,7 +98406,7 @@ index f149e67..b366f92 100644
seq_putc(m, '\n');
}
diff --git a/mm/slob.c b/mm/slob.c
-index 4bf8809..98a6914 100644
+index 4bf8809..a0a0b9f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
@@ -98718,6 +98487,15 @@ index 4bf8809..98a6914 100644
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
+@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+ /*
+ * slob_free: entry point into the slob allocator.
+ */
+-static void slob_free(void *block, int size)
++static void slob_free(struct kmem_cache *c, void *block, int size)
+ {
+ struct page *sp;
+ slob_t *prev, *next, *b = (slob_t *)block;
@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
if (slob_page_free(sp))
clear_slob_page_free(sp);
@@ -98730,7 +98508,7 @@ index 4bf8809..98a6914 100644
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab)
++ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
@@ -98811,7 +98589,7 @@ index 4bf8809..98a6914 100644
- slob_free(m, *m + align);
- } else
+ slob_t *m = (slob_t *)(block - align);
-+ slob_free(m, m[0].units + align);
++ slob_free(NULL, m, m[0].units + align);
+ } else {
+ __ClearPageSlab(sp);
+ page_mapcount_reset(sp);
@@ -98955,24 +98733,34 @@ index 4bf8809..98a6914 100644
if (b && c->ctor)
c->ctor(b);
-@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+ EXPORT_SYMBOL(kmem_cache_alloc_node);
+ #endif
- static void __kmem_cache_free(void *b, int size)
+-static void __kmem_cache_free(void *b, int size)
++static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
{
- if (size < PAGE_SIZE)
+- slob_free(b, size);
+ struct page *sp;
+
+ sp = virt_to_page(b);
+ BUG_ON(!PageSlab(sp));
+ if (!sp->private)
- slob_free(b, size);
++ slob_free(c, b, size);
else
- slob_free_pages(b, get_order(size));
+ slob_free_pages(sp, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+ struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+ void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+- __kmem_cache_free(b, slob_rcu->size);
++ __kmem_cache_free(NULL, b, slob_rcu->size);
+ }
void kmem_cache_free(struct kmem_cache *c, void *b)
{
@@ -98995,7 +98783,7 @@ index 4bf8809..98a6914 100644
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
- __kmem_cache_free(b, c->size);
-+ __kmem_cache_free(b, size);
++ __kmem_cache_free(c, b, size);
}
+#ifdef CONFIG_PAX_USERCOPY_SLABS
@@ -99008,7 +98796,7 @@ index 4bf8809..98a6914 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 7611f14..dfe9298 100644
+index 7611f14..3d5e216 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,7 +207,7 @@ struct track {
@@ -99034,7 +98822,7 @@ index 7611f14..dfe9298 100644
slab_free_hook(s, x);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
++ if (!(s->flags & SLAB_NO_SANITIZE)) {
+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
+ if (s->ctor)
+ s->ctor(x);
@@ -99058,7 +98846,7 @@ index 7611f14..dfe9298 100644
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
++ (!(flags & SLAB_NO_SANITIZE)) ||
+#endif
s->ctor)) {
/*
@@ -99213,7 +99001,7 @@ index 7611f14..dfe9298 100644
}
SLAB_ATTR_RO(aliases);
-@@ -4605,6 +4678,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+@@ -4605,6 +4678,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(cache_dma);
#endif
@@ -99225,20 +99013,31 @@ index 7611f14..dfe9298 100644
+SLAB_ATTR_RO(usercopy);
+#endif
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
++}
++SLAB_ATTR_RO(sanitize);
++#endif
++
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-@@ -4939,6 +5020,9 @@ static struct attribute *slab_attrs[] = {
+@@ -4939,6 +5028,12 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+ &usercopy_attr.attr,
+#endif
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ &sanitize_attr.attr,
++#endif
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
-@@ -5171,6 +5255,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5171,6 +5266,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -99246,7 +99045,7 @@ index 7611f14..dfe9298 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5228,6 +5313,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5228,6 +5324,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -99254,7 +99053,7 @@ index 7611f14..dfe9298 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5241,6 +5327,7 @@ struct saved_alias {
+@@ -5241,6 +5338,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -99262,7 +99061,7 @@ index 7611f14..dfe9298 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5263,6 +5350,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5263,6 +5361,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -100184,10 +99983,10 @@ index 7552f9e..074ce29 100644
err = -EFAULT;
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 6afa3b4..7a14180 100644
+index 0007c9e..f11541b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
-@@ -3740,8 +3740,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+@@ -3736,8 +3736,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
break;
case L2CAP_CONF_RFC:
@@ -106947,10 +106746,10 @@ index 8fac3fd..32ff38d 100644
unsigned int secindex_strings;
diff --git a/security/Kconfig b/security/Kconfig
-index beb86b5..9becb4a 100644
+index beb86b5..e66c504 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,965 @@
+@@ -4,6 +4,969 @@
menu "Security options"
@@ -107713,10 +107512,14 @@ index beb86b5..9becb4a 100644
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
++ The slab sanitization feature excludes a few slab caches per default
++ for performance reasons. To extend the feature to cover those as
++ well, pass "pax_sanitize_slab=full" as kernel command line parameter.
++
+ To reduce the performance penalty by sanitizing pages only, albeit
+ limiting the effectiveness of this feature at the same time, slab
-+ sanitization can be disabled with the kernel commandline parameter
-+ "pax_sanitize_slab=0".
++ sanitization can be disabled with the kernel command line parameter
++ "pax_sanitize_slab=off".
+
+ Note that this feature does not protect data stored in live pages,
+ e.g., process memory swapped to disk may stay there for a long time.
@@ -107916,7 +107719,7 @@ index beb86b5..9becb4a 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1062,7 @@ config INTEL_TXT
+@@ -103,7 +1066,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -108734,7 +108537,7 @@ index af49721..e85058e 100644
if (err < 0)
return err;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
-index 01a5e05..c6bb425 100644
+index 566b0f6..636730b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2811,11 +2811,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
@@ -117081,10 +116884,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..d832fcc
+index 0000000..2f37382
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5991 @@
+@@ -0,0 +1,5996 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -118074,6 +117877,7 @@ index 0000000..d832fcc
+page_offset_11120 page_offset 0 11120 NULL
+cea_db_payload_len_11124 cea_db_payload_len 0 11124 NULL nohasharray
+tracing_buffers_read_11124 tracing_buffers_read 3 11124 &cea_db_payload_len_11124
++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
+snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL
+il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
+comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
@@ -118435,6 +118239,7 @@ index 0000000..d832fcc
+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
+security_inode_rename_14805 security_inode_rename 0 14805 NULL
+xfs_btree_kill_iroot_14824 xfs_btree_kill_iroot 0 14824 NULL
++do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
+mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
+lcd_write_14857 lcd_write 3 14857 NULL
+get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
@@ -118772,7 +118577,8 @@ index 0000000..d832fcc
+smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
+debug_output_18575 debug_output 3 18575 NULL
+xfs_btree_read_bufl_18597 xfs_btree_read_bufl 0 18597 NULL
-+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
+iowarrior_write_18604 iowarrior_write 3 18604 NULL
+nvc0_ram_create__18624 nvc0_ram_create_ 4 18624 NULL
+from_buffer_18625 from_buffer 3 18625 NULL
@@ -121404,6 +121210,7 @@ index 0000000..d832fcc
+nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL
+newpart_47485 newpart 6-4 47485 NULL
+core_sys_select_47494 core_sys_select 1 47494 NULL
++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
+unlink_simple_47506 unlink_simple 3 47506 NULL
+pstore_decompress_47510 pstore_decompress 0 47510 NULL
+__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL
@@ -122741,6 +122548,7 @@ index 0000000..d832fcc
+insert_one_name_61668 insert_one_name 7 61668 NULL
+qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
+lock_loop_61681 lock_loop 1 61681 NULL
++__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
+filter_read_61692 filter_read 3 61692 NULL
+iov_length_61716 iov_length 0 61716 NULL
+fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
@@ -124584,10 +124392,10 @@ index 714b949..1f0dc1e 100644
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 03a0381..8b31923 100644
+index 6611253..eb4bc0f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
-@@ -76,12 +76,17 @@ LIST_HEAD(vm_list);
+@@ -77,12 +77,17 @@ LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
@@ -124607,7 +124415,7 @@ index 03a0381..8b31923 100644
struct dentry *kvm_debugfs_dir;
-@@ -758,7 +763,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+@@ -768,7 +773,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->slot < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
@@ -124616,7 +124424,7 @@ index 03a0381..8b31923 100644
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
-@@ -1615,9 +1620,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+@@ -1625,9 +1630,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
@@ -124636,7 +124444,7 @@ index 03a0381..8b31923 100644
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
-@@ -1867,7 +1880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+@@ -1877,7 +1890,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
@@ -124645,7 +124453,7 @@ index 03a0381..8b31923 100644
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2532,7 +2545,7 @@ out:
+@@ -2545,7 +2558,7 @@ out:
}
#endif
@@ -124654,7 +124462,7 @@ index 03a0381..8b31923 100644
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2632,7 +2645,7 @@ out:
+@@ -2645,7 +2658,7 @@ out:
return r;
}
@@ -124663,7 +124471,7 @@ index 03a0381..8b31923 100644
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
-@@ -2658,7 +2671,7 @@ static void hardware_enable_nolock(void *junk)
+@@ -2671,7 +2684,7 @@ static void hardware_enable_nolock(void *junk)
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -124672,7 +124480,7 @@ index 03a0381..8b31923 100644
printk(KERN_INFO "kvm: enabling virtualization on "
"CPU%d failed\n", cpu);
}
-@@ -2714,10 +2727,10 @@ static int hardware_enable_all(void)
+@@ -2727,10 +2740,10 @@ static int hardware_enable_all(void)
kvm_usage_count++;
if (kvm_usage_count == 1) {
@@ -124685,7 +124493,7 @@ index 03a0381..8b31923 100644
hardware_disable_all_nolock();
r = -EBUSY;
}
-@@ -3121,7 +3134,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+@@ -3134,7 +3147,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
@@ -124694,7 +124502,7 @@ index 03a0381..8b31923 100644
struct module *module)
{
int r;
-@@ -3168,7 +3181,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3181,7 +3194,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
@@ -124703,7 +124511,7 @@ index 03a0381..8b31923 100644
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_3;
-@@ -3178,9 +3191,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3191,9 +3204,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (r)
goto out_free;
@@ -124715,7 +124523,7 @@ index 03a0381..8b31923 100644
r = misc_register(&kvm_dev);
if (r) {
-@@ -3190,9 +3205,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3203,9 +3218,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
register_syscore_ops(&kvm_syscore_ops);
diff --git a/3.14.22/4425_grsec_remove_EI_PAX.patch b/3.14.23/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.14.22/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.23/4425_grsec_remove_EI_PAX.patch
diff --git a/3.14.22/4427_force_XATTR_PAX_tmpfs.patch b/3.14.23/4427_force_XATTR_PAX_tmpfs.patch
index dcc7fb5..dcc7fb5 100644
--- a/3.14.22/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.14.23/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.22/4430_grsec-remove-localversion-grsec.patch b/3.14.23/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.14.22/4430_grsec-remove-localversion-grsec.patch
+++ b/3.14.23/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.22/4435_grsec-mute-warnings.patch b/3.14.23/4435_grsec-mute-warnings.patch
index 392cefb..392cefb 100644
--- a/3.14.22/4435_grsec-mute-warnings.patch
+++ b/3.14.23/4435_grsec-mute-warnings.patch
diff --git a/3.14.22/4440_grsec-remove-protected-paths.patch b/3.14.23/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.14.22/4440_grsec-remove-protected-paths.patch
+++ b/3.14.23/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.22/4450_grsec-kconfig-default-gids.patch b/3.14.23/4450_grsec-kconfig-default-gids.patch
index ff7afeb..ff7afeb 100644
--- a/3.14.22/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.23/4450_grsec-kconfig-default-gids.patch
diff --git a/3.14.22/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.23/4465_selinux-avc_audit-log-curr_ip.patch
index f92c155..f92c155 100644
--- a/3.14.22/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.23/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.22/4470_disable-compat_vdso.patch b/3.14.23/4470_disable-compat_vdso.patch
index d5eed75..d5eed75 100644
--- a/3.14.22/4470_disable-compat_vdso.patch
+++ b/3.14.23/4470_disable-compat_vdso.patch
diff --git a/3.14.22/4475_emutramp_default_on.patch b/3.14.23/4475_emutramp_default_on.patch
index cf88fd9..cf88fd9 100644
--- a/3.14.22/4475_emutramp_default_on.patch
+++ b/3.14.23/4475_emutramp_default_on.patch
diff --git a/3.17.1/0000_README b/3.17.2/0000_README
index 8ff44c0..c71a071 100644
--- a/3.17.1/0000_README
+++ b/3.17.2/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.17.1-201410250027.patch
+Patch: 4420_grsecurity-3.0-3.17.2-201410312213.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch b/3.17.2/4420_grsecurity-3.0-3.17.2-201410312213.patch
index 9f9c8f5..942b997 100644
--- a/3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch
+++ b/3.17.2/4420_grsecurity-3.0-3.17.2-201410312213.patch
@@ -313,7 +313,7 @@ index 764f599..c600e2f 100644
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 1edd5fd..84fd32e 100644
+index 1edd5fd..107ff46 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1155,6 +1155,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
@@ -338,7 +338,7 @@ index 1edd5fd..84fd32e 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2467,6 +2475,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2467,6 +2475,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -348,8 +348,13 @@ index 1edd5fd..84fd32e 100644
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
-+ 0/1 to disable/enable slab object sanitization (enabled by
-+ default).
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility; 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
@@ -365,7 +370,7 @@ index 1edd5fd..84fd32e 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 4669409..95d8745 100644
+index 390afde..33153b5 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -974,10 +979,10 @@ index 32cbbd5..c102df9 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index 3040359..cf3bab0 100644
+index 3040359..a494fa3 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
-@@ -18,17 +18,35 @@
+@@ -18,17 +18,41 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
@@ -989,6 +994,12 @@ index 3040359..cf3bab0 100644
#ifdef __KERNEL__
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
++#endif
++
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
@@ -1013,7 +1024,7 @@ index 3040359..cf3bab0 100644
#if __LINUX_ARM_ARCH__ >= 6
-@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
+@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
@@ -1022,7 +1033,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1050,7 +1061,7 @@ index 3040359..cf3bab0 100644
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -63,6 +111,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -63,6 +117,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add_return\n"
@@ -1060,7 +1071,7 @@ index 3040359..cf3bab0 100644
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1094,7 +1105,7 @@ index 3040359..cf3bab0 100644
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -84,6 +169,36 @@ static inline void atomic_sub(int i, atomic_t *v)
+@@ -84,6 +175,36 @@ static inline void atomic_sub(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
@@ -1103,7 +1114,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1131,7 +1142,7 @@ index 3040359..cf3bab0 100644
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
-@@ -103,11 +218,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+@@ -103,11 +224,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
@@ -1143,7 +1154,7 @@ index 3040359..cf3bab0 100644
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1159,7 +1170,7 @@ index 3040359..cf3bab0 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "Ir" (i)
: "cc");
-@@ -152,12 +281,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -152,12 +287,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
@@ -1170,7 +1181,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1187,7 +1198,7 @@ index 3040359..cf3bab0 100644
: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -168,6 +309,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -168,6 +315,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
@@ -1216,7 +1227,7 @@ index 3040359..cf3bab0 100644
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
-@@ -186,7 +349,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -186,7 +355,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
@@ -1234,7 +1245,7 @@ index 3040359..cf3bab0 100644
static inline int atomic_sub_return(int i, atomic_t *v)
{
-@@ -201,6 +374,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+@@ -201,6 +380,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
return val;
}
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
@@ -1245,7 +1256,7 @@ index 3040359..cf3bab0 100644
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
-@@ -216,6 +393,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -216,6 +399,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
@@ -1257,7 +1268,7 @@ index 3040359..cf3bab0 100644
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
-@@ -229,13 +411,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -229,13 +417,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#endif /* __LINUX_ARM_ARCH__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -1291,7 +1302,7 @@ index 3040359..cf3bab0 100644
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -246,6 +448,14 @@ typedef struct {
+@@ -246,6 +454,14 @@ typedef struct {
long long counter;
} atomic64_t;
@@ -1306,7 +1317,7 @@ index 3040359..cf3bab0 100644
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-@@ -262,6 +472,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -262,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1326,7 +1337,7 @@ index 3040359..cf3bab0 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
-@@ -270,6 +493,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -270,6 +499,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
);
}
@@ -1342,7 +1353,7 @@ index 3040359..cf3bab0 100644
#else
static inline long long atomic64_read(const atomic64_t *v)
{
-@@ -284,6 +516,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -284,6 +522,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1362,7 +1373,7 @@ index 3040359..cf3bab0 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
-@@ -298,6 +543,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -298,6 +549,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
: "cc");
}
@@ -1384,7 +1395,7 @@ index 3040359..cf3bab0 100644
#endif
static inline void atomic64_add(long long i, atomic64_t *v)
-@@ -309,6 +569,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
+@@ -309,6 +575,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
@@ -1392,7 +1403,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1422,7 +1433,7 @@ index 3040359..cf3bab0 100644
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -329,6 +620,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+@@ -329,6 +626,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
@@ -1432,7 +1443,7 @@ index 3040359..cf3bab0 100644
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1467,7 +1478,7 @@ index 3040359..cf3bab0 100644
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -351,6 +680,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
+@@ -351,6 +686,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
@@ -1475,7 +1486,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1505,7 +1516,7 @@ index 3040359..cf3bab0 100644
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
-@@ -371,10 +731,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+@@ -371,10 +737,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
@@ -1516,7 +1527,7 @@ index 3040359..cf3bab0 100644
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1532,7 +1543,7 @@ index 3040359..cf3bab0 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (i)
: "cc");
-@@ -410,6 +785,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+@@ -410,6 +791,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
return oldval;
}
@@ -1564,7 +1575,7 @@ index 3040359..cf3bab0 100644
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
long long result;
-@@ -435,21 +835,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+@@ -435,21 +841,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
@@ -1586,7 +1597,7 @@ index 3040359..cf3bab0 100644
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1606,7 +1617,7 @@ index 3040359..cf3bab0 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -473,13 +887,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -473,13 +893,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1618,7 +1629,7 @@ index 3040359..cf3bab0 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
-+"2: bkpt 0xf103\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
@@ -1635,7 +1646,7 @@ index 3040359..cf3bab0 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -492,10 +918,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -492,10 +924,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
@@ -2112,7 +2123,7 @@ index 9fd61c7..f8f1cff 100644
/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
-index 06e0bc0..e60c2d3 100644
+index 06e0bc0..c65bca8 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -81,6 +81,7 @@
@@ -2123,7 +2134,12 @@ index 06e0bc0..e60c2d3 100644
#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-@@ -96,6 +97,7 @@
+@@ -92,10 +93,12 @@
+ #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
+ #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+ #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
++#define PMD_SECT_RDONLY PMD_SECT_AP2
+
/*
* To be used in assembly code with the upper page attributes.
*/
@@ -3590,6 +3606,19 @@ index f7a07a5..258e1f7 100644
pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
base, length, desc->virtual);
+diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
+index 7f352de..6dc0929 100644
+--- a/arch/arm/mach-keystone/keystone.c
++++ b/arch/arm/mach-keystone/keystone.c
+@@ -27,7 +27,7 @@
+
+ #include "keystone.h"
+
+-static struct notifier_block platform_nb;
++static notifier_block_no_const platform_nb;
+ static unsigned long keystone_dma_pfn_offset __read_mostly;
+
+ static int keystone_platform_notifier(struct notifier_block *nb,
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 2bdc323..cf1c607 100644
--- a/arch/arm/mach-mvebu/coherency.c
@@ -4007,7 +4036,7 @@ index 6eb97b3..ac509f6 100644
atomic64_set(&mm->context.id, asid);
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index eb8830a..5360ce7 100644
+index eb8830a..e8ff52e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,7 @@
@@ -4121,7 +4150,7 @@ index eb8830a..5360ce7 100644
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
-@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
ifsr_info[nr].name = name;
}
@@ -4201,9 +4230,15 @@ index eb8830a..5360ce7 100644
+
+#ifdef CONFIG_PAX_REFCOUNT
+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++ unsigned short bkpt;
++
++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
+ unsigned int bkpt;
+
+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
+ current->thread.error_code = ifsr;
+ current->thread.trap_no = 0;
+ pax_report_refcount_overflow(regs);
@@ -8107,26 +8142,648 @@ index 4bc7b62..107e0b2 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
-index 28992d0..c797b20 100644
+index 28992d0..434c881 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
-@@ -519,6 +519,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
- return t1;
+@@ -12,6 +12,11 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++ PPC_LONG" " #from ", " #to"\n" \
++" .previous\n"
++
+ static __inline__ int atomic_read(const atomic_t *v)
+ {
+ int t;
+@@ -21,16 +26,61 @@ static __inline__ int atomic_read(const atomic_t *v)
+ return t;
}
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ int t;
+
- #endif /* __powerpc64__ */
++ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
- #endif /* __KERNEL__ */
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
+ static __inline__ void atomic_add(int a, atomic_t *v)
+ {
+ int t;
+
+ __asm__ __volatile__(
++"1: lwarx %0,0,%3 # atomic_add\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%2,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%2,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%3)
++" stwcx. %0,0,%3 \n\
++ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t), "+m" (v->counter)
++ : "r" (a), "r" (&v->counter)
++ : "cc");
++}
++
++static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__(
+ "1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+@@ -41,12 +91,49 @@ static __inline__ void atomic_add(int a, atomic_t *v)
+ : "cc");
+ }
+
+/* Same as atomic_add but returns the value */
+ static __inline__ int atomic_add_return(int a, atomic_t *v)
+ {
+ int t;
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
++"1: lwarx %0,0,%2 # atomic_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++" stwcx. %0,0,%2 \n\
++ bne- 1b\n"
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ PPC_ATOMIC_EXIT_BARRIER
++ : "=&r" (t)
++ : "r" (a), "r" (&v->counter)
++ : "cc", "memory");
++
++ return t;
++}
++
++/* Same as atomic_add_unchecked but returns the value */
++static __inline__ int atomic_add_return_unchecked(int a, atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__(
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -67,6 +154,37 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
+ int t;
+
+ __asm__ __volatile__(
++"1: lwarx %0,0,%3 # atomic_sub\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" subfo. %0,%2,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" subf %0,%2,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%3)
++" stwcx. %0,0,%3 \n\
++ bne- 1b\n"
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t), "+m" (v->counter)
++ : "r" (a), "r" (&v->counter)
++ : "cc");
++}
++
++static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__(
+ "1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+@@ -77,12 +195,49 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
+ : "cc");
+ }
+
+/* Same as atomic_sub but returns the value */
+ static __inline__ int atomic_sub_return(int a, atomic_t *v)
+ {
+ int t;
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
++"1: lwarx %0,0,%2 # atomic_sub_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" subfo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" subf %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++" stwcx. %0,0,%2 \n\
++ bne- 1b\n"
++ PPC_ATOMIC_EXIT_BARRIER
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&v->counter)
++ : "cc", "memory");
++
++ return t;
++}
++
++/* Same as atomic_sub_unchecked but returns the value */
++static __inline__ int atomic_sub_return_unchecked(int a, atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__(
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -96,38 +251,23 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
+ return t;
+ }
+
+-static __inline__ void atomic_inc(atomic_t *v)
+-{
+- int t;
++/*
++ * atomic_inc - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%2)
+-" stwcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
+ }
+
+-static __inline__ int atomic_inc_return(atomic_t *v)
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: lwarx %0,0,%1 # atomic_inc_return\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%1)
+-" stwcx. %0,0,%1 \n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ return atomic_add_return_unchecked(1, v);
+ }
+
+ /*
+@@ -140,43 +280,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+-static __inline__ void atomic_dec(atomic_t *v)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%2)\
+-" stwcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++ return atomic_add_return_unchecked(1, v) == 0;
+ }
+
+-static __inline__ int atomic_dec_return(atomic_t *v)
++/*
++ * atomic_dec - decrement atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_return(v) atomic_sub_return(1, (v))
++
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: lwarx %0,0,%1 # atomic_dec_return\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%1)
+-" stwcx. %0,0,%1\n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ atomic_sub_unchecked(1, v);
+ }
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+@@ -271,6 +406,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
+ }
+ #define atomic_dec_if_positive atomic_dec_if_positive
+
++#define smp_mb__before_atomic_dec() smp_mb()
++#define smp_mb__after_atomic_dec() smp_mb()
++#define smp_mb__before_atomic_inc() smp_mb()
++#define smp_mb__after_atomic_inc() smp_mb()
++
+ #ifdef __powerpc64__
+
+ #define ATOMIC64_INIT(i) { (i) }
+@@ -284,11 +424,25 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+ return t;
+ }
+
++static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic64_set(atomic64_t *v, long i)
+ {
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
++static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
+ static __inline__ void atomic64_add(long a, atomic64_t *v)
+ {
+ long t;
+@@ -303,12 +457,76 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
+ : "cc");
+ }
+
++static __inline__ void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__(
++"1: ldarx %0,0,%3 # atomic64_add\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%2,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%2,%0\n"
++#endif
++
++"3:\n"
++" stdcx. %0,0,%3 \n\
++ bne- 1b\n"
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t), "+m" (v->counter)
++ : "r" (a), "r" (&v->counter)
++ : "cc");
++}
++
+ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+ {
+ long t;
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
++"1: ldarx %0,0,%2 # atomic64_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++" stdcx. %0,0,%2 \n\
++ bne- 1b\n"
++ PPC_ATOMIC_EXIT_BARRIER
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&v->counter)
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long atomic64_add_return_unchecked(long a, atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__(
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 # atomic64_add_return\n\
+ add %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+@@ -328,6 +546,36 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
+ long t;
+
+ __asm__ __volatile__(
++"1: ldarx %0,0,%3 # atomic64_sub\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" subfo. %0,%2,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" subf %0,%2,%0\n"
++#endif
++
++"3:\n"
++" stdcx. %0,0,%3 \n\
++ bne- 1b"
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t), "+m" (v->counter)
++ : "r" (a), "r" (&v->counter)
++ : "cc");
++}
++
++static __inline__ void atomic64_sub_unchecked(long a, atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # atomic64_sub\n\
+ subf %0,%2,%0\n\
+ stdcx. %0,0,%3 \n\
+@@ -343,6 +591,40 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+
+ __asm__ __volatile__(
+ PPC_ATOMIC_ENTRY_BARRIER
++"1: ldarx %0,0,%2 # atomic64_sub_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" subfo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" subf %0,%1,%0\n"
++#endif
++
++"3:\n"
++" stdcx. %0,0,%2 \n\
++ bne- 1b\n"
++ PPC_ATOMIC_EXIT_BARRIER
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&v->counter)
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long atomic64_sub_return_unchecked(long a, atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__(
++ PPC_ATOMIC_ENTRY_BARRIER
+ "1: ldarx %0,0,%2 # atomic64_sub_return\n\
+ subf %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+@@ -355,36 +637,23 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+ return t;
+ }
+
+-static __inline__ void atomic64_inc(atomic64_t *v)
+-{
+- long t;
++/*
++ * atomic64_inc - increment atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static __inline__ void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
+ }
+
+-static __inline__ long atomic64_inc_return(atomic64_t *v)
++static __inline__ int atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%1 \n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ return atomic64_add_return_unchecked(1, v);
+ }
+
+ /*
+@@ -397,36 +666,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+-static __inline__ void atomic64_dec(atomic64_t *v)
++/*
++ * atomic64_dec - decrement atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
++
++static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ long atomic64_dec_return(atomic64_t *v)
+-{
+- long t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%1\n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ atomic64_sub_unchecked(1, v);
+ }
+
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+@@ -459,6 +710,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index bab79a1..4a3eabc 100644
--- a/arch/powerpc/include/asm/barrier.h
@@ -8224,10 +8881,10 @@ index 5acabbd..7ea14fa 100644
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
-index b8da913..60b608a 100644
+index b8da913..c02b593 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
-@@ -9,15 +9,26 @@ typedef struct
+@@ -9,21 +9,65 @@ typedef struct
atomic_long_t a;
} local_t;
@@ -8254,23 +8911,46 @@ index b8da913..60b608a 100644
static __inline__ long local_add_return(long a, local_t *l)
{
-@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
-
- return t;
- }
-+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
+ long t;
- #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
-
-@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
-
- return t;
- }
-+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
-
- static __inline__ long local_inc_return(local_t *l)
- {
-@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
+ __asm__ __volatile__(
++"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++ PPC_STLCX "%0,0,%2 \n\
++ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&(l->a.counter))
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
++{
++ long t;
++
++ __asm__ __volatile__(
+ "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
#define local_cmpxchg(l, o, n) \
(cmpxchg_local(&((l)->a.counter), (o), (n)))
@@ -8424,6 +9104,73 @@ index 5a6614a..d89995d1 100644
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index 4dbe072..b803275 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "1: " PPC_LWARX(%0,0,%1,1) "\n"
+ __DO_SIGN_EXTEND
+-" addic. %0,%0,1\n\
+- ble- 2f\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,1\n"
++#endif
++
++"3:\n"
++ "ble- 4f\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+-"2:" : "=&r" (tmp)
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b,4b)
++#endif
++
++ : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
+
+@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+-"1: lwarx %0,0,%1\n\
+- addic %0,%0,-1\n"
++"1: lwarx %0,0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,-1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,-1\n"
++#endif
++
++"3:\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index b034ecd..af7e31f 100644
--- a/arch/powerpc/include/asm/thread_info.h
@@ -8905,10 +9652,18 @@ index 2cb0c94..c0c0bc9 100644
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 0dc43f9..7893068 100644
+index 0dc43f9..a885d33 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
-@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
+@@ -36,6 +36,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/uaccess.h>
+
+ #include <asm/emulated_ops.h>
+ #include <asm/pgtable.h>
+@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
return flags;
}
@@ -8917,7 +9672,7 @@ index 0dc43f9..7893068 100644
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
int signr)
{
-@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
@@ -8927,6 +9682,33 @@ index 0dc43f9..7893068 100644
do_exit(signr);
}
+@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+ enum ctx_state prev_state = exception_enter();
+ unsigned int reason = get_reason(regs);
+
++#ifdef CONFIG_PAX_REFCOUNT
++ unsigned int bkpt;
++ const struct exception_table_entry *entry;
++
++ if (reason & REASON_ILLEGAL) {
++ /* Check if PaX bad instruction */
++ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
++ current->thread.trap_nr = 0;
++ pax_report_refcount_overflow(regs);
++ /* fixup_exception() for PowerPC does not exist, simulate its job */
++ if ((entry = search_exception_tables(regs->nip)) != NULL) {
++ regs->nip = entry->fixup;
++ return;
++ }
++ /* fixup_exception() could not handle */
++ goto bail;
++ }
++ }
++#endif
++
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in that case the reason flags will be 0 */
+
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f174351..5722009 100644
--- a/arch/powerpc/kernel/vdso.c
@@ -9950,20 +10732,6 @@ index 370ca1e..d4f4a98 100644
extern unsigned long sparc64_elf_hwcap;
#define ELF_HWCAP sparc64_elf_hwcap
-diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
-index f346824..2e3a4ad 100644
---- a/arch/sparc/include/asm/oplib_64.h
-+++ b/arch/sparc/include/asm/oplib_64.h
-@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
- /* You must call prom_init() before using any of the library services,
- * preferably as early as possible. Pass it the romvec pointer.
- */
--void prom_init(void *cif_handler, void *cif_stack);
-+void prom_init(void *cif_handler);
-+void prom_init_report(void);
-
- /* Boot argument acquisition, returns the boot command line string. */
- char *prom_getbootargs(void);
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index a3890da..f6a408e 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
@@ -9977,16 +10745,16 @@ index a3890da..f6a408e 100644
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index 39a7ac4..2c9b586 100644
+index 5e31871..b71c9d7 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
}
- #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
+ #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
index 59ba6f6..4518128 100644
@@ -10057,17 +10825,10 @@ index 79da178..c2eede8 100644
SRMMU_DIRTY | SRMMU_REF)
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
-index f5fffd8..4272fe8 100644
+index 29d64b1..4272fe8 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
-@@ -48,13 +48,15 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
- #endif
-
- #ifdef CONFIG_SPARC64
-+void __init start_early_boot(void);
-+
- /* unaligned_64.c */
- int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
void handle_ld_nf(u32 insn, struct pt_regs *regs);
/* init_64.c */
@@ -10193,22 +10954,19 @@ index 96efa7a..16858bf 100644
/*
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index a5f01ac..a8811dd 100644
+index cc6275c..7eb8e21 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -63,7 +63,10 @@ struct thread_info {
+@@ -63,6 +63,8 @@ struct thread_info {
struct pt_regs *kern_una_regs;
unsigned int kern_una_insn;
-- unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ unsigned long lowest_stack;
+
-+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
-+ __attribute__ ((aligned(64)));
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
+ __attribute__ ((aligned(64)));
};
-
- #endif /* !(__ASSEMBLY__) */
-@@ -188,12 +191,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
/* flag bit 4 is available */
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
@@ -10223,7 +10981,7 @@ index a5f01ac..a8811dd 100644
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
-@@ -213,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
@@ -10367,104 +11125,6 @@ index 7cf9c6e..6206648 100644
extra-y := head_$(BITS).o
-diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
-index ebaba61..88d322b 100644
---- a/arch/sparc/kernel/entry.h
-+++ b/arch/sparc/kernel/entry.h
-@@ -65,13 +65,10 @@ struct pause_patch_entry {
- extern struct pause_patch_entry __pause_3insn_patch,
- __pause_3insn_patch_end;
-
--void __init per_cpu_patch(void);
- void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
- struct sun4v_1insn_patch_entry *);
- void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
- struct sun4v_2insn_patch_entry *);
--void __init sun4v_patch(void);
--void __init boot_cpu_id_too_large(int cpu);
- extern unsigned int dcache_parity_tl1_occurred;
- extern unsigned int icache_parity_tl1_occurred;
-
-diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
-index 452f04fe..fbea0ac 100644
---- a/arch/sparc/kernel/head_64.S
-+++ b/arch/sparc/kernel/head_64.S
-@@ -660,14 +660,12 @@ tlb_fixup_done:
- sethi %hi(init_thread_union), %g6
- or %g6, %lo(init_thread_union), %g6
- ldx [%g6 + TI_TASK], %g4
-- mov %sp, %l6
-
- wr %g0, ASI_P, %asi
- mov 1, %g1
- sllx %g1, THREAD_SHIFT, %g1
- sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
- add %g6, %g1, %sp
-- mov 0, %fp
-
- /* Set per-cpu pointer initially to zero, this makes
- * the boot-cpu use the in-kernel-image per-cpu areas
-@@ -694,44 +692,14 @@ tlb_fixup_done:
- nop
- #endif
-
-- mov %l6, %o1 ! OpenPROM stack
- call prom_init
- mov %l7, %o0 ! OpenPROM cif handler
-
-- /* Initialize current_thread_info()->cpu as early as possible.
-- * In order to do that accurately we have to patch up the get_cpuid()
-- * assembler sequences. And that, in turn, requires that we know
-- * if we are on a Starfire box or not. While we're here, patch up
-- * the sun4v sequences as well.
-+ /* To create a one-register-window buffer between the kernel's
-+ * initial stack and the last stack frame we use from the firmware,
-+ * do the rest of the boot from a C helper function.
- */
-- call check_if_starfire
-- nop
-- call per_cpu_patch
-- nop
-- call sun4v_patch
-- nop
--
--#ifdef CONFIG_SMP
-- call hard_smp_processor_id
-- nop
-- cmp %o0, NR_CPUS
-- blu,pt %xcc, 1f
-- nop
-- call boot_cpu_id_too_large
-- nop
-- /* Not reached... */
--
--1:
--#else
-- mov 0, %o0
--#endif
-- sth %o0, [%g6 + TI_CPU]
--
-- call prom_init_report
-- nop
--
-- /* Off we go.... */
-- call start_kernel
-+ call start_early_boot
- nop
- /* Not reached... */
-
-diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
-index b7ddcdd..cdbfec2 100644
---- a/arch/sparc/kernel/hvtramp.S
-+++ b/arch/sparc/kernel/hvtramp.S
-@@ -109,7 +109,6 @@ hv_cpu_startup:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- call init_irqwork_curcpu
- nop
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 50e7b62..79fae35 100644
--- a/arch/sparc/kernel/process_32.c
@@ -10587,70 +11247,8 @@ index c13c9f2..d572c34 100644
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
-index 3fdb455..949f773 100644
---- a/arch/sparc/kernel/setup_64.c
-+++ b/arch/sparc/kernel/setup_64.c
-@@ -30,6 +30,7 @@
- #include <linux/cpu.h>
- #include <linux/initrd.h>
- #include <linux/module.h>
-+#include <linux/start_kernel.h>
-
- #include <asm/io.h>
- #include <asm/processor.h>
-@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
-
- static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
--void __init per_cpu_patch(void)
-+static void __init per_cpu_patch(void)
- {
- struct cpuid_patch_entry *p;
- unsigned long ver;
-@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
- }
- }
-
--void __init sun4v_patch(void)
-+static void __init sun4v_patch(void)
- {
- extern void sun4v_hvapi_init(void);
-
-@@ -335,14 +336,25 @@ static void __init pause_patch(void)
- }
- }
-
--#ifdef CONFIG_SMP
--void __init boot_cpu_id_too_large(int cpu)
-+void __init start_early_boot(void)
- {
-- prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-- cpu, NR_CPUS);
-- prom_halt();
-+ int cpu;
-+
-+ check_if_starfire();
-+ per_cpu_patch();
-+ sun4v_patch();
-+
-+ cpu = hard_smp_processor_id();
-+ if (cpu >= NR_CPUS) {
-+ prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
-+ cpu, NR_CPUS);
-+ prom_halt();
-+ }
-+ current_thread_info()->cpu = cpu;
-+
-+ prom_init_report();
-+ start_kernel();
- }
--#endif
-
- /* On Ultra, we support all of the v8 capabilities. */
- unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index f7ba875..b65677e 100644
+index c9300bf..b2080cf 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
@@ -10915,36 +11513,6 @@ index 33a17e7..d87fb1f 100644
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
2:
-diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
-index 737f8cb..88ede1d 100644
---- a/arch/sparc/kernel/trampoline_64.S
-+++ b/arch/sparc/kernel/trampoline_64.S
-@@ -109,10 +109,13 @@ startup_continue:
- brnz,pn %g1, 1b
- nop
-
-- sethi %hi(p1275buf), %g2
-- or %g2, %lo(p1275buf), %g2
-- ldx [%g2 + 0x10], %l2
-- add %l2, -(192 + 128), %sp
-+ /* Get onto temporary stack which will be in the locked
-+ * kernel image.
-+ */
-+ sethi %hi(tramp_stack), %g1
-+ or %g1, %lo(tramp_stack), %g1
-+ add %g1, TRAMP_STACK_SIZE, %g1
-+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
- flushw
-
- /* Setup the loop variables:
-@@ -394,7 +397,6 @@ after_lock_tlb:
- sllx %g5, THREAD_SHIFT, %g5
- sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
-- mov 0, %fp
-
- rdpr %pstate, %o1
- or %o1, PSTATE_IE, %o1
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 6fd386c5..6907d81 100644
--- a/arch/sparc/kernel/traps_32.c
@@ -10979,7 +11547,7 @@ index 6fd386c5..6907d81 100644
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index fb6640e..2daada8 100644
+index 981a769..d906eda 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
@@ -11097,8 +11665,8 @@ index fb6640e..2daada8 100644
+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
}
- unsigned long sun4v_err_itlb_vaddr;
-@@ -2116,9 +2127,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -11110,7 +11678,7 @@ index fb6640e..2daada8 100644
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
-@@ -2140,9 +2151,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -11122,7 +11690,7 @@ index fb6640e..2daada8 100644
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
-@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
@@ -11138,7 +11706,7 @@ index fb6640e..2daada8 100644
graph++;
}
}
-@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
return (struct reg_window *) (fp + STACK_BIAS);
}
@@ -11147,7 +11715,7 @@ index fb6640e..2daada8 100644
void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
-@@ -2411,7 +2424,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
while (rw &&
count++ < 30 &&
kstack_valid(tp, (unsigned long) rw)) {
@@ -11156,7 +11724,7 @@ index fb6640e..2daada8 100644
(void *) rw->ins[7]);
rw = kernel_stack_up(rw);
-@@ -2424,8 +2437,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
@@ -11762,7 +12330,7 @@ index 908e8c1..1524793 100644
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 587cd05..fbdf17a 100644
+index 18fcd71..e4fe821 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -22,6 +22,9 @@
@@ -12251,7 +12819,7 @@ index 587cd05..fbdf17a 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
-@@ -350,6 +813,29 @@ retry:
+@@ -353,6 +816,29 @@ retry:
if (!vma)
goto bad_area;
@@ -12281,47 +12849,6 @@ index 587cd05..fbdf17a 100644
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
-diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
-index 1aed043..ae6ce38 100644
---- a/arch/sparc/mm/gup.c
-+++ b/arch/sparc/mm/gup.c
-@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- return 1;
- }
-
-+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-+ struct page **pages)
-+{
-+ struct mm_struct *mm = current->mm;
-+ unsigned long addr, len, end;
-+ unsigned long next, flags;
-+ pgd_t *pgdp;
-+ int nr = 0;
-+
-+ start &= PAGE_MASK;
-+ addr = start;
-+ len = (unsigned long) nr_pages << PAGE_SHIFT;
-+ end = start + len;
-+
-+ local_irq_save(flags);
-+ pgdp = pgd_offset(mm, addr);
-+ do {
-+ pgd_t pgd = *pgdp;
-+
-+ next = pgd_addr_end(addr, end);
-+ if (pgd_none(pgd))
-+ break;
-+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
-+ break;
-+ } while (pgdp++, addr = next, addr != end);
-+ local_irq_restore(flags);
-+
-+ return nr;
-+}
-+
- int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
- {
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d329537..2c3746a 100644
--- a/arch/sparc/mm/hugetlbpage.c
@@ -12429,10 +12956,10 @@ index d329537..2c3746a 100644
pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 98ac8e8..ba7dd39 100644
+index 04bc826..0fefab9 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
-@@ -190,9 +190,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12444,7 +12971,7 @@ index 98ac8e8..ba7dd39 100644
#endif
#endif
-@@ -200,7 +200,7 @@ inline void flush_dcache_page_impl(struct page *page)
+@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
{
BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12453,7 +12980,7 @@ index 98ac8e8..ba7dd39 100644
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
-@@ -472,10 +472,10 @@ void mmu_info(struct seq_file *m)
+@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
#ifdef CONFIG_DEBUG_DCFLUSH
seq_printf(m, "DCPageFlushes\t: %d\n",
@@ -12478,63 +13005,6 @@ index ece4af0..f04b862 100644
+
+ bpf_prog_unlock_free(fp);
}
-diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
-index 9c86b4b..8050f38 100644
---- a/arch/sparc/prom/cif.S
-+++ b/arch/sparc/prom/cif.S
-@@ -11,11 +11,10 @@
- .text
- .globl prom_cif_direct
- prom_cif_direct:
-+ save %sp, -192, %sp
- sethi %hi(p1275buf), %o1
- or %o1, %lo(p1275buf), %o1
-- ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
-- save %o2, -192, %sp
-- ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
-+ ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
- mov %g4, %l0
- mov %g5, %l1
- mov %g6, %l3
-diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
-index d95db75..110b0d7 100644
---- a/arch/sparc/prom/init_64.c
-+++ b/arch/sparc/prom/init_64.c
-@@ -26,13 +26,13 @@ phandle prom_chosen_node;
- * It gets passed the pointer to the PROM vector.
- */
-
--extern void prom_cif_init(void *, void *);
-+extern void prom_cif_init(void *);
-
--void __init prom_init(void *cif_handler, void *cif_stack)
-+void __init prom_init(void *cif_handler)
- {
- phandle node;
-
-- prom_cif_init(cif_handler, cif_stack);
-+ prom_cif_init(cif_handler);
-
- prom_chosen_node = prom_finddevice(prom_chosen_path);
- if (!prom_chosen_node || (s32)prom_chosen_node == -1)
-diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
-index e58b817..c27c30e4 100644
---- a/arch/sparc/prom/p1275.c
-+++ b/arch/sparc/prom/p1275.c
-@@ -19,7 +19,6 @@
- struct {
- long prom_callback; /* 0x00 */
- void (*prom_cif_handler)(long *); /* 0x08 */
-- unsigned long prom_cif_stack; /* 0x10 */
- } p1275buf;
-
- extern void prom_world(int);
-@@ -51,5 +50,4 @@ void p1275_cmd_direct(unsigned long *args)
- void prom_cif_init(void *cif_handler, void *cif_stack)
- {
- p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
-- p1275buf.prom_cif_stack = (unsigned long)cif_stack;
- }
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 7fcd492..1311074 100644
--- a/arch/tile/Kconfig
@@ -17338,10 +17808,10 @@ index 53cdfb2..d1369e6 100644
#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 7c492ed..d16311f 100644
+index 92d3486..0d47ae1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
-@@ -990,6 +990,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+@@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}
@@ -17362,7 +17832,7 @@ index 7c492ed..d16311f 100644
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
-@@ -1048,7 +1062,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+@@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_define_shared_msr(unsigned index, u32 msr);
@@ -29477,18 +29947,10 @@ index ddf7427..fd84599 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index bfe11cf..deb3959 100644
+index 6a118fa..c0b3c00 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
-@@ -453,6 +453,7 @@ struct vcpu_vmx {
- int gs_ldt_reload_needed;
- int fs_reload_needed;
- u64 msr_host_bndcfgs;
-+ unsigned long vmcs_host_cr4; /* May not match real cr4 */
- } host_state;
- struct {
- int vm86_active;
-@@ -1340,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
+@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
#endif
}
@@ -29503,7 +29965,7 @@ index bfe11cf..deb3959 100644
{
vmcs_writel(field, vmcs_readl(field) | mask);
}
-@@ -1605,7 +1606,11 @@ static void reload_tss(void)
+@@ -1606,7 +1606,11 @@ static void reload_tss(void)
struct desc_struct *descs;
descs = (void *)gdt->address;
@@ -29515,7 +29977,7 @@ index bfe11cf..deb3959 100644
load_TR_desc();
}
-@@ -1833,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+@@ -1834,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
@@ -29526,7 +29988,7 @@ index bfe11cf..deb3959 100644
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
vmx->loaded_vmcs->cpu = cpu;
-@@ -2122,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+@@ -2123,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
* reads and returns guest's timestamp counter "register"
* guest_tsc = host_tsc + tsc_offset -- 21.3
*/
@@ -29535,7 +29997,7 @@ index bfe11cf..deb3959 100644
{
u64 host_tsc, tsc_offset;
-@@ -2631,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+@@ -2632,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -29553,7 +30015,7 @@ index bfe11cf..deb3959 100644
}
break;
}
-@@ -3110,8 +3122,11 @@ static __init int hardware_setup(void)
+@@ -3111,8 +3122,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -29567,7 +30029,7 @@ index bfe11cf..deb3959 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3122,13 +3137,15 @@ static __init int hardware_setup(void)
+@@ -3123,13 +3137,15 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -29587,26 +30049,18 @@ index bfe11cf..deb3959 100644
if (nested)
nested_vmx_setup_ctls_msrs();
-@@ -4235,10 +4252,17 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
- u32 low32, high32;
- unsigned long tmpl;
- struct desc_ptr dt;
-+ unsigned long cr4;
+@@ -4239,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
-- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
++
+#ifndef CONFIG_PAX_PER_CPU_PGD
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+#endif
-+
-+ /* Save the most likely value for this task's CR4 in the VMCS. */
-+ cr4 = read_cr4();
-+ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
-+ vmx->host_state.vmcs_host_cr4 = cr4;
- vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
- #ifdef CONFIG_X86_64
-@@ -4260,7 +4284,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ /* Save the most likely value for this task's CR4 in the VMCS. */
+ cr4 = read_cr4();
+@@ -4266,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -29615,7 +30069,7 @@ index bfe11cf..deb3959 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -5257,7 +5281,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+@@ -5263,7 +5282,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
@@ -29624,7 +30078,7 @@ index bfe11cf..deb3959 100644
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
-@@ -6630,6 +6654,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+@@ -6636,6 +6655,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
return 1;
}
@@ -29637,7 +30091,7 @@ index bfe11cf..deb3959 100644
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
-@@ -6675,6 +6705,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+@@ -6681,6 +6706,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
[EXIT_REASON_INVEPT] = handle_invept,
@@ -29645,7 +30099,7 @@ index bfe11cf..deb3959 100644
};
static const int kvm_vmx_max_exit_handlers =
-@@ -6908,7 +6939,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+@@ -6914,7 +6940,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
@@ -29654,7 +30108,7 @@ index bfe11cf..deb3959 100644
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
-@@ -7049,10 +7080,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+@@ -7055,10 +7081,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
else {
@@ -29668,29 +30122,7 @@ index bfe11cf..deb3959 100644
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
-@@ -7376,7 +7407,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
- static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-- unsigned long debugctlmsr;
-+ unsigned long debugctlmsr, cr4;
-
- /* Record the guest's net vcpu time for enforced NMI injections. */
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
-@@ -7397,6 +7428,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
- vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-+ cr4 = read_cr4();
-+ if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
-+ vmcs_writel(HOST_CR4, cr4);
-+ vmx->host_state.vmcs_host_cr4 = cr4;
-+ }
-+
- /* When single-stepping over STI and MOV SS, we must clear the
- * corresponding interruptibility bits in the guest state. Otherwise
- * vmentry fails as it then expects bit 14 (BS) in pending debug
-@@ -7453,6 +7490,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7465,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -29703,7 +30135,7 @@ index bfe11cf..deb3959 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -7505,6 +7548,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7517,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -29715,7 +30147,7 @@ index bfe11cf..deb3959 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -7518,7 +7566,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7530,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -29724,7 +30156,7 @@ index bfe11cf..deb3959 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -7527,8 +7575,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7539,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -35133,7 +35565,7 @@ index 6440221..f84b5c7 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index 5c8cb80..728d0cd 100644
+index 5c8cb80..5fd7860 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,7 +15,11 @@
@@ -35232,7 +35664,7 @@ index 5c8cb80..728d0cd 100644
prog->bpf_func = (void *)image;
prog->jited = 1;
}
-@@ -930,23 +931,16 @@ out:
+@@ -930,23 +931,15 @@ out:
kfree(addrs);
}
@@ -35260,7 +35692,6 @@ index 5c8cb80..728d0cd 100644
+ if (!fp->jited)
+ goto free_filter;
+
-+ set_memory_rw(addr, 1);
+ module_free_exec(NULL, (void *)addr);
+
+free_filter:
@@ -42880,12 +43311,12 @@ index 0cb92e3..c7d453d 100644
if (atomic_read(&uhid->report_done))
goto unlock;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 531a593..0b43a69 100644
+index 19bad59..ca24eaf 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
-@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ unsigned long flags;
int ret = 0;
- int t;
- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
- atomic_inc(&vmbus_connection.next_gpadl_handle);
@@ -42895,7 +43326,7 @@ index 531a593..0b43a69 100644
ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
if (ret)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
-index edfc848..d83e195 100644
+index 3e4235c..877d0e5 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
@@ -42907,7 +43338,7 @@ index edfc848..d83e195 100644
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
"=a"(hv_status_lo) : "d" (control_hi),
-@@ -154,7 +154,7 @@ int hv_init(void)
+@@ -156,7 +156,7 @@ int hv_init(void)
/* See if the hypercall page is already set */
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -43002,10 +43433,10 @@ index 5e90c5d..d8fcefb 100644
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
-index 22b7507..fc2fc47 100644
+index c386d8d..d6004c4 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
-@@ -607,7 +607,7 @@ enum vmbus_connect_state {
+@@ -611,7 +611,7 @@ enum vmbus_connect_state {
struct vmbus_connection {
enum vmbus_connect_state conn_state;
@@ -48012,7 +48443,7 @@ index 1c5d62e..8e14d54 100644
+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
+};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-index ea27383..d695e45 100644
+index ea27383..faa8936 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2463,7 +2463,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
@@ -48197,7 +48628,7 @@ index ea27383..d695e45 100644
- DBGPR("<--xgbe_init_function_ptrs\n");
-}
+ .config_dcb_tc = xgbe_config_dcb_tc,
-+ .config_dcb_pfc = xgbe_config_dcb_pfc
++ .config_dcb_pfc = xgbe_config_dcb_pfc,
+};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b26d758..b0d1c3b 100644
@@ -50599,7 +51030,7 @@ index 5a40516..136d5a7 100644
kfree(msi_dev_attr);
++count;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index 9ff0a90..e819dda 100644
+index 76ef791..adc3bd1 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -51332,7 +51763,7 @@ index f374fa5..26f0683 100644
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
ARRAY_SIZE(mc13892_regulators));
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
-index b0e4a3e..e5dc11e 100644
+index 5b2e761..c8c8a4a 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
@@ -52337,7 +52768,7 @@ index d646540..5b13554 100644
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index be9698d..a328a41 100644
+index 8252c0e..613adad 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1493,8 +1493,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
@@ -52573,6 +53004,19 @@ index 01cf888..59e0475 100644
case BLKTRACESTART:
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
+index 11a5043..e36f04c 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra.c
+@@ -70,7 +70,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
+ return i;
+ }
+
+-static struct bin_attribute fuse_bin_attr = {
++static bin_attribute_no_const fuse_bin_attr = {
+ .attr = { .name = "fuse", .mode = S_IRUGO, },
+ .read = fuse_read,
+ };
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ca935df..ae8a3dc 100644
--- a/drivers/spi/spi.c
@@ -52903,6 +53347,28 @@ index dc23395..cf7e9b1 100644
struct io_req {
struct list_head list;
+diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
+index 2bf2e2f..84421c9 100644
+--- a/drivers/staging/unisys/visorchipset/visorchipset.h
++++ b/drivers/staging/unisys/visorchipset/visorchipset.h
+@@ -228,7 +228,7 @@ typedef struct {
+ void (*device_resume)(ulong busNo, ulong devNo);
+ int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
+ ulong *maxSize);
+-} VISORCHIPSET_BUSDEV_NOTIFIERS;
++} __no_const VISORCHIPSET_BUSDEV_NOTIFIERS;
+
+ /* These functions live inside visorchipset, and will be called to indicate
+ * responses to specific events (by code outside of visorchipset).
+@@ -243,7 +243,7 @@ typedef struct {
+ void (*device_destroy)(ulong busNo, ulong devNo, int response);
+ void (*device_pause)(ulong busNo, ulong devNo, int response);
+ void (*device_resume)(ulong busNo, ulong devNo, int response);
+-} VISORCHIPSET_BUSDEV_RESPONDERS;
++} __no_const VISORCHIPSET_BUSDEV_RESPONDERS;
+
+ /** Register functions (in the bus driver) to get called by visorchipset
+ * whenever a bus or device appears for which this service partition is
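Both VISORCHIPSET_BUSDEV_* structs above are tables of function pointers that the bus driver fills in at registration time, so grsecurity's constification pass must be told to leave them writable via __no_const. A hedged userspace sketch (all names invented) of the distinction being drawn — a build-time-constant ops table that constification can lock down versus a runtime-registered one that must stay writable:

```c
#include <stdio.h>

struct ops {
	void (*device_added)(unsigned long bus, unsigned long dev);
};

static void log_device_added(unsigned long bus, unsigned long dev)
{
	printf("bus %lu dev %lu added\n", bus, dev);
}

/* Constified table: contents fixed at build time, storable read-only,
 * so a stray kernel write primitive cannot redirect the callback. */
static const struct ops fixed_ops = {
	.device_added = log_device_added,
};

/* Runtime-registered table: filled in after init, hence no const. */
static struct ops registered_ops;

static void register_ops(void (*cb)(unsigned long, unsigned long))
{
	registered_ops.device_added = cb;	/* legal only because writable */
}

int main(void)
{
	register_ops(log_device_added);
	fixed_ops.device_added(0, 1);
	registered_ops.device_added(0, 2);
	return 0;
}
```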
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 164136b..7244df5 100644
--- a/drivers/staging/vt6655/hostap.c
@@ -59907,10 +60373,10 @@ index f70119f..ab5894d 100644
spin_lock_init(&delayed_root->lock);
init_waitqueue_head(&delayed_root->wait);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 8a8e298..9f904ad 100644
+index b765d41..5a8b0c3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
-@@ -3939,9 +3939,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+@@ -3975,9 +3975,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
for (i = 0; i < num_types; i++) {
struct btrfs_space_info *tmp;
@@ -59923,7 +60389,7 @@ index 8a8e298..9f904ad 100644
info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
-@@ -3963,10 +3966,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+@@ -3999,10 +4002,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
@@ -61029,7 +61495,7 @@ index a93f7e6..d58bcbe 100644
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index cb25a1a..fa91d33 100644
+index cb25a1a..c557cb6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
@@ -61126,7 +61592,15 @@ index cb25a1a..fa91d33 100644
d_lru_isolate(dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
-@@ -1255,7 +1255,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+@@ -1149,6 +1149,7 @@ out_unlock:
+ return;
+
+ rename_retry:
++ done_seqretry(&rename_lock, seq);
+ if (!retry)
+ return;
+ seq = 1;
+@@ -1255,7 +1256,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
} else {
if (dentry->d_flags & DCACHE_LRU_LIST)
d_lru_del(dentry);
@@ -61135,7 +61609,7 @@ index cb25a1a..fa91d33 100644
d_shrink_add(dentry, &data->dispose);
data->found++;
}
-@@ -1303,7 +1303,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1303,7 +1304,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
return D_WALK_CONTINUE;
/* root with refcount 1 is fine */
@@ -61144,7 +61618,7 @@ index cb25a1a..fa91d33 100644
return D_WALK_CONTINUE;
printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
-@@ -1312,7 +1312,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1312,7 +1313,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry,
@@ -61153,7 +61627,7 @@ index cb25a1a..fa91d33 100644
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
WARN_ON(1);
-@@ -1438,7 +1438,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1438,7 +1439,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
*/
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (name->len > DNAME_INLINE_LEN-1) {
@@ -61162,7 +61636,7 @@ index cb25a1a..fa91d33 100644
if (!dname) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
-@@ -1456,7 +1456,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1456,7 +1457,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
@@ -61171,7 +61645,7 @@ index cb25a1a..fa91d33 100644
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
-@@ -2196,7 +2196,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+@@ -2196,7 +2197,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
@@ -61180,7 +61654,7 @@ index cb25a1a..fa91d33 100644
found = dentry;
spin_unlock(&dentry->d_lock);
break;
-@@ -2295,7 +2295,7 @@ again:
+@@ -2295,7 +2296,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
@@ -61189,7 +61663,21 @@ index cb25a1a..fa91d33 100644
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -3300,7 +3300,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -2675,11 +2676,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+ if (!IS_ROOT(new)) {
+ spin_unlock(&inode->i_lock);
+ dput(new);
++ iput(inode);
+ return ERR_PTR(-EIO);
+ }
+ if (d_ancestor(new, dentry)) {
+ spin_unlock(&inode->i_lock);
+ dput(new);
++ iput(inode);
+ return ERR_PTR(-EIO);
+ }
+ write_seqlock(&rename_lock);
+@@ -3300,7 +3303,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
@@ -61198,7 +61686,7 @@ index cb25a1a..fa91d33 100644
}
}
return D_WALK_CONTINUE;
-@@ -3416,7 +3416,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3416,7 +3419,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -61225,7 +61713,7 @@ index 1e3b99d..6512101 100644
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
-index d4a9431..77f9b2e 100644
+index 57ee4c5..ecb13b0 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
@@ -64338,7 +64826,7 @@ index 6740a62..ccb472f 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index a7b05bf..9b251d4 100644
+index 3ddb044..5533df9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -64900,10 +65388,10 @@ index a7b05bf..9b251d4 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index ef42d9b..b8dfe4f 100644
+index 7f67b46..c4ad324 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -1360,6 +1360,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
up_write(&sb->s_umount);
@@ -64913,7 +65401,7 @@ index ef42d9b..b8dfe4f 100644
return retval;
}
-@@ -1382,6 +1385,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
@@ -64923,7 +65411,7 @@ index ef42d9b..b8dfe4f 100644
return retval;
}
-@@ -1401,7 +1407,7 @@ static inline bool may_mount(void)
+@@ -1403,7 +1409,7 @@ static inline bool may_mount(void)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
@@ -64932,7 +65420,7 @@ index ef42d9b..b8dfe4f 100644
{
struct path path;
struct mount *mnt;
-@@ -1443,7 +1449,7 @@ out:
+@@ -1445,7 +1451,7 @@ out:
/*
* The 2.0 compatible umount. No flags.
*/
@@ -64941,7 +65429,7 @@ index ef42d9b..b8dfe4f 100644
{
return sys_umount(name, 0);
}
-@@ -2492,6 +2498,16 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
@@ -64958,7 +65446,7 @@ index ef42d9b..b8dfe4f 100644
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
-@@ -2506,6 +2522,9 @@ long do_mount(const char *dev_name, const char *dir_name,
+@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name,
dev_name, data_page);
dput_out:
path_put(&path);
@@ -64968,7 +65456,7 @@ index ef42d9b..b8dfe4f 100644
return retval;
}
-@@ -2523,7 +2542,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
* number incrementing at 10Ghz will take 12,427 years to wrap which
* is effectively never, so we can ignore the possibility.
*/
@@ -64977,7 +65465,7 @@ index ef42d9b..b8dfe4f 100644
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
-@@ -2538,7 +2557,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
kfree(new_ns);
return ERR_PTR(ret);
}
@@ -64986,7 +65474,7 @@ index ef42d9b..b8dfe4f 100644
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
-@@ -2548,7 +2567,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
@@ -64995,7 +65483,7 @@ index ef42d9b..b8dfe4f 100644
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
-@@ -2669,8 +2688,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
@@ -65006,7 +65494,7 @@ index ef42d9b..b8dfe4f 100644
{
int ret;
char *kernel_type;
-@@ -2783,6 +2802,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (error)
goto out2;
@@ -65018,7 +65506,7 @@ index ef42d9b..b8dfe4f 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -2820,6 +2844,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2822,6 +2846,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make sure we can reach put_old from new_root */
if (!is_path_reachable(old_mnt, old.dentry, &new))
goto out4;
@@ -65028,7 +65516,7 @@ index ef42d9b..b8dfe4f 100644
root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
detach_mnt(new_mnt, &parent_path);
-@@ -3051,7 +3078,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+@@ -3053,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -65088,7 +65576,7 @@ index 5e0dc52..64681bc 100644
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index b01f6e1..4aab09a 100644
+index 353aac8..32035ee 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1534,7 +1534,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
@@ -65139,7 +65627,7 @@ index ff95676..96cf3f62 100644
break;
case RC_REPLBUFF:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index f501a9b..8155556 100644
+index 6ab077b..5ac7f0b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -855,7 +855,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
@@ -65275,7 +65763,7 @@ index a80a741..7b96e1b 100644
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
-index b13992a..536c8d8 100644
+index c991616..5ae51af 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -65398,6 +65886,19 @@ index 0440134..d52c93a 100644
bail:
if (handle)
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 8add6f1..b931e04 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -158,7 +158,7 @@ bail_add:
+ * NOTE: This dentry already has ->d_op set from
+ * ocfs2_get_parent() and ocfs2_get_dentry()
+ */
+- if (ret)
++ if (!IS_ERR_OR_NULL(ret))
+ dentry = ret;
+
+ status = ocfs2_dentry_attach_lock(dentry, inode,
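The one-line ocfs2 fix above hinges on the kernel's ERR_PTR convention: the lookup can hand back NULL ("keep the dentry you have"), a valid pointer, or an errno encoded into the pointer value, and the old `if (ret)` treated the error encoding as a usable pointer. A hedged, self-contained sketch of the convention (constants simplified; this mirrors include/linux/err.h rather than quoting it):

```c
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errors live in the top 4095 values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

static int the_default = 1;

static int *lookup(int key)
{
	if (key < 0)
		return ERR_PTR(-ENOENT);	/* error case */
	if (key == 0)
		return NULL;			/* "use what you already have" */
	return &the_default;			/* valid result */
}

int main(void)
{
	int *p = lookup(-1);

	if (!IS_ERR_OR_NULL(p))		/* the corrected test */
		printf("got %d\n", *p);
	else if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));
	return 0;
}
```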
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index bbec539..7b266d5 100644
--- a/fs/ocfs2/ocfs2.h
@@ -83562,7 +84063,7 @@ index f230a97..714c006 100644
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 8981cc8..76fd8c2 100644
+index 16e6f1e..d79d2f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -83596,7 +84097,7 @@ index 8981cc8..76fd8c2 100644
struct mmu_gather;
struct inode;
-@@ -1144,8 +1150,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -1163,8 +1169,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
@@ -83607,7 +84108,7 @@ index 8981cc8..76fd8c2 100644
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1184,9 +1190,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1203,9 +1209,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -83620,7 +84121,7 @@ index 8981cc8..76fd8c2 100644
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-@@ -1219,34 +1225,6 @@ int set_page_dirty_lock(struct page *page);
+@@ -1238,34 +1244,6 @@ int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -83655,7 +84156,7 @@ index 8981cc8..76fd8c2 100644
extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
-@@ -1346,6 +1324,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
+@@ -1365,6 +1343,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
@@ -83671,7 +84172,7 @@ index 8981cc8..76fd8c2 100644
int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-@@ -1364,8 +1351,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1383,8 +1370,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
{
return 0;
}
@@ -83687,7 +84188,7 @@ index 8981cc8..76fd8c2 100644
#endif
#ifdef __PAGETABLE_PMD_FOLDED
-@@ -1374,8 +1368,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1393,8 +1387,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
{
return 0;
}
@@ -83703,7 +84204,7 @@ index 8981cc8..76fd8c2 100644
#endif
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-@@ -1393,11 +1394,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1412,11 +1413,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
NULL: pud_offset(pgd, address);
}
@@ -83727,7 +84228,7 @@ index 8981cc8..76fd8c2 100644
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-@@ -1796,7 +1809,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1815,7 +1828,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
@@ -83736,7 +84237,7 @@ index 8981cc8..76fd8c2 100644
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1804,6 +1817,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1823,6 +1836,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
@@ -83744,7 +84245,7 @@ index 8981cc8..76fd8c2 100644
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1832,10 +1846,11 @@ struct vm_unmapped_area_info {
+@@ -1851,10 +1865,11 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
@@ -83758,7 +84259,7 @@ index 8981cc8..76fd8c2 100644
/*
* Search for an unmapped address range.
-@@ -1847,7 +1862,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1866,7 +1881,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
@@ -83767,7 +84268,7 @@ index 8981cc8..76fd8c2 100644
{
if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
return unmapped_area(info);
-@@ -1909,6 +1924,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1928,6 +1943,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
@@ -83778,7 +84279,7 @@ index 8981cc8..76fd8c2 100644
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1937,15 +1956,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1956,15 +1975,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
@@ -83794,7 +84295,7 @@ index 8981cc8..76fd8c2 100644
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-@@ -1997,6 +2007,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -2016,6 +2026,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
@@ -83806,7 +84307,7 @@ index 8981cc8..76fd8c2 100644
mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -2085,7 +2100,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2104,7 +2119,7 @@ extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
@@ -83815,7 +84316,7 @@ index 8981cc8..76fd8c2 100644
extern int soft_offline_page(struct page *page, int flags);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2120,5 +2135,11 @@ void __init setup_nr_node_ids(void);
+@@ -2139,5 +2154,11 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
@@ -84995,7 +85496,7 @@ index ed8f9e7..999bc96 100644
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index b867a4d..84f03ad 100644
+index 2b1d9e9..10ba706 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -132,6 +132,7 @@ struct fs_struct;
@@ -85241,7 +85742,7 @@ index b867a4d..84f03ad 100644
{
return tsk->pid;
}
-@@ -2095,6 +2209,25 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2097,6 +2211,25 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
@@ -85267,7 +85768,7 @@ index b867a4d..84f03ad 100644
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
-@@ -2228,7 +2361,9 @@ void yield(void);
+@@ -2230,7 +2363,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -85277,7 +85778,7 @@ index b867a4d..84f03ad 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2261,6 +2396,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2263,6 +2398,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -85285,7 +85786,7 @@ index b867a4d..84f03ad 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2425,7 +2561,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2427,7 +2563,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -85294,7 +85795,7 @@ index b867a4d..84f03ad 100644
extern int do_execve(struct filename *,
const char __user * const __user *,
-@@ -2640,9 +2776,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2642,9 +2778,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -89847,7 +90348,7 @@ index a91e47d..71c9064 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index 815d7af..3d0743b 100644
+index f3a3a07..6820bc0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -202,7 +202,7 @@ struct futex_pi_state {
@@ -89868,16 +90369,7 @@ index 815d7af..3d0743b 100644
static const struct futex_q futex_q_init = {
/* list gets initialized in queue_me()*/
-@@ -343,6 +343,8 @@ static void get_futex_key_refs(union futex_key *key)
- case FUT_OFF_MMSHARED:
- futex_get_mm(key); /* implies MB (B) */
- break;
-+ default:
-+ smp_mb(); /* explicit MB (B) */
- }
- }
-
-@@ -394,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+@@ -396,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
struct page *page, *page_head;
int err, ro = 0;
@@ -89889,7 +90381,7 @@ index 815d7af..3d0743b 100644
/*
* The futex address must be "naturally" aligned.
*/
-@@ -593,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+@@ -595,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
@@ -89898,7 +90390,73 @@ index 815d7af..3d0743b 100644
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2998,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -641,8 +646,14 @@ static struct futex_pi_state * alloc_pi_state(void)
+ return pi_state;
+ }
+
++/*
++ * Must be called with the hb lock held.
++ */
+ static void free_pi_state(struct futex_pi_state *pi_state)
+ {
++ if (!pi_state)
++ return;
++
+ if (!atomic_dec_and_test(&pi_state->refcount))
+ return;
+
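This hunk makes free_pi_state() a no-op on NULL, mirroring the kfree(NULL) contract; the hunks that follow rely on that so futex_requeue() can call it unconditionally on every exit path instead of guarding each call site. A hedged single-threaded sketch of the pattern (the real code uses atomic_dec_and_test(); a plain counter stands in here):

```c
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static void put_obj(struct obj *o)
{
	if (!o)			/* NULL is a no-op, by design */
		return;
	if (--o->refcount > 0)
		return;
	free(o);
	printf("object released\n");
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;
	put_obj(NULL);		/* safe on paths that never took a reference */
	put_obj(o);		/* drops the last reference and frees */
	return 0;
}
```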
+@@ -1521,15 +1532,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ }
+
+ retry:
+- if (pi_state != NULL) {
+- /*
+- * We will have to lookup the pi_state again, so free this one
+- * to keep the accounting correct.
+- */
+- free_pi_state(pi_state);
+- pi_state = NULL;
+- }
+-
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
+ if (unlikely(ret != 0))
+ goto out;
+@@ -1619,6 +1621,8 @@ retry_private:
+ case 0:
+ break;
+ case -EFAULT:
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+ put_futex_key(&key2);
+@@ -1634,6 +1638,8 @@ retry_private:
+ * exit to complete.
+ * - The user space value changed.
+ */
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+ put_futex_key(&key2);
+@@ -1710,6 +1716,7 @@ retry_private:
+ }
+
+ out_unlock:
++ free_pi_state(pi_state);
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+
+@@ -1727,8 +1734,6 @@ out_put_keys:
+ out_put_key1:
+ put_futex_key(&key1);
+ out:
+- if (pi_state != NULL)
+- free_pi_state(pi_state);
+ return ret ? ret : task_count;
+ }
+
+@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -89906,7 +90464,7 @@ index 815d7af..3d0743b 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -3009,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3011,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -94871,10 +95429,32 @@ index 114d1be..ab0350c 100644
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
diff --git a/lib/bitmap.c b/lib/bitmap.c
-index 1e031f2..a53eb90 100644
+index 1e031f2..89e3d6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
-@@ -429,7 +429,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++ dst[k] = lower >> rem;
++ if (rem)
++ dst[k] |= upper << (BITS_PER_LONG - rem);
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
++ dst[k + off] = upper << rem;
++ if (rem)
++ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
+@@ -429,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
@@ -94883,7 +95463,7 @@ index 1e031f2..a53eb90 100644
bitmap_zero(maskp, nmaskbits);
-@@ -514,7 +514,7 @@ int bitmap_parse_user(const char __user *ubuf,
+@@ -514,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -94892,7 +95472,7 @@ index 1e031f2..a53eb90 100644
ulen, 1, maskp, nmaskbits);
}
-@@ -605,7 +605,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+@@ -605,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
{
unsigned a, b;
int c, old_c, totaldigits;
@@ -94901,7 +95481,7 @@ index 1e031f2..a53eb90 100644
int exp_digit, in_range;
totaldigits = c = 0;
-@@ -700,7 +700,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -700,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -96279,7 +96859,7 @@ index 44c6bd2..60369dc3 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index e229970..a3eb2ce 100644
+index e229970..68218aa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -96329,7 +96909,15 @@ index e229970..a3eb2ce 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -1500,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1147,6 +1153,7 @@ again:
+ print_bad_pte(vma, addr, ptent, page);
+ if (unlikely(!__tlb_remove_page(tlb, page))) {
+ force_flush = 1;
++ addr += PAGE_SIZE;
+ break;
+ }
+ continue;
+@@ -1500,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -96340,7 +96928,7 @@ index e229970..a3eb2ce 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -1544,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1544,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -96362,7 +96950,7 @@ index e229970..a3eb2ce 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -1629,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -1629,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -96370,7 +96958,7 @@ index e229970..a3eb2ce 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -1876,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1876,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -96381,7 +96969,7 @@ index e229970..a3eb2ce 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -1896,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -1896,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -96392,7 +96980,7 @@ index e229970..a3eb2ce 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2018,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+@@ -2018,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
}
@@ -96579,7 +97167,7 @@ index e229970..a3eb2ce 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2216,6 +2423,12 @@ gotten:
+@@ -2216,6 +2424,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -96592,7 +97180,7 @@ index e229970..a3eb2ce 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2269,6 +2482,10 @@ gotten:
+@@ -2269,6 +2483,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -96603,7 +97191,7 @@ index e229970..a3eb2ce 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2543,6 +2760,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2543,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -96615,7 +97203,7 @@ index e229970..a3eb2ce 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -2566,6 +2788,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2566,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -96627,7 +97215,7 @@ index e229970..a3eb2ce 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2585,40 +2812,6 @@ out_release:
+@@ -2585,40 +2813,6 @@ out_release:
}
/*
@@ -96668,7 +97256,7 @@ index e229970..a3eb2ce 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2628,27 +2821,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2628,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned int flags)
{
struct mem_cgroup *memcg;
@@ -96701,7 +97289,7 @@ index e229970..a3eb2ce 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2672,6 +2861,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2672,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -96713,7 +97301,7 @@ index e229970..a3eb2ce 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
-@@ -2681,6 +2875,12 @@ setpte:
+@@ -2681,6 +2876,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -96726,7 +97314,7 @@ index e229970..a3eb2ce 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2911,6 +3111,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2911,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
@@ -96738,7 +97326,7 @@ index e229970..a3eb2ce 100644
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
-@@ -2953,7 +3158,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2953,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(fault_page);
goto uncharge_out;
}
@@ -96757,7 +97345,7 @@ index e229970..a3eb2ce 100644
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
-@@ -3003,6 +3219,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3003,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
@@ -96769,7 +97357,7 @@ index e229970..a3eb2ce 100644
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
-@@ -3244,6 +3465,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3244,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -96782,7 +97370,7 @@ index e229970..a3eb2ce 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3263,9 +3490,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3263,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -96824,7 +97412,7 @@ index e229970..a3eb2ce 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3399,6 +3658,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3399,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -96848,7 +97436,7 @@ index e229970..a3eb2ce 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3429,6 +3705,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3429,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -96879,7 +97467,7 @@ index e229970..a3eb2ce 100644
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
-@@ -3538,8 +3838,8 @@ out:
+@@ -3538,8 +3839,8 @@ out:
return ret;
}
@@ -96890,7 +97478,7 @@ index e229970..a3eb2ce 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3565,8 +3865,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3565,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -96901,7 +97489,7 @@ index e229970..a3eb2ce 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3574,7 +3874,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3574,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -96910,7 +97498,7 @@ index e229970..a3eb2ce 100644
void *maddr;
struct page *page = NULL;
-@@ -3635,8 +3935,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3635,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -96921,7 +97509,7 @@ index e229970..a3eb2ce 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3646,11 +3946,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3646,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -97013,10 +97601,10 @@ index 8f5330d..b41914b 100644
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
diff --git a/mm/migrate.c b/mm/migrate.c
-index 2740360..d20a37d 100644
+index 0143995..b294728 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -1495,8 +1495,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -99124,7 +99712,7 @@ index 469f90d..34a09ee 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 7c52b38..dc55dcb 100644
+index 7c52b38..3ccc17e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -99177,29 +99765,28 @@ index 7c52b38..dc55dcb 100644
slab_early_init = 0;
-@@ -3384,6 +3388,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -3384,6 +3388,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab) {
-+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
-+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
++ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
++ STATS_INC_NOT_SANITIZED(cachep);
++ else {
++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+
-+ if (cachep->ctor)
-+ cachep->ctor(objp);
++ if (cachep->ctor)
++ cachep->ctor(objp);
+
-+ STATS_INC_SANITIZED(cachep);
-+ } else
-+ STATS_INC_NOT_SANITIZED(cachep);
++ STATS_INC_SANITIZED(cachep);
+ }
+#endif
+
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3607,6 +3626,7 @@ void kfree(const void *objp)
+@@ -3607,6 +3625,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -99207,7 +99794,7 @@ index 7c52b38..dc55dcb 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4056,14 +4076,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4056,14 +4075,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -99234,7 +99821,7 @@ index 7c52b38..dc55dcb 100644
#endif
}
-@@ -4281,13 +4309,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4281,13 +4308,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -99306,10 +99893,10 @@ index 7c52b38..dc55dcb 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slab.h b/mm/slab.h
-index 0e0fdd3..c61c735 100644
+index 0e0fdd3..d0fd761 100644
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
+@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
@@ -99319,13 +99906,18 @@ index 0e0fdd3..c61c735 100644
+#else
+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
+#endif
-+extern bool pax_sanitize_slab;
++enum pax_sanitize_mode {
++ PAX_SANITIZE_SLAB_OFF = 0,
++ PAX_SANITIZE_SLAB_FAST,
++ PAX_SANITIZE_SLAB_FULL,
++};
++extern enum pax_sanitize_mode pax_sanitize_slab;
+#endif
+
unsigned long calculate_alignment(unsigned long flags,
unsigned long align, unsigned long size);
-@@ -67,7 +76,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -67,7 +81,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
@@ -99335,7 +99927,7 @@ index 0e0fdd3..c61c735 100644
#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-@@ -251,6 +261,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+@@ -251,6 +266,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return s;
page = virt_to_head_page(x);
@@ -99346,10 +99938,10 @@ index 0e0fdd3..c61c735 100644
if (slab_equal_or_root(cachep, s))
return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index d319502..9eb3eb5 100644
+index d319502..da7714e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
-@@ -25,11 +25,22 @@
+@@ -25,11 +25,35 @@
#include "slab.h"
@@ -99360,20 +99952,33 @@ index d319502..9eb3eb5 100644
struct kmem_cache *kmem_cache;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+bool pax_sanitize_slab __read_only = true;
++enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
+static int __init pax_sanitize_slab_setup(char *str)
+{
-+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
-+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
-+ return 1;
++ if (!str)
++ return 0;
++
++ if (!strcmp(str, "0") || !strcmp(str, "off")) {
++ pr_info("PaX slab sanitization: %s\n", "disabled");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
++ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
++ pr_info("PaX slab sanitization: %s\n", "fast");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
++ } else if (!strcmp(str, "full")) {
++ pr_info("PaX slab sanitization: %s\n", "full");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
++ } else
++ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
++
++ return 0;
+}
-+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
++early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
-@@ -160,7 +171,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
+@@ -160,7 +184,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
if (err)
goto out_free_cache;
@@ -99382,7 +99987,21 @@ index d319502..9eb3eb5 100644
list_add(&s->list, &slab_caches);
out:
if (err)
-@@ -341,8 +352,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+@@ -222,6 +246,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
+ */
+ flags &= CACHE_CREATE_MASK;
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_NO_SANITIZE;
++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++ flags &= ~SLAB_NO_SANITIZE;
++#endif
++
+ s = __kmem_cache_alias(name, size, align, flags, ctor);
+ if (s)
+ goto out_unlock;
+@@ -341,8 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
mutex_lock(&slab_mutex);
@@ -99392,7 +100011,7 @@ index d319502..9eb3eb5 100644
goto out_unlock;
if (memcg_cleanup_cache_params(s) != 0)
-@@ -362,7 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+@@ -362,7 +392,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
rcu_barrier();
memcg_free_cache_params(s);
@@ -99401,7 +100020,7 @@ index d319502..9eb3eb5 100644
sysfs_slab_remove(s);
#else
slab_kmem_cache_release(s);
-@@ -418,7 +428,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
+@@ -418,7 +448,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
name, size, err);
@@ -99410,7 +100029,7 @@ index d319502..9eb3eb5 100644
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
-@@ -431,7 +441,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
+@@ -431,7 +461,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
create_boot_cache(s, name, size, flags);
list_add(&s->list, &slab_caches);
@@ -99419,7 +100038,7 @@ index d319502..9eb3eb5 100644
return s;
}
-@@ -443,6 +453,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+@@ -443,6 +473,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif
@@ -99431,7 +100050,7 @@ index d319502..9eb3eb5 100644
/*
* Conversion table for small slabs sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
-@@ -507,6 +522,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+@@ -507,6 +542,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
return kmalloc_dma_caches[index];
#endif
@@ -99445,7 +100064,7 @@ index d319502..9eb3eb5 100644
return kmalloc_caches[index];
}
-@@ -563,7 +585,7 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -563,7 +605,7 @@ void __init create_kmalloc_caches(unsigned long flags)
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
if (!kmalloc_caches[i]) {
kmalloc_caches[i] = create_kmalloc_cache(NULL,
@@ -99454,7 +100073,7 @@ index d319502..9eb3eb5 100644
}
/*
-@@ -572,10 +594,10 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -572,10 +614,10 @@ void __init create_kmalloc_caches(unsigned long flags)
* earlier power of two caches
*/
if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
@@ -99467,7 +100086,7 @@ index d319502..9eb3eb5 100644
}
/* Kmalloc array is now usable */
-@@ -608,6 +630,23 @@ void __init create_kmalloc_caches(unsigned long flags)
+@@ -608,6 +650,23 @@ void __init create_kmalloc_caches(unsigned long flags)
}
}
#endif
@@ -99491,7 +100110,7 @@ index d319502..9eb3eb5 100644
}
#endif /* !CONFIG_SLOB */
-@@ -666,6 +705,9 @@ void print_slabinfo_header(struct seq_file *m)
+@@ -666,6 +725,9 @@ void print_slabinfo_header(struct seq_file *m)
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
@@ -99502,7 +100121,7 @@ index d319502..9eb3eb5 100644
seq_putc(m, '\n');
}
diff --git a/mm/slob.c b/mm/slob.c
-index 21980e0..ed9a648 100644
+index 21980e0..975f1bf 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
@@ -99583,6 +100202,15 @@ index 21980e0..ed9a648 100644
INIT_LIST_HEAD(&sp->lru);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
+@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+ /*
+ * slob_free: entry point into the slob allocator.
+ */
+-static void slob_free(void *block, int size)
++static void slob_free(struct kmem_cache *c, void *block, int size)
+ {
+ struct page *sp;
+ slob_t *prev, *next, *b = (slob_t *)block;
@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
if (slob_page_free(sp))
clear_slob_page_free(sp);
@@ -99595,7 +100223,7 @@ index 21980e0..ed9a648 100644
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab)
++ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
@@ -99676,7 +100304,7 @@ index 21980e0..ed9a648 100644
- slob_free(m, *m + align);
- } else
+ slob_t *m = (slob_t *)(block - align);
-+ slob_free(m, m[0].units + align);
++ slob_free(NULL, m, m[0].units + align);
+ } else {
+ __ClearPageSlab(sp);
+ page_mapcount_reset(sp);
@@ -99820,24 +100448,34 @@ index 21980e0..ed9a648 100644
if (b && c->ctor)
c->ctor(b);
-@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+ EXPORT_SYMBOL(kmem_cache_alloc_node);
+ #endif
- static void __kmem_cache_free(void *b, int size)
+-static void __kmem_cache_free(void *b, int size)
++static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
{
- if (size < PAGE_SIZE)
+- slob_free(b, size);
+ struct page *sp;
+
+ sp = virt_to_page(b);
+ BUG_ON(!PageSlab(sp));
+ if (!sp->private)
- slob_free(b, size);
++ slob_free(c, b, size);
else
- slob_free_pages(b, get_order(size));
+ slob_free_pages(sp, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+ struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+ void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+- __kmem_cache_free(b, slob_rcu->size);
++ __kmem_cache_free(NULL, b, slob_rcu->size);
+ }
void kmem_cache_free(struct kmem_cache *c, void *b)
{
@@ -99860,7 +100498,7 @@ index 21980e0..ed9a648 100644
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
- __kmem_cache_free(b, c->size);
-+ __kmem_cache_free(b, size);
++ __kmem_cache_free(c, b, size);
}
+#ifdef CONFIG_PAX_USERCOPY_SLABS
@@ -99873,7 +100511,7 @@ index 21980e0..ed9a648 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 3e8afcc..68c99031 100644
+index 3e8afcc..d6e2c89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,7 +207,7 @@ struct track {
@@ -99899,7 +100537,7 @@ index 3e8afcc..68c99031 100644
slab_free_hook(s, x);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
++ if (!(s->flags & SLAB_NO_SANITIZE)) {
+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
+ if (s->ctor)
+ s->ctor(x);
@@ -99923,7 +100561,7 @@ index 3e8afcc..68c99031 100644
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
++ (!(flags & SLAB_NO_SANITIZE)) ||
+#endif
s->ctor)) {
/*
@@ -100078,7 +100716,7 @@ index 3e8afcc..68c99031 100644
}
SLAB_ATTR_RO(aliases);
-@@ -4554,6 +4627,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+@@ -4554,6 +4627,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(cache_dma);
#endif
@@ -100090,20 +100728,31 @@ index 3e8afcc..68c99031 100644
+SLAB_ATTR_RO(usercopy);
+#endif
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
++}
++SLAB_ATTR_RO(sanitize);
++#endif
++
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-@@ -4888,6 +4969,9 @@ static struct attribute *slab_attrs[] = {
+@@ -4888,6 +4977,12 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+ &usercopy_attr.attr,
+#endif
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ &sanitize_attr.attr,
++#endif
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
-@@ -5132,6 +5216,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5132,6 +5227,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -100111,7 +100760,7 @@ index 3e8afcc..68c99031 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5205,6 +5290,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5205,6 +5301,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -100119,7 +100768,7 @@ index 3e8afcc..68c99031 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5218,6 +5304,7 @@ struct saved_alias {
+@@ -5218,6 +5315,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -100127,7 +100776,7 @@ index 3e8afcc..68c99031 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5240,6 +5327,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5240,6 +5338,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -101059,10 +101708,10 @@ index 115f149..f0ba286 100644
err = -EFAULT;
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 46547b9..f5defc1 100644
+index 14ca8ae..262d49a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
-@@ -3569,8 +3569,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+@@ -3565,8 +3565,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
break;
case L2CAP_CONF_RFC:
@@ -107938,10 +108587,10 @@ index 293828b..9fbe696 100755
# Find all available archs
find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
-index beb86b5..9becb4a 100644
+index beb86b5..505c33c 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,965 @@
+@@ -4,6 +4,969 @@
menu "Security options"
@@ -108704,10 +109353,14 @@ index beb86b5..9becb4a 100644
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
++ The slab sanitization feature excludes a few slab caches by default
++ for performance reasons. To extend the feature to cover those as
++ well, pass "pax_sanitize_slab=full" as a kernel command line parameter.
++
+ To reduce the performance penalty by sanitizing pages only, albeit
+ limiting the effectiveness of this feature at the same time, slab
-+ sanitization can be disabled with the kernel commandline parameter
-+ "pax_sanitize_slab=0".
++ sanitization can be disabled with the kernel command line parameter
++ "pax_sanitize_slab=off".
+
+ Note that this feature does not protect data stored in live pages,
+ e.g., process memory swapped to disk may stay there for a long time.
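As a hedged illustration of what the help text above means by sanitization: on free, the allocator overwrites the object with a fixed poison byte (PAX_MEMORY_SANITIZE_VALUE, '\xff' in the common case per the mm/slab.h hunk earlier in this patch) so stale data cannot be recovered from free memory. A userspace analogue, not patch content:

```c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define SANITIZE_VALUE 0xff	/* stand-in for PAX_MEMORY_SANITIZE_VALUE */

static void sanitized_free(void *p, size_t size)
{
	if (!p)
		return;
	/* Scrub the object before returning it to the allocator. */
	memset(p, SANITIZE_VALUE, size);
	free(p);
}

int main(void)
{
	size_t len = 32;
	char *secret = malloc(len);

	if (!secret)
		return 1;
	strcpy(secret, "hunter2");
	sanitized_free(secret, len);	/* contents scrubbed, then freed */
	return 0;
}
```

Booting with pax_sanitize_slab=full extends the scrub to the caches excluded by default, and pax_sanitize_slab=off disables it, matching the modes parsed in the mm/slab_common.c hunk earlier in this patch.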
@@ -108783,7 +109436,7 @@ index beb86b5..9becb4a 100644
+config PAX_REFCOUNT
+ bool "Prevent various kernel object reference counter overflows"
+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86)
++ depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || PPC || SPARC64 || X86)
+ help
+ By saying Y here the kernel will detect and prevent overflowing
+ various (but not all) kinds of object reference counters. Such
@@ -108907,7 +109560,7 @@ index beb86b5..9becb4a 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1062,7 @@ config INTEL_TXT
+@@ -103,7 +1066,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -109021,7 +109674,7 @@ index bab0611..f9a0ff5 100644
if (bprm->cap_effective)
return 1;
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
-index 57da4bd..db453a2 100644
+index 0fb456c..83711f9 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -118,8 +118,8 @@ int ima_init_template(void);
@@ -109725,7 +110378,7 @@ index 102e8fd..7263bb8 100644
if (err < 0)
return err;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
-index 8cd2f93..8412c57 100644
+index a95356f..0f5eabf 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2815,11 +2815,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
@@ -118050,10 +118703,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..4dc6368
+index 0000000..06374de
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5850 @@
+@@ -0,0 +1,5866 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -118667,7 +119320,8 @@ index 0000000..4dc6368
+kvm_mmu_notifier_test_young_7139 kvm_mmu_notifier_test_young 3 7139 NULL
+__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
+hdlc_loop_7255 hdlc_loop 0 7255 NULL
-+rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
++rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL nohasharray
++kimage_alloc_init_7282 kimage_alloc_init 3 7282 &rx_rate_rx_frames_per_rates_read_7282
+get_string_7302 get_string 0 7302 NULL
+pci_vpd_info_field_size_7324 pci_vpd_info_field_size 0 7324 NULL
+mgmt_control_7349 mgmt_control 3 7349 NULL
@@ -118776,6 +119430,7 @@ index 0000000..4dc6368
+mlx5_vzalloc_8663 mlx5_vzalloc 1 8663 NULL
+dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
+lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
++bpf_prog_size_8685 bpf_prog_size 0 8685 NULL
+rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
+skb_frag_size_8695 skb_frag_size 0 8695 NULL
+arcfb_write_8702 arcfb_write 3 8702 NULL
@@ -119028,6 +119683,7 @@ index 0000000..4dc6368
+tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
+page_offset_11120 page_offset 0 11120 NULL
+tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
+snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 3-4 11172 NULL
+il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
+comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
@@ -119375,6 +120031,7 @@ index 0000000..4dc6368
+ttm_page_pool_free_14797 ttm_page_pool_free 2-0 14797 &__kfifo_in_14797
+hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
++do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
+mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
+lcd_write_14857 lcd_write 3 14857 NULL
+get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
@@ -119466,6 +120123,7 @@ index 0000000..4dc6368
+viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
+dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
+read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
++SYSC_kexec_file_load_16058 SYSC_kexec_file_load 3 16058 NULL
+si5351_msynth_params_address_16062 si5351_msynth_params_address 0-1 16062 NULL
+isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
+isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 NULL nohasharray
@@ -119704,7 +120362,8 @@ index 0000000..4dc6368
+sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
+smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
+debug_output_18575 debug_output 3 18575 NULL
-+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
+iowarrior_write_18604 iowarrior_write 3 18604 NULL
+from_buffer_18625 from_buffer 3 18625 NULL
+kmalloc_kernel_18641 kmalloc_kernel 1 18641 NULL
@@ -121180,6 +121839,7 @@ index 0000000..4dc6368
+striped_read_35218 striped_read 0-2 35218 NULL nohasharray
+security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
+rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
++kimage_file_prepare_segments_35232 kimage_file_prepare_segments 5 35232 NULL
+set_fd_set_35249 set_fd_set 1 35249 NULL
+ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
+dis_disc_write_35265 dis_disc_write 3 35265 NULL
@@ -121695,6 +122355,7 @@ index 0000000..4dc6368
+vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
+snd_vx_create_40948 snd_vx_create 4 40948 NULL
+rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
++ima_appraise_measurement_40978 ima_appraise_measurement 6 40978 NULL
+il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
+iwl_dbgfs_scan_ant_rxchain_read_40999 iwl_dbgfs_scan_ant_rxchain_read 3 40999 NULL
+mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
@@ -121774,6 +122435,7 @@ index 0000000..4dc6368
+ptlrpc_new_bulk_41804 ptlrpc_new_bulk 1 41804 NULL
+rtw_android_get_macaddr_41812 rtw_android_get_macaddr 0 41812 NULL
+sco_send_frame_41815 sco_send_frame 3 41815 NULL
++kimage_file_alloc_init_41827 kimage_file_alloc_init 5 41827 NULL
+copy_page_to_iter_bvec_41830 copy_page_to_iter_bvec 0-3 41830 NULL
+ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
+do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
@@ -121943,6 +122605,7 @@ index 0000000..4dc6368
+fuse_send_read_43725 fuse_send_read 4-0 43725 NULL
+drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
+snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
++__alloc_alien_cache_43734 __alloc_alien_cache 2 43734 NULL
+fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
+gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
+sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
@@ -122268,6 +122931,7 @@ index 0000000..4dc6368
+mcp23s17_read_regs_47491 mcp23s17_read_regs 4 47491 NULL
+core_sys_select_47494 core_sys_select 1 47494 NULL
+as3722_block_write_47503 as3722_block_write 2-3 47503 NULL
++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
+unlink_simple_47506 unlink_simple 3 47506 NULL
+pstore_decompress_47510 pstore_decompress 0 47510 NULL
+ec_i2c_count_response_47518 ec_i2c_count_response 0 47518 NULL
@@ -122803,6 +123467,7 @@ index 0000000..4dc6368
+verity_status_53120 verity_status 5 53120 NULL
+brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
+ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
++copy_user_segment_list_53150 copy_user_segment_list 2 53150 NULL
+ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL
+btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL
+clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
@@ -123205,6 +123870,7 @@ index 0000000..4dc6368
+dio_send_cur_page_57348 dio_send_cur_page 0 57348 NULL
+tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
+tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
++bzImage64_load_57388 bzImage64_load 7 57388 NULL
+read_file_blob_57406 read_file_blob 3 57406 NULL
+enclosure_register_57412 enclosure_register 3 57412 NULL
+compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL
@@ -123580,7 +124246,9 @@ index 0000000..4dc6368
+insert_one_name_61668 insert_one_name 7 61668 NULL
+qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
+lock_loop_61681 lock_loop 1 61681 NULL
++__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
+filter_read_61692 filter_read 3 61692 NULL
++SyS_kexec_file_load_61715 SyS_kexec_file_load 3 61715 NULL
+iov_length_61716 iov_length 0 61716 NULL
+fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
+null_alloc_reqbuf_61719 null_alloc_reqbuf 3 61719 NULL
@@ -123601,6 +124269,7 @@ index 0000000..4dc6368
+rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
+au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
+sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
++efi_get_runtime_map_size_61927 efi_get_runtime_map_size 0 61927 NULL
+SyS_kexec_load_61946 SyS_kexec_load 2 61946 NULL
+il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
+squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
@@ -125414,10 +126083,10 @@ index 714b949..1f0dc1e 100644
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 95519bc..43f5d42 100644
+index 6a3f29b..a1d2e93 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
-@@ -76,12 +76,17 @@ LIST_HEAD(vm_list);
+@@ -77,12 +77,17 @@ LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
@@ -125437,7 +126106,7 @@ index 95519bc..43f5d42 100644
struct dentry *kvm_debugfs_dir;
-@@ -763,7 +768,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+@@ -780,7 +785,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* We can read the guest memory with __xxx_user() later on. */
if ((mem->slot < KVM_USER_MEM_SLOTS) &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
@@ -125446,7 +126115,7 @@ index 95519bc..43f5d42 100644
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
-@@ -1620,9 +1625,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+@@ -1637,9 +1642,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
@@ -125466,7 +126135,7 @@ index 95519bc..43f5d42 100644
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
-@@ -1872,7 +1885,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+@@ -1889,7 +1902,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
@@ -125475,7 +126144,7 @@ index 95519bc..43f5d42 100644
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2573,7 +2586,7 @@ out:
+@@ -2593,7 +2606,7 @@ out:
}
#endif
@@ -125484,7 +126153,7 @@ index 95519bc..43f5d42 100644
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
-@@ -2646,7 +2659,7 @@ out:
+@@ -2666,7 +2679,7 @@ out:
return r;
}
@@ -125493,7 +126162,7 @@ index 95519bc..43f5d42 100644
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
-@@ -2672,7 +2685,7 @@ static void hardware_enable_nolock(void *junk)
+@@ -2692,7 +2705,7 @@ static void hardware_enable_nolock(void *junk)
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -125502,7 +126171,7 @@ index 95519bc..43f5d42 100644
printk(KERN_INFO "kvm: enabling virtualization on "
"CPU%d failed\n", cpu);
}
-@@ -2728,10 +2741,10 @@ static int hardware_enable_all(void)
+@@ -2748,10 +2761,10 @@ static int hardware_enable_all(void)
kvm_usage_count++;
if (kvm_usage_count == 1) {
@@ -125515,7 +126184,7 @@ index 95519bc..43f5d42 100644
hardware_disable_all_nolock();
r = -EBUSY;
}
-@@ -3136,7 +3149,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+@@ -3156,7 +3169,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
@@ -125524,7 +126193,7 @@ index 95519bc..43f5d42 100644
struct module *module)
{
int r;
-@@ -3183,7 +3196,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3203,7 +3216,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
@@ -125533,7 +126202,7 @@ index 95519bc..43f5d42 100644
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_3;
-@@ -3193,9 +3206,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3213,9 +3226,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
if (r)
goto out_free;
@@ -125545,7 +126214,7 @@ index 95519bc..43f5d42 100644
r = misc_register(&kvm_dev);
if (r) {
-@@ -3205,9 +3220,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3225,9 +3240,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
register_syscore_ops(&kvm_syscore_ops);
diff --git a/3.17.1/4425_grsec_remove_EI_PAX.patch b/3.17.2/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.17.1/4425_grsec_remove_EI_PAX.patch
+++ b/3.17.2/4425_grsec_remove_EI_PAX.patch
diff --git a/3.17.1/4427_force_XATTR_PAX_tmpfs.patch b/3.17.2/4427_force_XATTR_PAX_tmpfs.patch
index 21c0171..21c0171 100644
--- a/3.17.1/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.17.2/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.17.1/4430_grsec-remove-localversion-grsec.patch b/3.17.2/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.17.1/4430_grsec-remove-localversion-grsec.patch
+++ b/3.17.2/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.17.1/4435_grsec-mute-warnings.patch b/3.17.2/4435_grsec-mute-warnings.patch
index 4a959cc..4a959cc 100644
--- a/3.17.1/4435_grsec-mute-warnings.patch
+++ b/3.17.2/4435_grsec-mute-warnings.patch
diff --git a/3.17.1/4440_grsec-remove-protected-paths.patch b/3.17.2/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.17.1/4440_grsec-remove-protected-paths.patch
+++ b/3.17.2/4440_grsec-remove-protected-paths.patch
diff --git a/3.17.1/4450_grsec-kconfig-default-gids.patch b/3.17.2/4450_grsec-kconfig-default-gids.patch
index 8a63d7f..8a63d7f 100644
--- a/3.17.1/4450_grsec-kconfig-default-gids.patch
+++ b/3.17.2/4450_grsec-kconfig-default-gids.patch
diff --git a/3.17.1/4465_selinux-avc_audit-log-curr_ip.patch b/3.17.2/4465_selinux-avc_audit-log-curr_ip.patch
index 747ac53..747ac53 100644
--- a/3.17.1/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.17.2/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.17.1/4470_disable-compat_vdso.patch b/3.17.2/4470_disable-compat_vdso.patch
index dec59f7..dec59f7 100644
--- a/3.17.1/4470_disable-compat_vdso.patch
+++ b/3.17.2/4470_disable-compat_vdso.patch
diff --git a/3.17.1/4475_emutramp_default_on.patch b/3.17.2/4475_emutramp_default_on.patch
index cf88fd9..cf88fd9 100644
--- a/3.17.1/4475_emutramp_default_on.patch
+++ b/3.17.2/4475_emutramp_default_on.patch
diff --git a/3.2.63/0000_README b/3.2.63/0000_README
index 5a21a10..1c94b09 100644
--- a/3.2.63/0000_README
+++ b/3.2.63/0000_README
@@ -170,7 +170,7 @@ Patch: 1062_linux-3.2.63.patch
From: http://www.kernel.org
Desc: Linux 3.2.63
-Patch: 4420_grsecurity-3.0-3.2.63-201410250023.patch
+Patch: 4420_grsecurity-3.0-3.2.63-201410312211.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch b/3.2.63/4420_grsecurity-3.0-3.2.63-201410312211.patch
index 02b9ab1..82f3ff6 100644
--- a/3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch
+++ b/3.2.63/4420_grsecurity-3.0-3.2.63-201410312211.patch
@@ -203,7 +203,7 @@ index dfa6fc6..ccbfbf3 100644
+zconf.lex.c
zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 1b196ea..ea70ff0 100644
+index 1b196ea..da5220d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -859,6 +859,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
@@ -216,7 +216,7 @@ index 1b196ea..ea70ff0 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -1962,6 +1965,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1962,6 +1965,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -226,8 +226,13 @@ index 1b196ea..ea70ff0 100644
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
-+ 0/1 to disable/enable slab object sanitization (enabled by
-+ default).
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility; 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
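
All three slab allocators map these modes onto the SLAB_NO_SANITIZE cache flag
at cache-creation time; the mm/slab.c, mm/slob.c and mm/slub.c hunks later in
this patch repeat the same check, sketched here as a helper (the helper name is
hypothetical, the body is taken verbatim from those hunks):

    static unsigned long pax_apply_sanitize_mode(unsigned long flags)
    {
            /* RCU-freed caches are never sanitized; "full" overrides
             * the whitelist for everything else. */
            if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF ||
                (flags & SLAB_DESTROY_BY_RCU))
                    flags |= SLAB_NO_SANITIZE;
            else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
                    flags &= ~SLAB_NO_SANITIZE;
            return flags;
    }
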
@@ -83753,7 +83758,7 @@ index 1b4ea29..9347e29 100644
static inline void nf_reset_trace(struct sk_buff *skb)
diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 67d5d94..51dd834 100644
+index 67d5d94..c51ce65 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -11,14 +11,29 @@
@@ -83786,7 +83791,7 @@ index 67d5d94..51dd834 100644
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-@@ -87,10 +102,22 @@
+@@ -87,10 +102,27 @@
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
* Both make kfree a no-op.
*/
@@ -83807,12 +83812,17 @@ index 67d5d94..51dd834 100644
+#else
+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
+#endif
-+extern bool pax_sanitize_slab;
++enum pax_sanitize_mode {
++ PAX_SANITIZE_SLAB_OFF = 0,
++ PAX_SANITIZE_SLAB_FAST,
++ PAX_SANITIZE_SLAB_FULL,
++};
++extern enum pax_sanitize_mode pax_sanitize_slab;
+#endif
/*
* struct kmem_cache related prototypes
-@@ -161,6 +188,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
+@@ -161,6 +193,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
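
Since freed objects are filled with PAX_MEMORY_SANITIZE_VALUE, a stale read
after kfree() tends to surface as that repeated byte in a crash dump. A
hypothetical debugging helper, not part of the patch, that recognizes the
fill pattern:

    static bool looks_sanitized(const unsigned char *p, size_t n)
    {
            while (n--)
                    if (*p++ != (unsigned char)PAX_MEMORY_SANITIZE_VALUE)
                            return false;
            return true;
    }
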
@@ -83961,7 +83971,7 @@ index 6a40c76..1747b67 100644
enum {
false = 0,
diff --git a/include/linux/string.h b/include/linux/string.h
-index e033564..e43a65c 100644
+index e033564..7cdb1a8 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -133,7 +133,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
@@ -83973,12 +83983,11 @@ index e033564..e43a65c 100644
/**
* strstarts - does @str start with @prefix?
-@@ -144,5 +144,9 @@ static inline bool strstarts(const char *str, const char *prefix)
+@@ -144,5 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
+
-+size_t memweight(const void *ptr, size_t bytes);
+void memzero_explicit(void *s, size_t count);
+
#endif
@@ -96738,22 +96747,38 @@ index 39b3a7d..660592a 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mm_init.c b/mm/mm_init.c
-index 1ffd97a..240aa20 100644
+index 1ffd97a..ed75674 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
-@@ -11,6 +11,17 @@
+@@ -9,8 +9,33 @@
+ #include <linux/init.h>
+ #include <linux/kobject.h>
#include <linux/export.h>
++#include <linux/slab.h>
#include "internal.h"
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+bool pax_sanitize_slab __read_only = true;
++enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
+static int __init pax_sanitize_slab_setup(char *str)
+{
-+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
-+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
-+ return 1;
++ if (!str)
++ return 0;
++
++ if (!strcmp(str, "0") || !strcmp(str, "off")) {
++ pr_info("PaX slab sanitization: %s\n", "disabled");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
++ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
++ pr_info("PaX slab sanitization: %s\n", "fast");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
++ } else if (!strcmp(str, "full")) {
++ pr_info("PaX slab sanitization: %s\n", "full");
++ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
++ } else
++ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
++
++ return 0;
+}
-+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
++early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
#ifdef CONFIG_DEBUG_MEMORY_INIT
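
Note the switch from __setup() to early_param() above: __setup() handlers
return 1 to mark a parameter as consumed, while early_param() handlers run
much earlier in boot and return 0 on success (a non-zero return is reported as
a malformed option), which is why the handler now returns 0 throughout. A
minimal skeleton following the same pattern, with a hypothetical option name:

    static int __init example_opt_setup(char *str)
    {
            if (!str)
                    return 0;

            /* parse str here */

            return 0;       /* 0 = handled successfully */
    }
    early_param("example_opt", example_opt_setup);
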
@@ -99087,7 +99112,7 @@ index 1371021..7104960 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index aea5e42..a3a3241 100644
+index aea5e42..98a44a8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -151,7 +151,7 @@
@@ -99236,29 +99261,42 @@ index aea5e42..a3a3241 100644
sizes++;
names++;
}
-@@ -3662,6 +3683,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -2284,6 +2305,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+ */
+ BUG_ON(flags & ~CREATE_MASK);
+
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_NO_SANITIZE;
++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++ flags &= ~SLAB_NO_SANITIZE;
++#endif
++
+ /*
+ * Check that size is in terms of words. This is needed to avoid
+ * unaligned accesses for some archs when redzoning is used, and makes
+@@ -3662,6 +3690,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab) {
-+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
-+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, obj_size(cachep));
++ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
++ STATS_INC_NOT_SANITIZED(cachep);
++ else {
++ memset(objp, PAX_MEMORY_SANITIZE_VALUE, obj_size(cachep));
+
-+ if (cachep->ctor)
-+ cachep->ctor(objp);
++ if (cachep->ctor)
++ cachep->ctor(objp);
+
-+ STATS_INC_SANITIZED(cachep);
-+ } else
-+ STATS_INC_NOT_SANITIZED(cachep);
++ STATS_INC_SANITIZED(cachep);
+ }
+#endif
+
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
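
The constructor re-run after the memset above is essential: slab caches
created with a ctor keep their objects in constructed state while they sit on
the freelist, so wiping without reconstructing would hand out a corrupt object
on the next allocation. A hypothetical constructed cache for illustration:

    struct example_obj {            /* hypothetical */
            spinlock_t lock;
            int state;
    };

    static void example_ctor(void *p)
    {
            struct example_obj *obj = p;

            /* must hold after every free, hence the ctor re-run */
            spin_lock_init(&obj->lock);
            obj->state = 0;
    }
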
-@@ -3879,6 +3915,7 @@ void kfree(const void *objp)
+@@ -3879,6 +3921,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -99266,7 +99304,7 @@ index aea5e42..a3a3241 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4216,6 +4253,9 @@ static void print_slabinfo_header(struct seq_file *m)
+@@ -4216,6 +4259,9 @@ static void print_slabinfo_header(struct seq_file *m)
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
@@ -99276,7 +99314,7 @@ index aea5e42..a3a3241 100644
#endif
seq_putc(m, '\n');
}
-@@ -4325,14 +4365,22 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4325,14 +4371,22 @@ static int s_show(struct seq_file *m, void *p)
}
/* cpu stats */
{
@@ -99303,7 +99341,7 @@ index aea5e42..a3a3241 100644
#endif
seq_putc(m, '\n');
return 0;
-@@ -4587,13 +4635,71 @@ static int __init slab_proc_init(void)
+@@ -4587,13 +4641,71 @@ static int __init slab_proc_init(void)
{
proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -99377,7 +99415,7 @@ index aea5e42..a3a3241 100644
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slob.c b/mm/slob.c
-index 8105be4..8c1ce34 100644
+index 8105be4..93fb21c 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -29,7 +29,7 @@
@@ -99397,7 +99435,21 @@ index 8105be4..8c1ce34 100644
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
-@@ -100,9 +101,8 @@ struct slob_page {
+@@ -91,6 +92,13 @@ struct slob_block {
+ };
+ typedef struct slob_block slob_t;
+
++struct kmem_cache {
++ unsigned int size, align;
++ unsigned long flags;
++ const char *name;
++ void (*ctor)(void *);
++};
++
+ /*
+ * We use struct page fields to manage some slob allocation aspects,
+ * however to avoid the horrible mess in include/linux/mm_types.h, we'll
+@@ -100,9 +108,8 @@ struct slob_page {
union {
struct {
unsigned long flags; /* mandatory */
@@ -99408,7 +99460,7 @@ index 8105be4..8c1ce34 100644
slob_t *free; /* first free slob_t in page */
struct list_head list; /* linked list of free pages */
};
-@@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
+@@ -135,7 +142,7 @@ static LIST_HEAD(free_slob_large);
*/
static inline int is_slob_page(struct slob_page *sp)
{
@@ -99417,7 +99469,7 @@ index 8105be4..8c1ce34 100644
}
static inline void set_slob_page(struct slob_page *sp)
-@@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
+@@ -150,7 +157,7 @@ static inline void clear_slob_page(struct slob_page *sp)
static inline struct slob_page *slob_page(const void *addr)
{
@@ -99426,7 +99478,7 @@ index 8105be4..8c1ce34 100644
}
/*
-@@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
+@@ -210,7 +217,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
/*
* Return the size of a slob block.
*/
@@ -99435,7 +99487,7 @@ index 8105be4..8c1ce34 100644
{
if (s->units > 0)
return s->units;
-@@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
+@@ -220,7 +227,7 @@ static slobidx_t slob_units(slob_t *s)
/*
* Return the next free slob block pointer after this one.
*/
@@ -99444,7 +99496,7 @@ index 8105be4..8c1ce34 100644
{
slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
slobidx_t next;
-@@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
+@@ -235,7 +242,7 @@ static slob_t *slob_next(slob_t *s)
/*
* Returns true if s is the last free block in its page.
*/
@@ -99453,7 +99505,7 @@ index 8105be4..8c1ce34 100644
{
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-@@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
+@@ -254,6 +261,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
if (!page)
return NULL;
@@ -99461,7 +99513,7 @@ index 8105be4..8c1ce34 100644
return page_address(page);
}
-@@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+@@ -370,11 +378,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
if (!b)
return NULL;
sp = slob_page(b);
@@ -99474,19 +99526,28 @@ index 8105be4..8c1ce34 100644
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
-@@ -418,6 +419,11 @@ static void slob_free(void *block, int size)
+@@ -390,7 +398,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+ /*
+ * slob_free: entry point into the slob allocator.
+ */
+-static void slob_free(void *block, int size)
++static void slob_free(struct kmem_cache *c, void *block, int size)
+ {
+ struct slob_page *sp;
+ slob_t *prev, *next, *b = (slob_t *)block;
+@@ -418,6 +426,11 @@ static void slob_free(void *block, int size)
return;
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab)
++ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
-@@ -476,10 +482,9 @@ out:
+@@ -476,10 +489,9 @@ out:
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
@@ -99499,7 +99560,7 @@ index 8105be4..8c1ce34 100644
void *ret;
gfp &= gfp_allowed_mask;
-@@ -494,7 +499,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -494,7 +506,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
if (!m)
return NULL;
@@ -99511,7 +99572,7 @@ index 8105be4..8c1ce34 100644
ret = (void *)m + align;
trace_kmalloc_node(_RET_IP_, ret,
-@@ -506,16 +514,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+@@ -506,16 +521,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
if (ret) {
@@ -99541,7 +99602,7 @@ index 8105be4..8c1ce34 100644
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
-@@ -530,16 +547,92 @@ void kfree(const void *block)
+@@ -530,16 +554,92 @@ void kfree(const void *block)
return;
kmemleak_free(block);
@@ -99553,7 +99614,7 @@ index 8105be4..8c1ce34 100644
- slob_free(m, *m + align);
- } else
+ slob_t *m = (slob_t *)(block - align);
-+ slob_free(m, m[0].units + align);
++ slob_free(NULL, m, m[0].units + align);
+ } else {
+ clear_slob_page(sp);
+ free_slob_page(sp);
@@ -99637,7 +99698,7 @@ index 8105be4..8c1ce34 100644
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
-@@ -552,10 +645,10 @@ size_t ksize(const void *block)
+@@ -552,27 +652,32 @@ size_t ksize(const void *block)
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
@@ -99651,7 +99712,15 @@ index 8105be4..8c1ce34 100644
}
EXPORT_SYMBOL(ksize);
-@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+-struct kmem_cache {
+- unsigned int size, align;
+- unsigned long flags;
+- const char *name;
+- void (*ctor)(void *);
+-};
+-
+ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *c;
@@ -99662,10 +99731,26 @@ index 8105be4..8c1ce34 100644
c = slob_alloc(sizeof(struct kmem_cache),
GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+#endif
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_NO_SANITIZE;
++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++ flags &= ~SLAB_NO_SANITIZE;
++#endif
if (c) {
c->name = name;
-@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+@@ -602,7 +707,7 @@ void kmem_cache_destroy(struct kmem_cache *c)
+ kmemleak_free(c);
+ if (c->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
+- slob_free(c, sizeof(struct kmem_cache));
++ slob_free(NULL, c, sizeof(struct kmem_cache));
+ }
+ EXPORT_SYMBOL(kmem_cache_destroy);
+
+@@ -614,17 +719,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
lockdep_trace_alloc(flags);
@@ -99691,16 +99776,20 @@ index 8105be4..8c1ce34 100644
if (c->ctor)
c->ctor(b);
-@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
+@@ -634,12 +747,18 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+ }
+ EXPORT_SYMBOL(kmem_cache_alloc_node);
- static void __kmem_cache_free(void *b, int size)
+-static void __kmem_cache_free(void *b, int size)
++static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
{
- if (size < PAGE_SIZE)
+- slob_free(b, size);
+- else
+ struct slob_page *sp = slob_page(b);
+
+ if (is_slob_page(sp))
- slob_free(b, size);
-- else
++ slob_free(c, b, size);
+ else {
+ clear_slob_page(sp);
+ free_slob_page(sp);
@@ -99710,7 +99799,13 @@ index 8105be4..8c1ce34 100644
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -647,22 +766,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+ struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+ void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+- __kmem_cache_free(b, slob_rcu->size);
++ __kmem_cache_free(NULL, b, slob_rcu->size);
+ }
void kmem_cache_free(struct kmem_cache *c, void *b)
{
@@ -99733,7 +99828,7 @@ index 8105be4..8c1ce34 100644
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
- __kmem_cache_free(b, c->size);
-+ __kmem_cache_free(b, size);
++ __kmem_cache_free(c, b, size);
}
+#ifdef CONFIG_PAX_USERCOPY_SLABS
@@ -99746,7 +99841,7 @@ index 8105be4..8c1ce34 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 6a4c2fb..1e9af90 100644
+index 6a4c2fb..18d36e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -186,7 +186,7 @@ static enum {
@@ -99781,7 +99876,7 @@ index 6a4c2fb..1e9af90 100644
slab_free_hook(s, x);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
++ if (!(s->flags & SLAB_NO_SANITIZE)) {
+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->objsize);
+ if (s->ctor)
+ s->ctor(x);
@@ -99809,7 +99904,17 @@ index 6a4c2fb..1e9af90 100644
/*
* Calculate the order of allocation given an slab object size.
-@@ -3055,7 +3065,7 @@ static int kmem_cache_open(struct kmem_cache *s,
+@@ -2909,6 +2919,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ s->inuse = size;
+
+ if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ (!(flags & SLAB_NO_SANITIZE)) ||
++#endif
+ s->ctor)) {
+ /*
+ * Relocate free pointer after the object if it is not
+@@ -3055,7 +3068,7 @@ static int kmem_cache_open(struct kmem_cache *s,
else
s->cpu_partial = 30;
@@ -99818,7 +99923,7 @@ index 6a4c2fb..1e9af90 100644
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
#endif
-@@ -3159,8 +3169,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
+@@ -3159,8 +3172,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
down_write(&slub_lock);
@@ -99828,7 +99933,7 @@ index 6a4c2fb..1e9af90 100644
list_del(&s->list);
up_write(&slub_lock);
if (kmem_cache_close(s)) {
-@@ -3189,6 +3198,10 @@ static struct kmem_cache *kmem_cache;
+@@ -3189,6 +3201,10 @@ static struct kmem_cache *kmem_cache;
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
#endif
@@ -99839,7 +99944,7 @@ index 6a4c2fb..1e9af90 100644
static int __init setup_slub_min_order(char *str)
{
get_option(&str, &slub_min_order);
-@@ -3303,6 +3316,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+@@ -3303,6 +3319,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
return kmalloc_dma_caches[index];
#endif
@@ -99853,7 +99958,7 @@ index 6a4c2fb..1e9af90 100644
return kmalloc_caches[index];
}
-@@ -3371,6 +3391,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
+@@ -3371,6 +3394,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -99913,7 +100018,7 @@ index 6a4c2fb..1e9af90 100644
size_t ksize(const void *object)
{
struct page *page;
-@@ -3435,6 +3508,7 @@ void kfree(const void *x)
+@@ -3435,6 +3511,7 @@ void kfree(const void *x)
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
@@ -99921,7 +100026,7 @@ index 6a4c2fb..1e9af90 100644
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
-@@ -3645,7 +3719,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+@@ -3645,7 +3722,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
int node;
list_add(&s->list, &slab_caches);
@@ -99930,7 +100035,7 @@ index 6a4c2fb..1e9af90 100644
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
-@@ -3762,17 +3836,17 @@ void __init kmem_cache_init(void)
+@@ -3762,17 +3839,17 @@ void __init kmem_cache_init(void)
/* Caches that are not of the two-to-the-power-of size */
if (KMALLOC_MIN_SIZE <= 32) {
@@ -99951,7 +100056,7 @@ index 6a4c2fb..1e9af90 100644
caches++;
}
-@@ -3814,6 +3888,22 @@ void __init kmem_cache_init(void)
+@@ -3814,6 +3891,22 @@ void __init kmem_cache_init(void)
}
}
#endif
@@ -99974,7 +100079,7 @@ index 6a4c2fb..1e9af90 100644
printk(KERN_INFO
"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
" CPUs=%d, Nodes=%d\n",
-@@ -3840,7 +3930,7 @@ static int slab_unmergeable(struct kmem_cache *s)
+@@ -3840,7 +3933,7 @@ static int slab_unmergeable(struct kmem_cache *s)
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
@@ -99983,8 +100088,18 @@ index 6a4c2fb..1e9af90 100644
return 1;
return 0;
-@@ -3899,7 +3989,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3897,9 +3990,17 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ return NULL;
+
down_write(&slub_lock);
++
++#ifdef CONFIG_PAX_MEMORY_SANITIZE
++ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
++ flags |= SLAB_NO_SANITIZE;
++ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
++ flags &= ~SLAB_NO_SANITIZE;
++#endif
++
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
- s->refcount++;
@@ -99992,7 +100107,7 @@ index 6a4c2fb..1e9af90 100644
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
-@@ -3908,7 +3998,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+@@ -3908,7 +4009,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
if (sysfs_slab_alias(s, name)) {
@@ -100001,7 +100116,7 @@ index 6a4c2fb..1e9af90 100644
goto err;
}
up_write(&slub_lock);
-@@ -3979,7 +4069,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+@@ -3979,7 +4080,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -100010,7 +100125,7 @@ index 6a4c2fb..1e9af90 100644
.notifier_call = slab_cpuup_callback
};
-@@ -4037,7 +4127,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+@@ -4037,7 +4138,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#endif
@@ -100019,7 +100134,7 @@ index 6a4c2fb..1e9af90 100644
static int count_inuse(struct page *page)
{
return page->inuse;
-@@ -4424,12 +4514,12 @@ static void resiliency_test(void)
+@@ -4424,12 +4525,12 @@ static void resiliency_test(void)
validate_slab_cache(kmalloc_caches[9]);
}
#else
@@ -100034,7 +100149,7 @@ index 6a4c2fb..1e9af90 100644
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4676,7 +4766,7 @@ SLAB_ATTR_RO(ctor);
+@@ -4676,7 +4777,7 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
@@ -100043,7 +100158,7 @@ index 6a4c2fb..1e9af90 100644
}
SLAB_ATTR_RO(aliases);
-@@ -5243,6 +5333,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5243,6 +5344,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -100051,7 +100166,7 @@ index 6a4c2fb..1e9af90 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5271,7 +5362,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
+@@ -5271,7 +5373,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
}
s->kobj.kset = slab_kset;
@@ -100060,7 +100175,7 @@ index 6a4c2fb..1e9af90 100644
if (err) {
kobject_put(&s->kobj);
return err;
-@@ -5305,6 +5396,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5305,6 +5407,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -100068,7 +100183,7 @@ index 6a4c2fb..1e9af90 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5318,6 +5410,7 @@ struct saved_alias {
+@@ -5318,6 +5421,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -100076,7 +100191,7 @@ index 6a4c2fb..1e9af90 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5340,6 +5433,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5340,6 +5444,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -109562,10 +109677,10 @@ index 38f6617..e70b72b 100755
exuberant()
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..c9f7da3 100644
+index 51bd5a0..96aff0b 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,963 @@
+@@ -4,6 +4,967 @@
menu "Security options"
@@ -110330,10 +110445,14 @@ index 51bd5a0..c9f7da3 100644
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
++ The slab sanitization feature excludes a few slab caches by default
++ for performance reasons. To extend the feature to cover those as
++ well, pass "pax_sanitize_slab=full" as a kernel command line parameter.
++
+ To reduce the performance penalty by sanitizing pages only, albeit
+ limiting the effectiveness of this feature at the same time, slab
-+ sanitization can be disabled with the kernel commandline parameter
-+ "pax_sanitize_slab=0".
++ sanitization can be disabled with the kernel command line parameter
++ "pax_sanitize_slab=off".
+
+ Note that this feature does not protect data stored in live pages,
+ e.g., process memory swapped to disk may stay there for a long time.
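
One caveat to the "full" mode mentioned above: caches created with
SLAB_DESTROY_BY_RCU keep SLAB_NO_SANITIZE in every mode, because RCU readers
may legitimately dereference such objects after kmem_cache_free(), and wiping
them would break that type-stability guarantee. A sketch of such a cache, with
a hypothetical name and size:

    struct kmem_cache *rcu_cache;

    rcu_cache = kmem_cache_create("example_rcu_cache", 128, 0,
                                  SLAB_DESTROY_BY_RCU, NULL);
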
@@ -110529,7 +110648,7 @@ index 51bd5a0..c9f7da3 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -169,7 +1126,7 @@ config INTEL_TXT
+@@ -169,7 +1130,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -120713,10 +120832,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..212e4c0
+index 0000000..ea4ae44
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5113 @@
+@@ -0,0 +1,5116 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -121588,6 +121707,7 @@ index 0000000..212e4c0
+tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
+page_offset_11120 page_offset 0 11120 NULL
+tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL
++alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
+ioat2_alloc_ring_11172 ioat2_alloc_ring 2 11172 NULL nohasharray
+snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 3-4 11172 &ioat2_alloc_ring_11172
+__swab16p_11220 __swab16p 0 11220 NULL
@@ -122190,7 +122310,8 @@ index 0000000..212e4c0
+vb2_streamon_18562 vb2_streamon 0 18562 NULL
+debug_output_18575 debug_output 3 18575 NULL
+__netdev_alloc_skb_18595 __netdev_alloc_skb 2 18595 NULL
-+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL
++filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
++slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
+iowarrior_write_18604 iowarrior_write 3 18604 NULL
+from_buffer_18625 from_buffer 3 18625 NULL
+f1x_map_sysaddr_to_csrow_18628 f1x_map_sysaddr_to_csrow 2 18628 NULL
@@ -124415,6 +124536,7 @@ index 0000000..212e4c0
+posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
+newpart_47485 newpart 6-4 47485 NULL
+core_sys_select_47494 core_sys_select 1 47494 NULL
++alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
+unlink_simple_47506 unlink_simple 3 47506 NULL
+process_vm_rw_47533 process_vm_rw 3-5 47533 NULL nohasharray
+vscnprintf_47533 vscnprintf 0-2 47533 &process_vm_rw_47533