Diffstat (limited to '4.8.12/1011_linux-4.8.12.patch')
-rw-r--r-- | 4.8.12/1011_linux-4.8.12.patch | 1563
1 file changed, 1563 insertions, 0 deletions
diff --git a/4.8.12/1011_linux-4.8.12.patch b/4.8.12/1011_linux-4.8.12.patch new file mode 100644 index 0000000..6e460f1 --- /dev/null +++ b/4.8.12/1011_linux-4.8.12.patch @@ -0,0 +1,1563 @@ +diff --git a/Makefile b/Makefile +index 2b1bcba..7b0c92f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 8 +-SUBLEVEL = 11 ++SUBLEVEL = 12 + EXTRAVERSION = + NAME = Psychotic Stoned Sheep + +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig +index af12c2d..81c11a6 100644 +--- a/arch/parisc/Kconfig ++++ b/arch/parisc/Kconfig +@@ -33,7 +33,9 @@ config PARISC + select HAVE_ARCH_HASH + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK +- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT) ++ select GENERIC_SCHED_CLOCK ++ select HAVE_UNSTABLE_SCHED_CLOCK if SMP ++ select GENERIC_CLOCKEVENTS + select ARCH_NO_COHERENT_DMA_MMAP + select CPU_NO_EFFICIENT_FFS + +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c +index 6700127..c2259d4 100644 +--- a/arch/parisc/kernel/cache.c ++++ b/arch/parisc/kernel/cache.c +@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void) + { + unsigned long rangetime, alltime; + unsigned long size, start; ++ unsigned long threshold; + + alltime = mfctl(16); + flush_data_cache(); +@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void) + printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime, size, rangetime); + +- /* Racy, but if we see an intermediate value, it's ok too... */ +- parisc_cache_flush_threshold = size * alltime / rangetime; +- +- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); +- if (!parisc_cache_flush_threshold) +- parisc_cache_flush_threshold = FLUSH_THRESHOLD; +- +- if (parisc_cache_flush_threshold > cache_info.dc_size) +- parisc_cache_flush_threshold = cache_info.dc_size; +- +- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", ++ threshold = L1_CACHE_ALIGN(size * alltime / rangetime); ++ if (threshold > cache_info.dc_size) ++ threshold = cache_info.dc_size; ++ if (threshold) ++ parisc_cache_flush_threshold = threshold; ++ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", + parisc_cache_flush_threshold/1024); + + /* calculate TLB flush threshold */ +@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void) + flush_tlb_all(); + alltime = mfctl(16) - alltime; + +- size = PAGE_SIZE; ++ size = 0; + start = (unsigned long) _text; + rangetime = mfctl(16); + while (start < (unsigned long) _end) { +@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void) + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime, size, rangetime); + +- parisc_tlb_flush_threshold = size * alltime / rangetime; +- parisc_tlb_flush_threshold *= num_online_cpus(); +- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); +- if (!parisc_tlb_flush_threshold) +- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; +- +- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", ++ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); ++ if (threshold) ++ parisc_tlb_flush_threshold = threshold; ++ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", + parisc_tlb_flush_threshold/1024); + } + +diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S +index b743a80..6755219 100644 +--- a/arch/parisc/kernel/pacache.S ++++ b/arch/parisc/kernel/pacache.S +@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */ + + fitmanymiddle: /* 
Loop if LOOP >= 2 */ + addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ +- pitlbe 0(%sr1, %r28) ++ pitlbe %r0(%sr1, %r28) + pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ + addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ + copy %arg3, %r31 /* Re-init inner loop count */ +@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */ + + fdtmanymiddle: /* Loop if LOOP >= 2 */ + addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ +- pdtlbe 0(%sr1, %r28) ++ pdtlbe %r0(%sr1, %r28) + pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ + addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ + copy %arg3, %r31 /* Re-init inner loop count */ +@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm) + /* Purge any old translations */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) +- pdtlb,l 0(%r29) ++ pdtlb,l %r0(%r28) ++ pdtlb,l %r0(%r29) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) +- pdtlb 0(%r29) ++ pdtlb %r0(%r28) ++ pdtlb %r0(%r29) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm) + /* Purge any old translation */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) ++ pdtlb,l %r0(%r28) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) ++ pdtlb %r0(%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm) + /* Purge any old translation */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) ++ pdtlb,l %r0(%r28) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) ++ pdtlb %r0(%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm) + sync + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r25) ++ pdtlb,l %r0(%r25) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r25) ++ pdtlb %r0(%r25) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm) + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + #endif + +- /* Purge any old translation */ ++ /* Purge any old translation. Note that the FIC instruction ++ * may use either the instruction or data TLB. Given that we ++ * have a flat address space, it's not clear which TLB will be ++ * used. So, we purge both entries. */ + + #ifdef CONFIG_PA20 ++ pdtlb,l %r0(%r28) + pitlb,l %r0(%sr4,%r28) + #else + tlb_lock %r20,%r21,%r22 +- pitlb (%sr4,%r28) ++ pdtlb %r0(%r28) ++ pitlb %r0(%sr4,%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm) + sync + + #ifdef CONFIG_PA20 ++ pdtlb,l %r0(%r28) + pitlb,l %r0(%sr4,%r25) + #else + tlb_lock %r20,%r21,%r22 +- pitlb (%sr4,%r25) ++ pdtlb %r0(%r28) ++ pitlb %r0(%sr4,%r25) + tlb_unlock %r20,%r21,%r22 + #endif + +diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c +index 02d9ed0..494ff6e 100644 +--- a/arch/parisc/kernel/pci-dma.c ++++ b/arch/parisc/kernel/pci-dma.c +@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte, + + if (!pte_none(*pte)) + printk(KERN_ERR "map_pte_uncached: page already exists\n"); +- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); + purge_tlb_start(flags); ++ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); + pdtlb_kernel(orig_vaddr); + purge_tlb_end(flags); + vaddr += PAGE_SIZE; +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c +index 81d6f63..2e66a88 100644 +--- a/arch/parisc/kernel/setup.c ++++ b/arch/parisc/kernel/setup.c +@@ -334,6 +334,10 @@ static int __init parisc_init(void) + /* tell PDC we're Linux. Nevermind failure. 
*/ + pdc_stable_write(0x40, &osid, sizeof(osid)); + ++ /* start with known state */ ++ flush_cache_all_local(); ++ flush_tlb_all_local(NULL); ++ + processor_init(); + #ifdef CONFIG_SMP + pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c +index 9b63b87..325f30d 100644 +--- a/arch/parisc/kernel/time.c ++++ b/arch/parisc/kernel/time.c +@@ -14,6 +14,7 @@ + #include <linux/module.h> + #include <linux/rtc.h> + #include <linux/sched.h> ++#include <linux/sched_clock.h> + #include <linux/kernel.h> + #include <linux/param.h> + #include <linux/string.h> +@@ -39,18 +40,6 @@ + + static unsigned long clocktick __read_mostly; /* timer cycles per tick */ + +-#ifndef CONFIG_64BIT +-/* +- * The processor-internal cycle counter (Control Register 16) is used as time +- * source for the sched_clock() function. This register is 64bit wide on a +- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always +- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits +- * with a per-cpu variable which we increase every time the counter +- * wraps-around (which happens every ~4 secounds). +- */ +-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits); +-#endif +- + /* + * We keep time on PA-RISC Linux by using the Interval Timer which is + * a pair of registers; one is read-only and one is write-only; both +@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) + */ + mtctl(next_tick, 16); + +-#if !defined(CONFIG_64BIT) +- /* check for overflow on a 32bit kernel (every ~4 seconds). */ +- if (unlikely(next_tick < now)) +- this_cpu_inc(cr16_high_32_bits); +-#endif +- + /* Skip one clocktick on purpose if we missed next_tick. + * The new CR16 must be "later" than current CR16 otherwise + * itimer would not fire until CR16 wrapped - e.g 4 seconds +@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc); + + /* clock source code */ + +-static cycle_t read_cr16(struct clocksource *cs) ++static cycle_t notrace read_cr16(struct clocksource *cs) + { + return get_cycles(); + } +@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts) + } + + +-/* +- * sched_clock() framework +- */ +- +-static u32 cyc2ns_mul __read_mostly; +-static u32 cyc2ns_shift __read_mostly; +- +-u64 sched_clock(void) ++static u64 notrace read_cr16_sched_clock(void) + { +- u64 now; +- +- /* Get current cycle counter (Control Register 16). 
*/ +-#ifdef CONFIG_64BIT +- now = mfctl(16); +-#else +- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32); +-#endif +- +- /* return the value in ns (cycles_2_ns) */ +- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift); ++ return get_cycles(); + } + + +@@ -316,17 +282,16 @@ u64 sched_clock(void) + + void __init time_init(void) + { +- unsigned long current_cr16_khz; ++ unsigned long cr16_hz; + +- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ + clocktick = (100 * PAGE0->mem_10msec) / HZ; +- +- /* calculate mult/shift values for cr16 */ +- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, +- NSEC_PER_MSEC, 0); +- + start_cpu_itimer(); /* get CPU 0 started */ + ++ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ ++ + /* register at clocksource framework */ +- clocksource_register_khz(&clocksource_cr16, current_cr16_khz); ++ clocksource_register_hz(&clocksource_cr16, cr16_hz); ++ ++ /* register as sched_clock source */ ++ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); + } +diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c +index d80161b..60522d2 100644 +--- a/arch/powerpc/boot/main.c ++++ b/arch/powerpc/boot/main.c +@@ -217,8 +217,12 @@ void start(void) + console_ops.close(); + + kentry = (kernel_entry_t) vmlinux.addr; +- if (ft_addr) +- kentry(ft_addr, 0, NULL); ++ if (ft_addr) { ++ if(platform_ops.kentry) ++ platform_ops.kentry(ft_addr, vmlinux.addr); ++ else ++ kentry(ft_addr, 0, NULL); ++ } + else + kentry((unsigned long)initrd.addr, initrd.size, + loader_info.promptr); +diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S +index ff2f1b9..2a99fc9 100644 +--- a/arch/powerpc/boot/opal-calls.S ++++ b/arch/powerpc/boot/opal-calls.S +@@ -12,6 +12,19 @@ + + .text + ++ .globl opal_kentry ++opal_kentry: ++ /* r3 is the fdt ptr */ ++ mtctr r4 ++ li r4, 0 ++ li r5, 0 ++ li r6, 0 ++ li r7, 0 ++ ld r11,opal@got(r2) ++ ld r8,0(r11) ++ ld r9,8(r11) ++ bctr ++ + #define OPAL_CALL(name, token) \ + .globl name; \ + name: \ +diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c +index 1f37e1c..d7b4fd4 100644 +--- a/arch/powerpc/boot/opal.c ++++ b/arch/powerpc/boot/opal.c +@@ -23,14 +23,25 @@ struct opal { + + static u32 opal_con_id; + ++/* see opal-wrappers.S */ + int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); + int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); + int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); + int64_t opal_console_flush(uint64_t term_number); + int64_t opal_poll_events(uint64_t *outstanding_event_mask); + ++void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr); ++ + static int opal_con_open(void) + { ++ /* ++ * When OPAL loads the boot kernel it stashes the OPAL base and entry ++ * address in r8 and r9 so the kernel can use the OPAL console ++ * before unflattening the devicetree. While executing the wrapper will ++ * probably trash r8 and r9 so this kentry hook restores them before ++ * entering the decompressed kernel. 
++ */ ++ platform_ops.kentry = opal_kentry; + return 0; + } + +diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h +index e19b64e..deeae6f 100644 +--- a/arch/powerpc/boot/ops.h ++++ b/arch/powerpc/boot/ops.h +@@ -30,6 +30,7 @@ struct platform_ops { + void * (*realloc)(void *ptr, unsigned long size); + void (*exit)(void); + void * (*vmlinux_alloc)(unsigned long size); ++ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr); + }; + extern struct platform_ops platform_ops; + +diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h +index e2fb408..fd10b58 100644 +--- a/arch/powerpc/include/asm/mmu.h ++++ b/arch/powerpc/include/asm/mmu.h +@@ -29,6 +29,12 @@ + */ + + /* ++ * Kernel read only support. ++ * We added the ppp value 0b110 in ISA 2.04. ++ */ ++#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000) ++ ++/* + * We need to clear top 16bits of va (from the remaining 64 bits )in + * tlbie* instructions + */ +@@ -103,10 +109,10 @@ + #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 + #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA + #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE ++#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO ++#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO ++#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO ++#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO + #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ + MMU_FTR_CI_LARGE_PAGE + #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 978dada..52cbf04 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -355,6 +355,7 @@ + #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. 
exceptions can cause exit */ + #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ + #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ ++#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */ + #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ + #define LPCR_MER_SH 11 + #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */ +diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S +index 52ff3f0..37c027c 100644 +--- a/arch/powerpc/kernel/cpu_setup_power.S ++++ b/arch/powerpc/kernel/cpu_setup_power.S +@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9) + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR +- ori r3, r3, LPCR_PECEDH +- ori r3, r3, LPCR_HVICE ++ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) ++ or r3, r3, r4 + bl __init_LPCR + bl __init_HFSCR + bl __init_tlb_power9 +@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9) + li r0,0 + mtspr SPRN_LPID,r0 + mfspr r3,SPRN_LPCR +- ori r3, r3, LPCR_PECEDH +- ori r3, r3, LPCR_HVICE ++ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) ++ or r3, r3, r4 + bl __init_LPCR + bl __init_HFSCR + bl __init_tlb_power9 +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c +index 28923b2..8dff9ce 100644 +--- a/arch/powerpc/mm/hash_utils_64.c ++++ b/arch/powerpc/mm/hash_utils_64.c +@@ -190,8 +190,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) + /* + * Kernel read only mapped with ppp bits 0b110 + */ +- if (!(pteflags & _PAGE_WRITE)) +- rflags |= (HPTE_R_PP0 | 0x2); ++ if (!(pteflags & _PAGE_WRITE)) { ++ if (mmu_has_feature(MMU_FTR_KERNEL_RO)) ++ rflags |= (HPTE_R_PP0 | 0x2); ++ else ++ rflags |= 0x3; ++ } + } else { + if (pteflags & _PAGE_RWX) + rflags |= 0x2; +diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c +index 178989e..ea960d6 100644 +--- a/arch/tile/kernel/time.c ++++ b/arch/tile/kernel/time.c +@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) + */ + unsigned long long sched_clock(void) + { +- return clocksource_cyc2ns(get_cycles(), +- sched_clock_mult, SCHED_CLOCK_SHIFT); ++ return mult_frac(get_cycles(), ++ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); + } + + int setup_profiling_timer(unsigned int multiplier) +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c +index 9b983a4..8fc714b 100644 +--- a/arch/x86/events/intel/ds.c ++++ b/arch/x86/events/intel/ds.c +@@ -1070,20 +1070,20 @@ static void setup_pebs_sample_data(struct perf_event *event, + } + + /* +- * We use the interrupt regs as a base because the PEBS record +- * does not contain a full regs set, specifically it seems to +- * lack segment descriptors, which get used by things like +- * user_mode(). ++ * We use the interrupt regs as a base because the PEBS record does not ++ * contain a full regs set, specifically it seems to lack segment ++ * descriptors, which get used by things like user_mode(). + * +- * In the simple case fix up only the IP and BP,SP regs, for +- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. +- * A possible PERF_SAMPLE_REGS will have to transfer all regs. ++ * In the simple case fix up only the IP for PERF_SAMPLE_IP. 
++ * ++ * We must however always use BP,SP from iregs for the unwinder to stay ++ * sane; the record BP,SP can point into thin air when the record is ++ * from a previous PMI context or an (I)RET happend between the record ++ * and PMI. + */ + *regs = *iregs; + regs->flags = pebs->flags; + set_linear_ip(regs, pebs->ip); +- regs->bp = pebs->bp; +- regs->sp = pebs->sp; + + if (sample_type & PERF_SAMPLE_REGS_INTR) { + regs->ax = pebs->ax; +@@ -1092,10 +1092,21 @@ static void setup_pebs_sample_data(struct perf_event *event, + regs->dx = pebs->dx; + regs->si = pebs->si; + regs->di = pebs->di; +- regs->bp = pebs->bp; +- regs->sp = pebs->sp; + +- regs->flags = pebs->flags; ++ /* ++ * Per the above; only set BP,SP if we don't need callchains. ++ * ++ * XXX: does this make sense? ++ */ ++ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { ++ regs->bp = pebs->bp; ++ regs->sp = pebs->sp; ++ } ++ ++ /* ++ * Preserve PERF_EFLAGS_VM from set_linear_ip(). ++ */ ++ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); + #ifndef CONFIG_X86_32 + regs->r8 = pebs->r8; + regs->r9 = pebs->r9; +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index 8c4a477..181c238 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -113,7 +113,7 @@ struct debug_store { + * Per register state. + */ + struct er_account { +- raw_spinlock_t lock; /* per-core: protect structure */ ++ raw_spinlock_t lock; /* per-core: protect structure */ + u64 config; /* extra MSR config */ + u64 reg; /* extra MSR number */ + atomic_t ref; /* reference count */ +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index 3fc03a0..c289e2f 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -517,14 +517,14 @@ void fpu__clear(struct fpu *fpu) + { + WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ + +- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { +- /* FPU state will be reallocated lazily at the first use. */ +- fpu__drop(fpu); +- } else { +- if (!fpu->fpstate_active) { +- fpu__activate_curr(fpu); +- user_fpu_begin(); +- } ++ fpu__drop(fpu); ++ ++ /* ++ * Make sure fpstate is cleared and initialized. ++ */ ++ if (static_cpu_has(X86_FEATURE_FPU)) { ++ fpu__activate_curr(fpu); ++ user_fpu_begin(); + copy_init_fpstate_to_fpregs(); + } + } +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index cbd7b92..a3ce9d2 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) + static int em_jmp_far(struct x86_emulate_ctxt *ctxt) + { + int rc; +- unsigned short sel, old_sel; +- struct desc_struct old_desc, new_desc; +- const struct x86_emulate_ops *ops = ctxt->ops; ++ unsigned short sel; ++ struct desc_struct new_desc; + u8 cpl = ctxt->ops->cpl(ctxt); + +- /* Assignment of RIP may only fail in 64-bit mode */ +- if (ctxt->mode == X86EMUL_MODE_PROT64) +- ops->get_segment(ctxt, &old_sel, &old_desc, NULL, +- VCPU_SREG_CS); +- + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); + + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, +@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) + return rc; + + rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); +- if (rc != X86EMUL_CONTINUE) { +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); +- /* assigning eip failed; restore the old cs */ +- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); +- return rc; +- } ++ /* Error handling is not implemented. 
*/ ++ if (rc != X86EMUL_CONTINUE) ++ return X86EMUL_UNHANDLEABLE; ++ + return rc; + } + +@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) + { + int rc; + unsigned long eip, cs; +- u16 old_cs; + int cpl = ctxt->ops->cpl(ctxt); +- struct desc_struct old_desc, new_desc; +- const struct x86_emulate_ops *ops = ctxt->ops; +- +- if (ctxt->mode == X86EMUL_MODE_PROT64) +- ops->get_segment(ctxt, &old_cs, &old_desc, NULL, +- VCPU_SREG_CS); ++ struct desc_struct new_desc; + + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) +@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_far(ctxt, eip, &new_desc); +- if (rc != X86EMUL_CONTINUE) { +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); +- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); +- } ++ /* Error handling is not implemented. */ ++ if (rc != X86EMUL_CONTINUE) ++ return X86EMUL_UNHANDLEABLE; ++ + return rc; + } + +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c +index 1a22de7..6e219e5 100644 +--- a/arch/x86/kvm/ioapic.c ++++ b/arch/x86/kvm/ioapic.c +@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, + static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) + { + ioapic->rtc_status.pending_eoi = 0; +- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); ++ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); + } + + static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); +diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h +index 7d2692a..1cc6e54 100644 +--- a/arch/x86/kvm/ioapic.h ++++ b/arch/x86/kvm/ioapic.h +@@ -42,13 +42,13 @@ struct kvm_vcpu; + + struct dest_map { + /* vcpu bitmap where IRQ has been sent */ +- DECLARE_BITMAP(map, KVM_MAX_VCPUS); ++ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); + + /* + * Vector sent to a given vcpu, only valid when + * the vcpu's bit in map is set + */ +- u8 vectors[KVM_MAX_VCPUS]; ++ u8 vectors[KVM_MAX_VCPU_ID]; + }; + + +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c +index 25810b1..e7a112a 100644 +--- a/arch/x86/kvm/irq_comm.c ++++ b/arch/x86/kvm/irq_comm.c +@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, + bool line_status) + { + struct kvm_pic *pic = pic_irqchip(kvm); ++ ++ /* ++ * XXX: rejecting pic routes when pic isn't in use would be better, ++ * but the default routing table is installed while kvm->arch.vpic is ++ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. 
++ */ ++ if (!pic) ++ return -1; ++ + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); + } + +@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, + bool line_status) + { + struct kvm_ioapic *ioapic = kvm->arch.vioapic; ++ ++ if (!ioapic) ++ return -1; ++ + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, + line_status); + } +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index b62c852..d2255e4 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, + *mask = dest_id & 0xff; + return true; + case KVM_APIC_MODE_XAPIC_CLUSTER: +- *cluster = map->xapic_cluster_map[dest_id >> 4]; ++ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; + *mask = dest_id & 0xf; + return true; + default: +diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c +index 832b98f..a3a983f 100644 +--- a/arch/x86/mm/extable.c ++++ b/arch/x86/mm/extable.c +@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) + if (early_recursion_flag > 2) + goto halt_loop; + +- if (regs->cs != __KERNEL_CS) ++ /* ++ * Old CPUs leave the high bits of CS on the stack ++ * undefined. I'm not sure which CPUs do this, but at least ++ * the 486 DX works this way. ++ */ ++ if ((regs->cs & 0xFFFF) != __KERNEL_CS) + goto fail; + + /* +diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c +index 865f46e..c80765b 100644 +--- a/crypto/asymmetric_keys/x509_cert_parser.c ++++ b/crypto/asymmetric_keys/x509_cert_parser.c +@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) + return cert; + + error_decode: +- kfree(cert->pub->key); + kfree(ctx); + error_no_ctx: + x509_free_certificate(cert); +diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c +index 29f600f..ff64313 100644 +--- a/drivers/dax/dax.c ++++ b/drivers/dax/dax.c +@@ -323,8 +323,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma, + if (!dax_dev->alive) + return -ENXIO; + +- /* prevent private / writable mappings from being established */ +- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) { ++ /* prevent private mappings from being established */ ++ if ((vma->vm_flags & VM_SHARED) != VM_SHARED) { + dev_info(dev, "%s: %s: fail, attempted private mapping\n", + current->comm, func); + return -EINVAL; +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c +index 73ae849..76dd42d 100644 +--- a/drivers/dax/pmem.c ++++ b/drivers/dax/pmem.c +@@ -77,7 +77,9 @@ static int dax_pmem_probe(struct device *dev) + nsio = to_nd_namespace_io(&ndns->dev); + + /* parse the 'pfn' info block via ->rw_bytes */ +- devm_nsio_enable(dev, nsio); ++ rc = devm_nsio_enable(dev, nsio); ++ if (rc) ++ return rc; + altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); + if (IS_ERR(altmap)) + return PTR_ERR(altmap); +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 58470f5..8c53748 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, + struct pci_dev *pdev = to_pci_dev(data); + struct dmar_pci_notify_info *info; + +- /* Only care about add/remove events for physical functions */ ++ /* Only care about add/remove events for physical functions. ++ * For VFs we actually do the lookup based on the corresponding ++ * PF in device_to_iommu() anyway. 
*/ + if (pdev->is_virtfn) + return NOTIFY_DONE; + if (action != BUS_NOTIFY_ADD_DEVICE && +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 1257b0b..7fb5387 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf + return NULL; + + if (dev_is_pci(dev)) { ++ struct pci_dev *pf_pdev; ++ + pdev = to_pci_dev(dev); ++ /* VFs aren't listed in scope tables; we need to look up ++ * the PF instead to find the IOMMU. */ ++ pf_pdev = pci_physfn(pdev); ++ dev = &pf_pdev->dev; + segment = pci_domain_nr(pdev->bus); + } else if (has_acpi_companion(dev)) + dev = &ACPI_COMPANION(dev)->dev; +@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf + for_each_active_dev_scope(drhd->devices, + drhd->devices_cnt, i, tmp) { + if (tmp == dev) { ++ /* For a VF use its original BDF# not that of the PF ++ * which we used for the IOMMU lookup. Strictly speaking ++ * we could do this for all PCI devices; we only need to ++ * get the BDF# from the scope table for ACPI matches. */ ++ if (pdev->is_virtfn) ++ goto got_pdev; ++ + *bus = drhd->devices[i].bus; + *devfn = drhd->devices[i].devfn; + goto out; +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index 8ebb353..cb72e00 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + struct page *pages; + int order; + +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; +- if (order < 0) +- order = 0; +- ++ /* Start at 2 because it's defined as 2^(1+PSS) */ ++ iommu->pasid_max = 2 << ecap_pss(iommu->ecap); ++ ++ /* Eventually I'm promised we will get a multi-level PASID table ++ * and it won't have to be physically contiguous. Until then, ++ * limit the size because 8MiB contiguous allocations can be hard ++ * to come by. The limit of 0x20000, which is 1MiB for each of ++ * the PASID and PASID-state tables, is somewhat arbitrary. */ ++ if (iommu->pasid_max > 0x20000) ++ iommu->pasid_max = 0x20000; ++ ++ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!pages) { + pr_warn("IOMMU: %s: Failed to allocate PASID table\n", +@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); + + if (ecap_dis(iommu->ecap)) { ++ /* Just making it explicit... 
*/ ++ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (pages) + iommu->pasid_state_table = page_address(pages); +@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + + int intel_svm_free_pasid_tables(struct intel_iommu *iommu) + { +- int order; +- +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; +- if (order < 0) +- order = 0; ++ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); + + if (iommu->pasid_table) { + free_pages((unsigned long)iommu->pasid_table, order); +@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ + } + svm->iommu = iommu; + +- if (pasid_max > 2 << ecap_pss(iommu->ecap)) +- pasid_max = 2 << ecap_pss(iommu->ecap); ++ if (pasid_max > iommu->pasid_max) ++ pasid_max = iommu->pasid_max; + + /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ + ret = idr_alloc(&iommu->pasid_idr, svm, +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 317ef63..8d96a22 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv) + int i; + tuner_dbg("%s called\n", __func__); + ++ /* free allocated f/w string */ ++ if (priv->fname != firmware_name) ++ kfree(priv->fname); ++ priv->fname = NULL; ++ ++ priv->state = XC2028_NO_FIRMWARE; ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); ++ + if (!priv->firm) + return; + +@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv) + + priv->firm = NULL; + priv->firm_size = 0; +- priv->state = XC2028_NO_FIRMWARE; +- +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + } + + static int load_all_firmwares(struct dvb_frontend *fe, +@@ -884,9 +889,8 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type, + return 0; + + fail: +- priv->state = XC2028_NO_FIRMWARE; ++ free_firmware(priv); + +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (retry_count < 8) { + msleep(50); + retry_count++; +@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) + mutex_lock(&xc2028_list_mutex); + + /* only perform final cleanup if this is the last instance */ +- if (hybrid_tuner_report_instance_count(priv) == 1) { ++ if (hybrid_tuner_report_instance_count(priv) == 1) + free_firmware(priv); +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; +- } + + if (priv) + hybrid_tuner_release_state(priv); +@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + /* + * Copy the config data. +- * For the firmware name, keep a local copy of the string, +- * in order to avoid troubles during device release. + */ +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); +- if (p->fname) { +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); +- if (priv->ctrl.fname == NULL) { +- rc = -ENOMEM; +- goto unlock; +- } +- } + + /* + * If firmware name changed, frees firmware. 
As free_firmware will +@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + if (priv->state == XC2028_NO_FIRMWARE) { + if (!firmware_name[0]) +- priv->fname = priv->ctrl.fname; ++ priv->fname = kstrdup(p->fname, GFP_KERNEL); + else + priv->fname = firmware_name; + ++ if (!priv->fname) { ++ rc = -ENOMEM; ++ goto unlock; ++ } ++ + rc = request_firmware_nowait(THIS_MODULE, 1, + priv->fname, + priv->i2c_props.adap->dev.parent, +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c +index 239be2f..2267601 100644 +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host, + return ret; + } + } ++ /* ++ * The DAT[3:0] line signal levels and the CMD line signal level are ++ * not compatible with standard SDHC register. The line signal levels ++ * DAT[7:0] are at bits 31:24 and the command line signal level is at ++ * bit 23. All other bits are the same as in the standard SDHC ++ * register. ++ */ ++ if (spec_reg == SDHCI_PRESENT_STATE) { ++ ret = value & 0x000fffff; ++ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; ++ ret |= (value << 1) & SDHCI_CMD_LVL; ++ return ret; ++ } ++ + ret = value; + return ret; + } +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index 0411c9f..1b3bd1c 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -73,6 +73,7 @@ + #define SDHCI_DATA_LVL_MASK 0x00F00000 + #define SDHCI_DATA_LVL_SHIFT 20 + #define SDHCI_DATA_0_LVL_MASK 0x00100000 ++#define SDHCI_CMD_LVL 0x01000000 + + #define SDHCI_HOST_CONTROL 0x28 + #define SDHCI_CTRL_LED 0x01 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 46c0f5e..58e6029 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3894,6 +3894,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + } + } + ++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) ++{ ++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); ++} ++ + /** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object +@@ -3915,6 +3920,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) + if (!scmd) + continue; + count++; ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_unblock(scmd->device, ++ SDEV_RUNNING); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) +@@ -4019,8 +4027,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) + SAM_STAT_CHECK_CONDITION; + } + +- +- + /** + * scsih_qcmd - main scsi request entry point + * @scmd: pointer to scsi command object +@@ -4047,6 +4053,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + ++ /* ++ * Lock the device for any subsequent command until command is ++ * done. 
++ */ ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_block(scmd->device); ++ + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; +@@ -4622,6 +4635,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + if (scmd == NULL) + return 1; + ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); ++ + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { +diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c +index 7a22307..afada65 100644 +--- a/drivers/thermal/intel_powerclamp.c ++++ b/drivers/thermal/intel_powerclamp.c +@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { + .set_cur_state = powerclamp_set_cur_state, + }; + ++static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { ++ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, ++ {} ++}; ++MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); ++ + static int __init powerclamp_probe(void) + { +- if (!boot_cpu_has(X86_FEATURE_MWAIT)) { ++ ++ if (!x86_match_cpu(intel_powerclamp_ids)) { + pr_err("CPU does not support MWAIT"); + return -ENODEV; + } +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index 69426e6..3dbb4a2 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) + if (!ci) + return -ENOMEM; + ++ spin_lock_init(&ci->lock); + ci->dev = dev; + ci->platdata = dev_get_platdata(dev); + ci->imx28_write_fix = !!(ci->platdata->flags & +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index b933568..bced28f 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -1895,8 +1895,6 @@ static int udc_start(struct ci_hdrc *ci) + struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; + int retval = 0; + +- spin_lock_init(&ci->lock); +- + ci->gadget.ops = &usb_gadget_ops; + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->gadget.max_speed = USB_SPEED_HIGH; +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index f61477b..243ac5e 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ ++ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 0ff7f38..6e9fc8b 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, ++ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h 
b/drivers/usb/serial/ftdi_sio_ids.h +index 21011c0..48ee04c 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -596,6 +596,12 @@ + #define STK541_PID 0x2109 /* Zigbee Controller */ + + /* ++ * Texas Instruments ++ */ ++#define TI_VID 0x0451 ++#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ ++ ++/* + * Blackfin gnICE JTAG + * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice + */ +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c +index ffd0867..1a59f33 100644 +--- a/drivers/usb/storage/transport.c ++++ b/drivers/usb/storage/transport.c +@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) + + /* COMMAND STAGE */ + /* let's send the command via the control pipe */ ++ /* ++ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. ++ * Stack may be vmallocated. So no DMA for us. Make a copy. ++ */ ++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len); + result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, + US_CBI_ADSC, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, +- us->ifnum, srb->cmnd, srb->cmd_len); ++ us->ifnum, us->iobuf, srb->cmd_len); + + /* check the return code for the command */ + usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index 52a2831..48efe62 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, + } + + ret = -EPROTONOSUPPORT; +- if (minorversion == 0) ++ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) + ret = nfs4_callback_up_net(serv, net); + else if (xprt->ops->bc_up) + ret = xprt->ops->bc_up(serv, net); +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 2d9b6500..d49e26c 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -429,6 +429,7 @@ struct intel_iommu { + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct idr pasid_idr; ++ u32 pasid_max; + #endif + struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ +diff --git a/kernel/events/core.c b/kernel/events/core.c +index fc9bb22..f8c5f5e 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -7908,6 +7908,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event) + * if <size> is not specified, the range is treated as a single address. + */ + enum { ++ IF_ACT_NONE = -1, + IF_ACT_FILTER, + IF_ACT_START, + IF_ACT_STOP, +@@ -7931,6 +7932,7 @@ static const match_table_t if_tokens = { + { IF_SRC_KERNEL, "%u/%u" }, + { IF_SRC_FILEADDR, "%u@%s" }, + { IF_SRC_KERNELADDR, "%u" }, ++ { IF_ACT_NONE, NULL }, + }; + + /* +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c +index 5464c87..e24388a 100644 +--- a/lib/mpi/mpi-pow.c ++++ b/lib/mpi/mpi-pow.c +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) + if (!esize) { + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 + * depending on if MOD equals 1. */ +- rp[0] = 1; + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 
0 : 1; ++ if (res->nlimbs) { ++ if (mpi_resize(res, 1) < 0) ++ goto enomem; ++ rp = res->d; ++ rp[0] = 1; ++ } + res->sign = 0; + goto leave; + } +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index a2214c6..7401e99 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -3161,6 +3161,16 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla + if (!order || order > PAGE_ALLOC_COSTLY_ORDER) + return false; + ++#ifdef CONFIG_COMPACTION ++ /* ++ * This is a gross workaround to compensate a lack of reliable compaction ++ * operation. We cannot simply go OOM with the current state of the compaction ++ * code because this can lead to pre mature OOM declaration. ++ */ ++ if (order <= PAGE_ALLOC_COSTLY_ORDER) ++ return true; ++#endif ++ + /* + * There are setups with compaction disabled which would prefer to loop + * inside the allocator rather than hit the oom killer prematurely. +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 8af9d25..436a753 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -77,7 +77,7 @@ + (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ + (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) + +-#define CAN_BCM_VERSION "20160617" ++#define CAN_BCM_VERSION "20161123" + + MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); + MODULE_LICENSE("Dual BSD/GPL"); +@@ -109,8 +109,9 @@ struct bcm_op { + u32 count; + u32 nframes; + u32 currframe; +- struct canfd_frame *frames; +- struct canfd_frame *last_frames; ++ /* void pointers to arrays of struct can[fd]_frame */ ++ void *frames; ++ void *last_frames; + struct canfd_frame sframe; + struct canfd_frame last_sframe; + struct sock *sk; +@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) + + if (op->flags & RX_FILTER_ID) { + /* the easiest case */ +- bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); ++ bcm_rx_update_and_send(op, op->last_frames, rxframe); + goto rx_starttimer; + } + +@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + + if (msg_head->nframes) { + /* update CAN frames content */ +- err = memcpy_from_msg((u8 *)op->frames, msg, ++ err = memcpy_from_msg(op->frames, msg, + msg_head->nframes * op->cfsiz); + if (err < 0) + return err; +@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + } + + if (msg_head->nframes) { +- err = memcpy_from_msg((u8 *)op->frames, msg, ++ err = memcpy_from_msg(op->frames, msg, + msg_head->nframes * op->cfsiz); + if (err < 0) { + if (op->frames != &op->sframe) +@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + /* check flags */ + + if (op->flags & RX_RTR_FRAME) { ++ struct canfd_frame *frame0 = op->frames; + + /* no timers in RTR-mode */ + hrtimer_cancel(&op->thrtimer); +@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + * prevent a full-load-loopback-test ... 
;-] + */ + if ((op->flags & TX_CP_CAN_ID) || +- (op->frames[0].can_id == op->can_id)) +- op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; ++ (frame0->can_id == op->can_id)) ++ frame0->can_id = op->can_id & ~CAN_RTR_FLAG; + + } else { + if (op->flags & SETTIMER) { +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c +index 5550a86..396aac7 100644 +--- a/net/core/flow_dissector.c ++++ b/net/core/flow_dissector.c +@@ -945,4 +945,4 @@ static int __init init_default_flow_dissectors(void) + return 0; + } + +-late_initcall_sync(init_default_flow_dissectors); ++core_initcall(init_default_flow_dissectors); +diff --git a/net/wireless/core.h b/net/wireless/core.h +index eee9144..66f2a11 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -71,6 +71,7 @@ struct cfg80211_registered_device { + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; ++ u32 bss_entries; + struct cfg80211_scan_request *scan_req; /* protected by RTNL */ + struct sk_buff *scan_msg; + struct cfg80211_sched_scan_request __rcu *sched_scan_req; +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 0358e12..438143a 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -57,6 +57,19 @@ + * also linked into the probe response struct. + */ + ++/* ++ * Limit the number of BSS entries stored in mac80211. Each one is ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory. ++ * If somebody wants to really attack this though, they'd likely ++ * use small beacons, and only one type of frame, limiting each of ++ * the entries to a much smaller size (in order to generate more ++ * entries in total, so overhead is bigger.) ++ */ ++static int bss_entries_limit = 1000; ++module_param(bss_entries_limit, int, 0644); ++MODULE_PARM_DESC(bss_entries_limit, ++ "limit to number of scan BSS entries (per wiphy, default 1000)"); ++ + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) + + static void bss_free(struct cfg80211_internal_bss *bss) +@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, + + list_del_init(&bss->list); + rb_erase(&bss->rbn, &rdev->bss_tree); ++ rdev->bss_entries--; ++ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), ++ "rdev bss entries[%d]/list[empty:%d] corruption\n", ++ rdev->bss_entries, list_empty(&rdev->bss_list)); + bss_ref_put(rdev, bss); + return true; + } +@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, + rdev->bss_generation++; + } + ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) ++{ ++ struct cfg80211_internal_bss *bss, *oldest = NULL; ++ bool ret; ++ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ list_for_each_entry(bss, &rdev->bss_list, list) { ++ if (atomic_read(&bss->hold)) ++ continue; ++ ++ if (!list_empty(&bss->hidden_list) && ++ !bss->pub.hidden_beacon_bss) ++ continue; ++ ++ if (oldest && time_before(oldest->ts, bss->ts)) ++ continue; ++ oldest = bss; ++ } ++ ++ if (WARN_ON(!oldest)) ++ return false; ++ ++ /* ++ * The callers make sure to increase rdev->bss_generation if anything ++ * gets removed (and a new entry added), so there's no need to also do ++ * it here. 
++ */ ++ ++ ret = __cfg80211_unlink_bss(rdev, oldest); ++ WARN_ON(!ret); ++ return ret; ++} ++ + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, + bool send_message) + { +@@ -693,6 +744,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + const u8 *ie; + int i, ssidlen; + u8 fold = 0; ++ u32 n_entries = 0; + + ies = rcu_access_pointer(new->pub.beacon_ies); + if (WARN_ON(!ies)) +@@ -716,6 +768,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + /* This is the bad part ... */ + + list_for_each_entry(bss, &rdev->bss_list, list) { ++ /* ++ * we're iterating all the entries anyway, so take the ++ * opportunity to validate the list length accounting ++ */ ++ n_entries++; ++ + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) + continue; + if (bss->pub.channel != new->pub.channel) +@@ -744,6 +802,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + new->pub.beacon_ies); + } + ++ WARN_ONCE(n_entries != rdev->bss_entries, ++ "rdev bss entries[%d]/list[len:%d] corruption\n", ++ rdev->bss_entries, n_entries); ++ + return true; + } + +@@ -898,7 +960,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, + } + } + ++ if (rdev->bss_entries >= bss_entries_limit && ++ !cfg80211_bss_expire_oldest(rdev)) { ++ kfree(new); ++ goto drop; ++ } ++ + list_add_tail(&new->list, &rdev->bss_list); ++ rdev->bss_entries++; + rb_insert_bss(rdev, new); + found = new; + } +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index fc3036b..a4d90aa 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) + /* released below */ + cred = get_current_cred(); + cxt = cred_cxt(cred); +- profile = aa_cred_profile(cred); +- previous_profile = cxt->previous; ++ profile = aa_get_newest_profile(aa_cred_profile(cred)); ++ previous_profile = aa_get_newest_profile(cxt->previous); + + if (unconfined(profile)) { + info = "unconfined"; +@@ -718,6 +718,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) + out: + aa_put_profile(hat); + kfree(name); ++ aa_put_profile(profile); ++ aa_put_profile(previous_profile); + put_cred(cred); + + return error; |